Class: OpenAI

Extends

ToolCallLLM<OpenAIAdditionalChatOptions>

Extended by

Constructors

new OpenAI()

new OpenAI(init?): OpenAI

Parameters

init?: Partial<OpenAI> & object

Returns

OpenAI

Overrides

ToolCallLLM.constructor

Source

packages/llamaindex/src/llm/openai.ts:173
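
As a sketch, construction with a partial init object might look like the following; the model name and option values are placeholders, and any subset of the properties listed below can be supplied:

```ts
import { OpenAI } from "llamaindex";

// Every field is optional; omitted values fall back to the class defaults.
const llm = new OpenAI({
  model: "gpt-4-turbo", // placeholder model name
  temperature: 0.1,
  topP: 1,
  maxTokens: 512,
  apiKey: process.env.OPENAI_API_KEY, // assumed to fall back to the environment when omitted
  maxRetries: 5,
  timeout: 60_000,
});
```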

Properties

additionalChatOptions?

optional additionalChatOptions: OpenAIAdditionalChatOptions

Source

packages/llamaindex/src/llm/openai.ts:161


additionalSessionOptions?

optional additionalSessionOptions: Omit<Partial<ClientOptions>, "apiKey" | "timeout" | "maxRetries">

Source

packages/llamaindex/src/llm/openai.ts:168


apiKey?

optional apiKey: string = undefined

Source

packages/llamaindex/src/llm/openai.ts:164


maxRetries

maxRetries: number

Source

packages/llamaindex/src/llm/openai.ts:165


maxTokens?

optional maxTokens: number

Source

packages/llamaindex/src/llm/openai.ts:160


model

model: string

Source

packages/llamaindex/src/llm/openai.ts:157


session

session: OpenAISession

Source

packages/llamaindex/src/llm/openai.ts:167


temperature

temperature: number

Source

packages/llamaindex/src/llm/openai.ts:158


timeout?

optional timeout: number

Source

packages/llamaindex/src/llm/openai.ts:166


topP

topP: number

Source

packages/llamaindex/src/llm/openai.ts:159
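
Taken together, a sketch of wiring the session-level and chat-level options from the properties above (the `baseURL` field belongs to the openai client's ClientOptions; the values here are placeholders):

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI({
  // Forwarded to the underlying openai client; apiKey, timeout, and
  // maxRetries are excluded because the top-level properties control them.
  additionalSessionOptions: {
    baseURL: "https://example.com/v1", // placeholder proxy endpoint
  },
  // Extra fields merged into each chat completion request.
  additionalChatOptions: {
    user: "my-app", // assumed passthrough request field
  },
});
```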

Accessors

metadata

get metadata(): LLMMetadata

Returns

LLMMetadata

Source

packages/llamaindex/src/llm/openai.ts:224


supportToolCall

get supportToolCall(): boolean

Returns

boolean

Source

packages/llamaindex/src/llm/openai.ts:220
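
A brief sketch of reading both accessors; the destructured LLMMetadata field names mirror the class properties and are assumptions beyond that:

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI();

if (llm.supportToolCall) {
  // The configured model supports tool calling.
}

const { model, temperature, topP } = llm.metadata; // assumed LLMMetadata fields
```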

Methods

chat()

chat(params)

chat(params): Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>

Parameters

params: LLMChatParamsStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>

Returns

Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>

Overrides

ToolCallLLM.chat

Source

packages/llamaindex/src/llm/openai.ts:301
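
A minimal streaming sketch, assuming `ChatResponseChunk` carries its incremental text on a `delta` field:

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI();
const stream = await llm.chat({
  messages: [{ role: "user", content: "Tell me a joke." }],
  stream: true, // selects this streaming overload
});

for await (const chunk of stream) {
  process.stdout.write(chunk.delta); // assumed incremental-text field
}
```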

chat(params)

chat(params): Promise<ChatResponse<ToolCallLLMMessageOptions>>

Parameters

params: LLMChatParamsNonStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>

Returns

Promise<ChatResponse<ToolCallLLMMessageOptions>>

Overrides

ToolCallLLM.chat

Source

packages/llamaindex/src/llm/openai.ts:307
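
And a non-streaming sketch, assuming the response exposes the reply as `message.content`:

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI();
const response = await llm.chat({
  messages: [
    { role: "system", content: "You are a terse assistant." },
    { role: "user", content: "What is a monad?" },
  ],
});

console.log(response.message.content); // assumed ChatResponse shape
```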


complete()

complete(params)

complete(params): Promise<AsyncIterable<CompletionResponse>>

Parameters

params: LLMCompletionParamsStreaming

Returns

Promise<AsyncIterable<CompletionResponse>>

Inherited from

ToolCallLLM.complete

Source

packages/llamaindex/src/llm/base.ts:22
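
A streaming completion sketch, assuming each streamed CompletionResponse carries its delta on a `text` field:

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI();
const stream = await llm.complete({
  prompt: "Once upon a time",
  stream: true, // selects this streaming overload
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text); // assumed incremental-text field
}
```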

complete(params)

complete(params): Promise<CompletionResponse>

Parameters

params: LLMCompletionParamsNonStreaming

Returns

Promise<CompletionResponse>

Inherited from

ToolCallLLM.complete

Source

packages/llamaindex/src/llm/base.ts:25
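
The non-streaming counterpart, assuming CompletionResponse exposes the full completion on `text`:

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI();
const { text } = await llm.complete({
  prompt: "Summarize the plot of Hamlet in one sentence.",
});

console.log(text);
```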


streamChat()

protected streamChat(baseRequestParams): AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>

Parameters

baseRequestParams: ChatCompletionCreateParams

Returns

AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>

Source

packages/llamaindex/src/llm/openai.ts:383


toOpenAIMessage()

static toOpenAIMessage(messages): ChatCompletionMessageParam[]

Parameters

messages: ChatMessage<ToolCallLLMMessageOptions>[]

Returns

ChatCompletionMessageParam[]

Source

packages/llamaindex/src/llm/openai.ts:252
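
A sketch of the static conversion; the output follows the openai package's ChatCompletionMessageParam shape:

```ts
import { OpenAI } from "llamaindex";

const openaiMessages = OpenAI.toOpenAIMessage([
  { role: "system", content: "You are helpful." },
  { role: "user", content: "Hello!" },
]);
// The result can be passed directly to the openai client's
// chat.completions.create request body.
```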


toOpenAIRole()

static toOpenAIRole(messageType): ChatCompletionRole

Parameters

messageType: MessageType

Returns

ChatCompletionRole

Source

packages/llamaindex/src/llm/openai.ts:239
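
A one-line sketch of the role mapping (how non-standard MessageType values are handled is not shown here):

```ts
import { OpenAI } from "llamaindex";

const role = OpenAI.toOpenAIRole("user"); // expected: "user"
```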


toTool()

static toTool(tool): ChatCompletionTool

Parameters

tool: BaseTool<any>

Returns

ChatCompletionTool

Source

packages/llamaindex/src/llm/openai.ts:461
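
Finally, a hedged sketch of converting a tool definition; the metadata shape (name, description, JSON-schema parameters) is an assumption here, and the cast sidesteps BaseTool fields the conversion presumably does not need:

```ts
import { OpenAI } from "llamaindex";

// Hypothetical tool definition for illustration only.
const weatherTool = {
  metadata: {
    name: "getWeather",
    description: "Look up the current weather for a city.",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
};

const openaiTool = OpenAI.toTool(weatherTool as any); // cast for brevity
// openaiTool is a ChatCompletionTool suitable for the tools array
// of a chat completion request.
```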