Class: OpenAI
Extends
ToolCallLLM<OpenAIAdditionalChatOptions>
Constructors
new OpenAI()
new OpenAI(init?): OpenAI
Parameters
• init?: Partial<OpenAI> & object
Returns
OpenAI
Overrides
ToolCallLLM<OpenAIAdditionalChatOptions>.constructor
Source
packages/core/src/llm/openai.ts:179
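Example
A minimal construction sketch, assuming the class is exported from the llamaindex package. Every field is optional; unset values fall back to the class defaults (the API key is assumed to also be readable from the OPENAI_API_KEY environment variable when omitted).

```ts
import { OpenAI } from "llamaindex";

// All fields of Partial<OpenAI> are optional; anything omitted falls back
// to the class defaults. timeout is assumed to be in milliseconds, matching
// the underlying openai client option.
const llm = new OpenAI({
  model: "gpt-4-turbo",
  temperature: 0.1,
  maxTokens: 512,
  maxRetries: 3,
  timeout: 60_000,
});
```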
Properties
additionalChatOptions?
optional additionalChatOptions: OpenAIAdditionalChatOptions
Source
packages/core/src/llm/openai.ts:167
additionalSessionOptions?
optional additionalSessionOptions: Omit<Partial<ClientOptions>, "apiKey" | "timeout" | "maxRetries">
Source
packages/core/src/llm/openai.ts:174
apiKey?
optional apiKey: string = undefined
Source
packages/core/src/llm/openai.ts:170
maxRetries
maxRetries: number
Source
packages/core/src/llm/openai.ts:171
maxTokens?
optional maxTokens: number
Source
packages/core/src/llm/openai.ts:166
model
model: string
Source
packages/core/src/llm/openai.ts:163
session
session: OpenAISession
Source
packages/core/src/llm/openai.ts:173
temperature
temperature: number
Source
packages/core/src/llm/openai.ts:164
timeout?
optional timeout: number
Source
packages/core/src/llm/openai.ts:172
topP
topP: number
Source
packages/core/src/llm/openai.ts:165
Accessors
metadata
get metadata(): LLMMetadata
Returns
LLMMetadata
Source
packages/core/src/llm/openai.ts:236
supportToolCall
get supportToolCall(): boolean
Returns
boolean
Source
packages/core/src/llm/openai.ts:232
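Example
A short sketch of reading both accessors. The model field shown on LLMMetadata is an assumption about that type, not taken from this page.

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI({ model: "gpt-4-turbo" });

// supportToolCall reports whether the configured model can emit OpenAI-style
// tool calls; metadata summarizes the active configuration.
if (llm.supportToolCall) {
  console.log(`tool calling available for ${llm.metadata.model}`);
}
```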
Methods
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>
Parameters
• params: LLMChatParamsStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>
Returns
Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>
Overrides
ToolCallLLM.chat
Source
packages/core/src/llm/openai.ts:313
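Example
A streaming sketch: passing stream: true selects this overload, and each ChatResponseChunk is assumed to expose the incremental text as delta.

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI({ model: "gpt-4-turbo" });

// stream: true selects the streaming overload, which resolves to an
// async iterable of chunks rather than a single response.
const stream = await llm.chat({
  stream: true,
  messages: [{ role: "user", content: "Write a haiku about TypeScript." }],
});

for await (const chunk of stream) {
  process.stdout.write(chunk.delta); // delta: incremental text (assumed field)
}
```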
chat(params)
chat(params): Promise<ChatResponse<ToolCallLLMMessageOptions>>
Parameters
• params: LLMChatParamsNonStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>
Returns
Promise<ChatResponse<ToolCallLLMMessageOptions>>
Overrides
ToolCallLLM.chat
Source
packages/core/src/llm/openai.ts:319
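Example
A non-streaming sketch: without stream: true this overload resolves to a single ChatResponse, whose message.content is assumed to hold the reply text.

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI({ model: "gpt-4-turbo" });

// Omitting stream (or setting it false) selects the non-streaming overload.
const response = await llm.chat({
  messages: [
    { role: "system", content: "Answer in one sentence." },
    { role: "user", content: "What does topP control?" },
  ],
});

console.log(response.message.content); // assumed shape of ChatResponse
```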
complete()
complete(params)
complete(params): Promise<AsyncIterable<CompletionResponse>>
Parameters
• params: LLMCompletionParamsStreaming
Returns
Promise<AsyncIterable<CompletionResponse>>
Inherited from
ToolCallLLM.complete
Source
packages/core/src/llm/base.ts:22
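Example
A streaming completion sketch: stream: true yields CompletionResponse chunks as they arrive. The text field on each chunk is an assumption about CompletionResponse.

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI({ model: "gpt-4-turbo" });

// Streaming completion over a raw prompt rather than a message list.
const stream = await llm.complete({
  prompt: "Once upon a time",
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text); // text on each chunk (assumed field)
}
```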
complete(params)
complete(params): Promise<CompletionResponse>
Parameters
• params: LLMCompletionParamsNonStreaming
Returns
Promise<CompletionResponse>
Inherited from
ToolCallLLM.complete
Source
packages/core/src/llm/base.ts:25
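Example
The non-streaming counterpart resolves once with the full completion; text is assumed to hold the generated string.

```ts
import { OpenAI } from "llamaindex";

const llm = new OpenAI({ model: "gpt-4-turbo" });

// Resolves with the complete generation in one CompletionResponse.
const completion = await llm.complete({
  prompt: "The capital of France is",
});

console.log(completion.text); // full completion text (assumed field)
```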
streamChat()
protected streamChat(baseRequestParams): AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>
Parameters
• baseRequestParams: ChatCompletionCreateParams
Returns
AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>
Source
packages/core/src/llm/openai.ts:394
toOpenAIMessage()
static toOpenAIMessage(messages): ChatCompletionMessageParam[]
Parameters
• messages: ChatMessage<ToolCallLLMMessageOptions>[]
Returns
ChatCompletionMessageParam[]
Source
packages/core/src/llm/openai.ts:264
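Example
A sketch of converting framework-level ChatMessage objects into the ChatCompletionMessageParam[] wire format consumed by the openai client.

```ts
import { OpenAI } from "llamaindex";

// Static helper: no OpenAI instance is needed for the conversion.
const wireMessages = OpenAI.toOpenAIMessage([
  { role: "system", content: "Be terse." },
  { role: "user", content: "Hello!" },
]);

console.log(wireMessages);
```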
toOpenAIRole()
static toOpenAIRole(messageType): ChatCompletionRole
Parameters
• messageType: MessageType
Returns
ChatCompletionRole
Source
packages/core/src/llm/openai.ts:251
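Example
A sketch of mapping a framework MessageType onto the openai ChatCompletionRole; for the common roles the mapping is assumed to be the identity.

```ts
import { OpenAI } from "llamaindex";

// "user" is assumed to map straight through to the OpenAI role of the
// same name; framework-specific roles may be remapped internally.
const role = OpenAI.toOpenAIRole("user");
console.log(role); // expected: "user"
```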
toTool()
static toTool(tool): ChatCompletionTool
Parameters
• tool: BaseTool<any>
Returns
ChatCompletionTool
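Example
A sketch of converting a tool definition into the ChatCompletionTool shape used by the openai client's tools field. The getWeather tool and the metadata shape (name/description/parameters as a JSON schema) are assumptions about BaseTool, not taken from this page.

```ts
import { OpenAI } from "llamaindex";

// Hypothetical tool for illustration only.
const weatherTool = {
  metadata: {
    name: "getWeather",
    description: "Look up the current weather for a city.",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
};

// Produces a ChatCompletionTool suitable for passing to the OpenAI API.
const openAITool = OpenAI.toTool(weatherTool);
console.log(openAITool);
```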