Class: OpenAI
Extends
ToolCallLLM<OpenAIAdditionalChatOptions>
Extended by
Constructors
new OpenAI()
new OpenAI(init?): OpenAI
Parameters
• init?: Partial<OpenAI> & object
Returns
Overrides
ToolCallLLM<OpenAIAdditionalChatOptions>.constructor
Source
packages/core/src/llm/openai.ts:179
Properties
additionalChatOptions?
optional additionalChatOptions: OpenAIAdditionalChatOptions
Source
packages/core/src/llm/openai.ts:167
additionalSessionOptions?
optional additionalSessionOptions: Omit<Partial<ClientOptions>, "apiKey" | "timeout" | "maxRetries">
Source
packages/core/src/llm/openai.ts:174
apiKey?
optional apiKey: string = undefined
Source
packages/core/src/llm/openai.ts:170
maxRetries
maxRetries: number
Source
packages/core/src/llm/openai.ts:171
maxTokens?
optional maxTokens: number
Source
packages/core/src/llm/openai.ts:166
model
model: string
Source
packages/core/src/llm/openai.ts:163
session
session: OpenAISession
Source
packages/core/src/llm/openai.ts:173
temperature
temperature: number
Source
packages/core/src/llm/openai.ts:164
timeout?
optional timeout: number
Source
packages/core/src/llm/openai.ts:172
topP
topP: number
Source
packages/core/src/llm/openai.ts:165
Accessors
metadata
get metadata(): LLMMetadata
Returns
LLMMetadata
Source
packages/core/src/llm/openai.ts:236
supportToolCall
get supportToolCall(): boolean
Returns
boolean
Source
packages/core/src/llm/openai.ts:232
Methods
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>
Parameters
• params: LLMChatParamsStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>
Returns
Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>
Overrides
ToolCallLLM.chat
Source
packages/core/src/llm/openai.ts:313
chat(params)
chat(params): Promise<ChatResponse<ToolCallLLMMessageOptions>>
Parameters
• params: LLMChatParamsNonStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>
Returns
Promise<ChatResponse<ToolCallLLMMessageOptions>>
Overrides
ToolCallLLM.chat
Source
packages/core/src/llm/openai.ts:319
complete()
complete(params)
complete(params): Promise<AsyncIterable<CompletionResponse>>
Parameters
• params: LLMCompletionParamsStreaming
Returns
Promise<AsyncIterable<CompletionResponse>>