Skip to content

Create thread and run

Deprecated
client.Beta.Threads.NewAndRun(ctx, body) (*Run, error)
POST/threads/runs

Create a thread and run it in one request.

ParametersExpand Collapse
body BetaThreadNewAndRunParams
AssistantID param.Field[string]

The ID of the assistant to use to execute this run.

Instructions param.Field[string]optional

Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.

MaxCompletionTokens param.Field[int64]optional

The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status incomplete. See incomplete_details for more info.

minimum256
MaxPromptTokens param.Field[int64]optional

The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status incomplete. See incomplete_details for more info.

minimum256
Metadata param.Field[Metadata]optional

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model param.Field[ChatModel]optional

The ID of the Model to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.

string
type ChatModel string
Accepts one of the following:
const ChatModelGPT5_2 ChatModel = "gpt-5.2"
const ChatModelGPT5_2_2025_12_11 ChatModel = "gpt-5.2-2025-12-11"
const ChatModelGPT5_2ChatLatest ChatModel = "gpt-5.2-chat-latest"
const ChatModelGPT5_2Pro ChatModel = "gpt-5.2-pro"
const ChatModelGPT5_2Pro2025_12_11 ChatModel = "gpt-5.2-pro-2025-12-11"
const ChatModelGPT5_1 ChatModel = "gpt-5.1"
const ChatModelGPT5_1_2025_11_13 ChatModel = "gpt-5.1-2025-11-13"
const ChatModelGPT5_1Codex ChatModel = "gpt-5.1-codex"
const ChatModelGPT5_1Mini ChatModel = "gpt-5.1-mini"
const ChatModelGPT5_1ChatLatest ChatModel = "gpt-5.1-chat-latest"
const ChatModelGPT5 ChatModel = "gpt-5"
const ChatModelGPT5Mini ChatModel = "gpt-5-mini"
const ChatModelGPT5Nano ChatModel = "gpt-5-nano"
const ChatModelGPT5_2025_08_07 ChatModel = "gpt-5-2025-08-07"
const ChatModelGPT5Mini2025_08_07 ChatModel = "gpt-5-mini-2025-08-07"
const ChatModelGPT5Nano2025_08_07 ChatModel = "gpt-5-nano-2025-08-07"
const ChatModelGPT5ChatLatest ChatModel = "gpt-5-chat-latest"
const ChatModelGPT4_1 ChatModel = "gpt-4.1"
const ChatModelGPT4_1Mini ChatModel = "gpt-4.1-mini"
const ChatModelGPT4_1Nano ChatModel = "gpt-4.1-nano"
const ChatModelGPT4_1_2025_04_14 ChatModel = "gpt-4.1-2025-04-14"
const ChatModelGPT4_1Mini2025_04_14 ChatModel = "gpt-4.1-mini-2025-04-14"
const ChatModelGPT4_1Nano2025_04_14 ChatModel = "gpt-4.1-nano-2025-04-14"
const ChatModelO4Mini ChatModel = "o4-mini"
const ChatModelO4Mini2025_04_16 ChatModel = "o4-mini-2025-04-16"
const ChatModelO3 ChatModel = "o3"
const ChatModelO3_2025_04_16 ChatModel = "o3-2025-04-16"
const ChatModelO3Mini ChatModel = "o3-mini"
const ChatModelO3Mini2025_01_31 ChatModel = "o3-mini-2025-01-31"
const ChatModelO1 ChatModel = "o1"
const ChatModelO1_2024_12_17 ChatModel = "o1-2024-12-17"
const ChatModelO1Preview ChatModel = "o1-preview"
const ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
const ChatModelO1Mini ChatModel = "o1-mini"
const ChatModelO1Mini2024_09_12 ChatModel = "o1-mini-2024-09-12"
const ChatModelGPT4o ChatModel = "gpt-4o"
const ChatModelGPT4o2024_11_20 ChatModel = "gpt-4o-2024-11-20"
const ChatModelGPT4o2024_08_06 ChatModel = "gpt-4o-2024-08-06"
const ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13"
const ChatModelGPT4oAudioPreview ChatModel = "gpt-4o-audio-preview"
const ChatModelGPT4oAudioPreview2024_10_01 ChatModel = "gpt-4o-audio-preview-2024-10-01"
const ChatModelGPT4oAudioPreview2024_12_17 ChatModel = "gpt-4o-audio-preview-2024-12-17"
const ChatModelGPT4oAudioPreview2025_06_03 ChatModel = "gpt-4o-audio-preview-2025-06-03"
const ChatModelGPT4oMiniAudioPreview ChatModel = "gpt-4o-mini-audio-preview"
const ChatModelGPT4oMiniAudioPreview2024_12_17 ChatModel = "gpt-4o-mini-audio-preview-2024-12-17"
const ChatModelGPT4oSearchPreview ChatModel = "gpt-4o-search-preview"
const ChatModelGPT4oMiniSearchPreview ChatModel = "gpt-4o-mini-search-preview"
const ChatModelGPT4oSearchPreview2025_03_11 ChatModel = "gpt-4o-search-preview-2025-03-11"
const ChatModelGPT4oMiniSearchPreview2025_03_11 ChatModel = "gpt-4o-mini-search-preview-2025-03-11"
const ChatModelChatgpt4oLatest ChatModel = "chatgpt-4o-latest"
const ChatModelCodexMiniLatest ChatModel = "codex-mini-latest"
const ChatModelGPT4oMini ChatModel = "gpt-4o-mini"
const ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
const ChatModelGPT4Turbo ChatModel = "gpt-4-turbo"
const ChatModelGPT4Turbo2024_04_09 ChatModel = "gpt-4-turbo-2024-04-09"
const ChatModelGPT4_0125Preview ChatModel = "gpt-4-0125-preview"
const ChatModelGPT4TurboPreview ChatModel = "gpt-4-turbo-preview"
const ChatModelGPT4_1106Preview ChatModel = "gpt-4-1106-preview"
const ChatModelGPT4VisionPreview ChatModel = "gpt-4-vision-preview"
const ChatModelGPT4 ChatModel = "gpt-4"
const ChatModelGPT4_0314 ChatModel = "gpt-4-0314"
const ChatModelGPT4_0613 ChatModel = "gpt-4-0613"
const ChatModelGPT4_32k ChatModel = "gpt-4-32k"
const ChatModelGPT4_32k0314 ChatModel = "gpt-4-32k-0314"
const ChatModelGPT4_32k0613 ChatModel = "gpt-4-32k-0613"
const ChatModelGPT3_5Turbo ChatModel = "gpt-3.5-turbo"
const ChatModelGPT3_5Turbo16k ChatModel = "gpt-3.5-turbo-16k"
const ChatModelGPT3_5Turbo0301 ChatModel = "gpt-3.5-turbo-0301"
const ChatModelGPT3_5Turbo0613 ChatModel = "gpt-3.5-turbo-0613"
const ChatModelGPT3_5Turbo1106 ChatModel = "gpt-3.5-turbo-1106"
const ChatModelGPT3_5Turbo0125 ChatModel = "gpt-3.5-turbo-0125"
const ChatModelGPT3_5Turbo16k0613 ChatModel = "gpt-3.5-turbo-16k-0613"
ParallelToolCalls param.Field[bool]optional

Whether to enable parallel function calling during tool use.

ResponseFormat param.Field[AssistantResponseFormatOptionUnion]optional

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Temperature param.Field[float64]optional

What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

minimum0
maximum2
Thread param.Field[BetaThreadNewAndRunParamsThread]optional

Options to create a new thread. If no thread is provided when running a request, an empty thread will be created.

Messages []BetaThreadNewAndRunParamsThreadMessageoptional

A list of messages to start the thread with.

Content BetaThreadNewAndRunParamsThreadMessageContentUnion

The text contents of the message.

Accepts one of the following:
string
Accepts one of the following:
type ImageFileContentBlock struct{…}

References an image File in the content of a message.

ImageFile ImageFile
FileID string

The File ID of the image in the message content. Set purpose="vision" when uploading the File if you need to later display the file content.

Detail ImageFileDetailoptional

Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageFileDetailAuto ImageFileDetail = "auto"
const ImageFileDetailLow ImageFileDetail = "low"
const ImageFileDetailHigh ImageFileDetail = "high"
Type ImageFile

Always image_file.

type ImageURLContentBlock struct{…}

References an image URL in the content of a message.

ImageURL ImageURL
URL string

The external URL of the image. Must be one of the supported image types: jpeg, jpg, png, gif, or webp.

formaturi
Detail ImageURLDetailoptional

Specifies the detail level of the image. low uses fewer tokens, you can opt in to high resolution using high. Default value is auto.

Accepts one of the following:
const ImageURLDetailAuto ImageURLDetail = "auto"
const ImageURLDetailLow ImageURLDetail = "low"
const ImageURLDetailHigh ImageURLDetail = "high"
Type ImageURL

The type of the content part.

type TextContentBlockParam struct{…}

The text content that is part of a message.

Text string

Text content to be sent to the model

Type Text

Always text.

Role string

The role of the entity that is creating the message. Allowed values include:

  • user: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.
  • assistant: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
Accepts one of the following:
const BetaThreadNewAndRunParamsThreadMessageRoleUser BetaThreadNewAndRunParamsThreadMessageRole = "user"
const BetaThreadNewAndRunParamsThreadMessageRoleAssistant BetaThreadNewAndRunParamsThreadMessageRole = "assistant"
Attachments []BetaThreadNewAndRunParamsThreadMessageAttachmentoptional

A list of files attached to the message, and the tools they should be added to.

FileID stringoptional

The ID of the file to attach to the message.

Tools []BetaThreadNewAndRunParamsThreadMessageAttachmentToolUnionoptional

The tools to add this file to.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

BetaThreadNewAndRunParamsThreadMessageAttachmentToolFileSearch
Type FileSearch

The type of tool being defined: file_search

Metadata Metadataoptional

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Metadata Metadataoptional

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

ToolResources BetaThreadNewAndRunParamsThreadToolResourcesoptional

A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.

CodeInterpreter BetaThreadNewAndRunParamsThreadToolResourcesCodeInterpreteroptional
FileIDs []stringoptional

A list of file IDs made available to the code_interpreter tool. There can be a maximum of 20 files associated with the tool.

Accepts one of the following:
ToolChoice param.Field[AssistantToolChoiceOptionUnion]optional

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

ToolResources param.Field[BetaThreadNewAndRunParamsToolResources]optional

A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.

CodeInterpreter BetaThreadNewAndRunParamsToolResourcesCodeInterpreteroptional
FileIDs []stringoptional

A list of file IDs made available to the code_interpreter tool. There can be a maximum of 20 files associated with the tool.

Tools param.Field[[]AssistantToolUnion]optional

Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.

type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TopP param.Field[float64]optional

An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or temperature but not both.

minimum0
maximum1
TruncationStrategy param.Field[BetaThreadNewAndRunParamsTruncationStrategy]optional

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const BetaThreadNewAndRunParamsTruncationStrategyTypeAuto BetaThreadNewAndRunParamsTruncationStrategyType = "auto"
const BetaThreadNewAndRunParamsTruncationStrategyTypeLastMessages BetaThreadNewAndRunParamsTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
ReturnsExpand Collapse
type Run struct{…}

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

type AssistantStreamEventUnion interface{…}

Represents an event emitted when streaming a Run.

Each event in a server-sent events stream has an event and data property:

event: thread.created
data: {"id": "thread_123", "object": "thread", ...}

We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit thread.run.created when a new run is created, thread.run.completed when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a thread.message.created event, a thread.message.in_progress event, many thread.message.delta events, and finally a thread.message.completed event.

We may add additional events over time, so we recommend handling unknown events gracefully in your code. See the Assistants API quickstart to learn how to integrate the Assistants API with streaming.

Accepts one of the following:
type AssistantStreamEventThreadCreated struct{…}

Occurs when a new thread is created.

Data Thread

Represents a thread that contains messages.

ID string

The identifier, which can be referenced in API endpoints.

CreatedAt int64

The Unix timestamp (in seconds) for when the thread was created.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object Thread

The object type, which is always thread.

ToolResources ThreadToolResources

A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs.

CodeInterpreter ThreadToolResourcesCodeInterpreteroptional
FileIDs []stringoptional

A list of file IDs made available to the code_interpreter tool. There can be a maximum of 20 files associated with the tool.

Event ThreadCreated
Enabled booloptional

Whether to enable input audio transcription.

type AssistantStreamEventThreadRunCreated struct{…}

Occurs when a new run is created.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunCreated
type AssistantStreamEventThreadRunQueued struct{…}

Occurs when a run moves to a queued status.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunQueued
type AssistantStreamEventThreadRunInProgress struct{…}

Occurs when a run moves to an in_progress status.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunInProgress
type AssistantStreamEventThreadRunRequiresAction struct{…}

Occurs when a run moves to a requires_action status.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunRequiresAction
type AssistantStreamEventThreadRunCompleted struct{…}

Occurs when a run is completed.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunCompleted
type AssistantStreamEventThreadRunIncomplete struct{…}

Occurs when a run ends with status incomplete.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set.

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunIncomplete
type AssistantStreamEventThreadRunFailed struct{…}

Occurs when a run fails.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread to use when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunFailed
type AssistantStreamEventThreadRunCancelling struct{…}

Occurs when a run moves to a cancelling status.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread to use when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunCancelling
type AssistantStreamEventThreadRunCancelled struct{…}

Occurs when a run is cancelled.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread to use when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunCancelled
type AssistantStreamEventThreadRunExpired struct{…}

Occurs when a run expires.

Data Run

Represents an execution run on a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant used for execution of this run.

CancelledAt int64

The Unix timestamp (in seconds) for when the run was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run was completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run was created.

ExpiresAt int64

The Unix timestamp (in seconds) for when the run will expire.

FailedAt int64

The Unix timestamp (in seconds) for when the run failed.

IncompleteDetails RunIncompleteDetails

Details on why the run is incomplete. Will be null if the run is not incomplete.

Reason stringoptional

The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run.

Accepts one of the following:
const RunIncompleteDetailsReasonMaxCompletionTokens RunIncompleteDetailsReason = "max_completion_tokens"
const RunIncompleteDetailsReasonMaxPromptTokens RunIncompleteDetailsReason = "max_prompt_tokens"
Instructions string

The instructions that the assistant used for this run.

LastError RunLastError

The last error associated with this run. Will be null if there are no errors.

Code string

One of server_error, rate_limit_exceeded, or invalid_prompt.

Accepts one of the following:
const RunLastErrorCodeServerError RunLastErrorCode = "server_error"
const RunLastErrorCodeRateLimitExceeded RunLastErrorCode = "rate_limit_exceeded"
const RunLastErrorCodeInvalidPrompt RunLastErrorCode = "invalid_prompt"
Message string

A human-readable description of the error.

MaxCompletionTokens int64

The maximum number of completion tokens specified to have been used over the course of the run.

minimum256
MaxPromptTokens int64

The maximum number of prompt tokens specified to have been used over the course of the run.

minimum256
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Model string

The model that the assistant used for this run.

Object ThreadRun

The object type, which is always thread.run.

ParallelToolCalls bool

Whether to enable parallel function calling during tool use.

RequiredAction RunRequiredAction

Details on the action required to continue the run. Will be null if no action is required.

SubmitToolOutputs RunRequiredActionSubmitToolOutputs

Details on the tool outputs needed for this run to continue.

A list of the relevant tool calls.

ID string

The ID of the tool call. This ID must be referenced when you submit the tool outputs using the Submit tool outputs to run endpoint.

Function RequiredActionFunctionToolCallFunction

The function definition.

Arguments string

The arguments that the model expects you to pass to the function.

Name string

The name of the function.

Type Function

The type of tool call the output is required for. For now, this is always function.

Type SubmitToolOutputs

For now, this is always submit_tool_outputs.

Specifies the format that the model must output. Compatible with GPT-4o, GPT-4 Turbo, and all GPT-3.5 Turbo models since gpt-3.5-turbo-1106.

Setting to { "type": "json_schema", "json_schema": {...} } enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the Structured Outputs guide.

Setting to { "type": "json_object" } enables JSON mode, which ensures the message the model generates is valid JSON.

Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.

Accepts one of the following:
type Auto string

auto is the default value

type ResponseFormatText struct{…}

Default response format. Used to generate text responses.

Type Text

The type of response format being defined. Always text.

type ResponseFormatJSONObject struct{…}

JSON object response format. An older method of generating JSON responses. Using json_schema is recommended for models that support it. Note that the model will not generate JSON without a system or user message instructing it to do so.

Type JSONObject

The type of response format being defined. Always json_object.

type ResponseFormatJSONSchema struct{…}

JSON Schema response format. Used to generate structured JSON responses. Learn more about Structured Outputs.

JSONSchema ResponseFormatJSONSchemaJSONSchema

Structured Outputs configuration options, including a JSON Schema.

Name string

The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the response format is for, used by the model to determine how to respond in the format.

Schema map[string, any]optional

The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas here.

Strict booloptional

Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the schema field. Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.

Type JSONSchema

The type of response format being defined. Always json_schema.

StartedAt int64

The Unix timestamp (in seconds) for when the run was started.

Status RunStatus

The status of the run, which can be either queued, in_progress, requires_action, cancelling, cancelled, failed, completed, incomplete, or expired.

Accepts one of the following:
const RunStatusQueued RunStatus = "queued"
const RunStatusInProgress RunStatus = "in_progress"
const RunStatusRequiresAction RunStatus = "requires_action"
const RunStatusCancelling RunStatus = "cancelling"
const RunStatusCancelled RunStatus = "cancelled"
const RunStatusFailed RunStatus = "failed"
const RunStatusCompleted RunStatus = "completed"
const RunStatusIncomplete RunStatus = "incomplete"
const RunStatusExpired RunStatus = "expired"
ThreadID string

The ID of the thread that was executed on as a part of this run.

Controls which (if any) tool is called by the model. none means the model will not call any tools and instead generates a message. auto is the default value and means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user. Specifying a particular tool like {"type": "file_search"} or {"type": "function", "function": {"name": "my_function"}} forces the model to call that tool.

Accepts one of the following:
type AssistantToolChoiceOptionAuto string

none means the model will not call any tools and instead generates a message. auto means the model can pick between generating a message or calling one or more tools. required means the model must call one or more tools before responding to the user.

Accepts one of the following:
const AssistantToolChoiceOptionAutoNone AssistantToolChoiceOptionAuto = "none"
const AssistantToolChoiceOptionAutoAuto AssistantToolChoiceOptionAuto = "auto"
const AssistantToolChoiceOptionAutoRequired AssistantToolChoiceOptionAuto = "required"
type AssistantToolChoice struct{…}

Specifies a tool the model should use. Use to force the model to call a specific tool.

Type AssistantToolChoiceType

The type of the tool. If type is function, the function name must be set

Accepts one of the following:
const AssistantToolChoiceTypeFunction AssistantToolChoiceType = "function"
const AssistantToolChoiceTypeCodeInterpreter AssistantToolChoiceType = "code_interpreter"
const AssistantToolChoiceTypeFileSearch AssistantToolChoiceType = "file_search"
Name string

The name of the function to call.

The list of tools that the assistant used for this run.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type FileSearchTool struct{…}
Type FileSearch

The type of tool being defined: file_search

Accepts one of the following:
type FunctionTool struct{…}
Name string

The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.

Description stringoptional

A description of what the function does, used by the model to choose when and how to call the function.

Parameters FunctionParametersoptional

The parameters the function accepts, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format.

Omitting parameters defines a function with an empty parameter list.

Strict booloptional

Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.

Type Function

The type of tool being defined: function

TruncationStrategy RunTruncationStrategy

Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.

Type string

The truncation strategy to use for the thread. The default is auto. If set to last_messages, the thread will be truncated to the n most recent messages in the thread. When set to auto, messages in the middle of the thread will be dropped to fit the context length of the model, max_prompt_tokens.

Accepts one of the following:
const RunTruncationStrategyTypeAuto RunTruncationStrategyType = "auto"
const RunTruncationStrategyTypeLastMessages RunTruncationStrategyType = "last_messages"
LastMessages int64optional

The number of most recent messages from the thread when constructing the context for the run.

minimum1
Usage RunUsage

Usage statistics related to the run. This value will be null if the run is not in a terminal state (e.g. in_progress, queued, etc.).

CompletionTokens int64

Number of completion tokens used over the course of the run.

PromptTokens int64

Number of prompt tokens used over the course of the run.

TotalTokens int64

Total number of tokens used (prompt + completion).

Temperature float64optional

The sampling temperature used for this run. If not set, defaults to 1.

TopP float64optional

The nucleus sampling value used for this run. If not set, defaults to 1.

Event ThreadRunExpired
type AssistantStreamEventThreadRunStepCreated struct{…}

Occurs when a run step is created.

Data RunStep

Represents a step in execution of a run.

ID string

The identifier of the run step, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant associated with the run step.

CancelledAt int64

The Unix timestamp (in seconds) for when the run step was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run step completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run step was created.

ExpiredAt int64

The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.

FailedAt int64

The Unix timestamp (in seconds) for when the run step failed.

LastError RunStepLastError

The last error associated with this run step. Will be null if there are no errors.

Code string

One of server_error or rate_limit_exceeded.

Accepts one of the following:
const RunStepLastErrorCodeServerError RunStepLastErrorCode = "server_error"
const RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
Message string

A human-readable description of the error.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadRunStep

The object type, which is always thread.run.step.

RunID string

The ID of the run that this run step is a part of.

Status RunStepStatus

The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.

Accepts one of the following:
const RunStepStatusInProgress RunStepStatus = "in_progress"
const RunStepStatusCancelled RunStepStatus = "cancelled"
const RunStepStatusFailed RunStepStatus = "failed"
const RunStepStatusCompleted RunStepStatus = "completed"
const RunStepStatusExpired RunStepStatus = "expired"
StepDetails RunStepStepDetailsUnion

The details of the run step.

Accepts one of the following:
type MessageCreationStepDetails struct{…}

Details of the message creation by the run step.

MessageCreation MessageCreationStepDetailsMessageCreation
MessageID string

The ID of the message that was created by this run step.

Type MessageCreation

Always message_creation.

type ToolCallsStepDetails struct{…}

Details of the tool call.

ToolCalls []ToolCallUnion

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCall struct{…}

Details of the Code Interpreter tool call the run step was involved in.

ID string

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallCodeInterpreter

The Code Interpreter tool call definition.

Input string

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallCodeInterpreterOutputUnion

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterToolCallCodeInterpreterOutputLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Logs string

The text output from the Code Interpreter tool call.

Type Logs

Always logs.

type CodeInterpreterToolCallCodeInterpreterOutputImage struct{…}
Image CodeInterpreterToolCallCodeInterpreterOutputImageImage
FileID string

The file ID of the image.

Type Image

Always image.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

type FileSearchToolCall struct{…}
ID string

The ID of the tool call object.

Accepts one of the following:
Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

type FunctionToolCall struct{…}
ID string

The ID of the tool call object.

Function FunctionToolCallFunction

The definition of the function that was called.

Arguments string

The arguments passed to the function.

Name string

The name of the function.

Output string

The output of the function. This will be null if the outputs have not been submitted yet.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

Type ToolCalls

Always tool_calls.

ThreadID string

The ID of the thread that was run.

Type RunStepType

The type of run step, which can be either message_creation or tool_calls.

Accepts one of the following:
const RunStepTypeMessageCreation RunStepType = "message_creation"
const RunStepTypeToolCalls RunStepType = "tool_calls"
Usage RunStepUsage

Usage statistics related to the run step. This value will be null while the run step's status is in_progress.

CompletionTokens int64

Number of completion tokens used over the course of the run step.

PromptTokens int64

Number of prompt tokens used over the course of the run step.

TotalTokens int64

Total number of tokens used (prompt + completion).

Event ThreadRunStepCreated
type AssistantStreamEventThreadRunStepInProgress struct{…}

Occurs when a run step moves to an in_progress state.

Data RunStep

Represents a step in execution of a run.

ID string

The identifier of the run step, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant associated with the run step.

CancelledAt int64

The Unix timestamp (in seconds) for when the run step was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run step completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run step was created.

ExpiredAt int64

The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.

FailedAt int64

The Unix timestamp (in seconds) for when the run step failed.

LastError RunStepLastError

The last error associated with this run step. Will be null if there are no errors.

Code string

One of server_error or rate_limit_exceeded.

Accepts one of the following:
const RunStepLastErrorCodeServerError RunStepLastErrorCode = "server_error"
const RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
Message string

A human-readable description of the error.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadRunStep

The object type, which is always thread.run.step.

RunID string

The ID of the run that this run step is a part of.

Status RunStepStatus

The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.

Accepts one of the following:
const RunStepStatusInProgress RunStepStatus = "in_progress"
const RunStepStatusCancelled RunStepStatus = "cancelled"
const RunStepStatusFailed RunStepStatus = "failed"
const RunStepStatusCompleted RunStepStatus = "completed"
const RunStepStatusExpired RunStepStatus = "expired"
StepDetails RunStepStepDetailsUnion

The details of the run step.

Accepts one of the following:
type MessageCreationStepDetails struct{…}

Details of the message creation by the run step.

MessageCreation MessageCreationStepDetailsMessageCreation
MessageID string

The ID of the message that was created by this run step.

Type MessageCreation

Always message_creation.

type ToolCallsStepDetails struct{…}

Details of the tool call.

ToolCalls []ToolCallUnion

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCall struct{…}

Details of the Code Interpreter tool call the run step was involved in.

ID string

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallCodeInterpreter

The Code Interpreter tool call definition.

Input string

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallCodeInterpreterOutputUnion

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterToolCallCodeInterpreterOutputLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Logs string

The text output from the Code Interpreter tool call.

Type Logs

Always logs.

type CodeInterpreterToolCallCodeInterpreterOutputImage struct{…}
Image CodeInterpreterToolCallCodeInterpreterOutputImageImage
FileID string

The file ID of the image.

Type Image

Always image.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

type FileSearchToolCall struct{…}
ID string

The ID of the tool call object.

Accepts one of the following:
Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

type FunctionToolCall struct{…}
ID string

The ID of the tool call object.

Function FunctionToolCallFunction

The definition of the function that was called.

Arguments string

The arguments passed to the function.

Name string

The name of the function.

Output string

The output of the function. This will be null if the outputs have not been submitted yet.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

Type ToolCalls

Always tool_calls.

ThreadID string

The ID of the thread that was run.

Type RunStepType

The type of run step, which can be either message_creation or tool_calls.

Accepts one of the following:
const RunStepTypeMessageCreation RunStepType = "message_creation"
const RunStepTypeToolCalls RunStepType = "tool_calls"
Usage RunStepUsage

Usage statistics related to the run step. This value will be null while the run step's status is in_progress.

CompletionTokens int64

Number of completion tokens used over the course of the run step.

PromptTokens int64

Number of prompt tokens used over the course of the run step.

TotalTokens int64

Total number of tokens used (prompt + completion).

Event ThreadRunStepInProgress
type AssistantStreamEventThreadRunStepDelta struct{…}

Occurs when parts of a run step are being streamed.

Represents a run step delta i.e. any changed fields on a run step during streaming.

ID string

The identifier of the run step, which can be referenced in API endpoints.

The delta containing the fields that have changed on the run step.

StepDetails RunStepDeltaStepDetailsUnionoptional

The details of the run step.

Accepts one of the following:
type RunStepDeltaMessageDelta struct{…}

Details of the message creation by the run step.

Type MessageCreation

Always message_creation.

MessageCreation RunStepDeltaMessageDeltaMessageCreationoptional
MessageID stringoptional

The ID of the message that was created by this run step.

type ToolCallDeltaObject struct{…}

Details of the tool call.

Type ToolCalls

Always tool_calls.

ToolCalls []ToolCallDeltaUnionoptional

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCallDelta struct{…}

Details of the Code Interpreter tool call the run step was involved in.

Index int64

The index of the tool call in the tool calls array.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

ID stringoptional

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallDeltaCodeInterpreteroptional

The Code Interpreter tool call definition.

Input stringoptional

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallDeltaCodeInterpreterOutputUnionoptional

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Index int64

The index of the output in the outputs array.

Type Logs

Always logs.

Logs stringoptional

The text output from the Code Interpreter tool call.

type CodeInterpreterOutputImage struct{…}
Index int64

The index of the output in the outputs array.

Type Image

Always image.

Image CodeInterpreterOutputImageImageoptional
FileID stringoptional

The file ID of the image.

type FileSearchToolCallDelta struct{…}
Index int64

The index of the tool call in the tool calls array.

Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

ID stringoptional

The ID of the tool call object.

type FunctionToolCallDelta struct{…}
Index int64

The index of the tool call in the tool calls array.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

ID stringoptional

The ID of the tool call object.

Function FunctionToolCallDeltaFunctionoptional

The definition of the function that was called.

Arguments stringoptional

The arguments passed to the function.

Name stringoptional

The name of the function.

Output stringoptional

The output of the function. This will be null if the outputs have not been submitted yet.

Object ThreadRunStepDelta

The object type, which is always thread.run.step.delta.

Event ThreadRunStepDelta
type AssistantStreamEventThreadRunStepCompleted struct{…}

Occurs when a run step is completed.

Data RunStep

Represents a step in execution of a run.

ID string

The identifier of the run step, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant associated with the run step.

CancelledAt int64

The Unix timestamp (in seconds) for when the run step was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run step completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run step was created.

ExpiredAt int64

The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.

FailedAt int64

The Unix timestamp (in seconds) for when the run step failed.

LastError RunStepLastError

The last error associated with this run step. Will be null if there are no errors.

Code string

One of server_error or rate_limit_exceeded.

Accepts one of the following:
const RunStepLastErrorCodeServerError RunStepLastErrorCode = "server_error"
const RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
Message string

A human-readable description of the error.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadRunStep

The object type, which is always thread.run.step.

RunID string

The ID of the run that this run step is a part of.

Status RunStepStatus

The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.

Accepts one of the following:
const RunStepStatusInProgress RunStepStatus = "in_progress"
const RunStepStatusCancelled RunStepStatus = "cancelled"
const RunStepStatusFailed RunStepStatus = "failed"
const RunStepStatusCompleted RunStepStatus = "completed"
const RunStepStatusExpired RunStepStatus = "expired"
StepDetails RunStepStepDetailsUnion

The details of the run step.

Accepts one of the following:
type MessageCreationStepDetails struct{…}

Details of the message creation by the run step.

MessageCreation MessageCreationStepDetailsMessageCreation
MessageID string

The ID of the message that was created by this run step.

Type MessageCreation

Always message_creation.

type ToolCallsStepDetails struct{…}

Details of the tool call.

ToolCalls []ToolCallUnion

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCall struct{…}

Details of the Code Interpreter tool call the run step was involved in.

ID string

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallCodeInterpreter

The Code Interpreter tool call definition.

Input string

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallCodeInterpreterOutputUnion

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterToolCallCodeInterpreterOutputLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Logs string

The text output from the Code Interpreter tool call.

Type Logs

Always logs.

type CodeInterpreterToolCallCodeInterpreterOutputImage struct{…}
Image CodeInterpreterToolCallCodeInterpreterOutputImageImage
FileID string

The file ID of the image.

Type Image

Always image.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

type FileSearchToolCall struct{…}
ID string

The ID of the tool call object.

Accepts one of the following:
Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

type FunctionToolCall struct{…}
ID string

The ID of the tool call object.

Function FunctionToolCallFunction

The definition of the function that was called.

Arguments string

The arguments passed to the function.

Name string

The name of the function.

Output string

The output of the function. This will be null if the outputs have not been submitted yet.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

Type ToolCalls

Always tool_calls.

ThreadID string

The ID of the thread that was run.

Type RunStepType

The type of run step, which can be either message_creation or tool_calls.

Accepts one of the following:
const RunStepTypeMessageCreation RunStepType = "message_creation"
const RunStepTypeToolCalls RunStepType = "tool_calls"
Usage RunStepUsage

Usage statistics related to the run step. This value will be null while the run step's status is in_progress.

CompletionTokens int64

Number of completion tokens used over the course of the run step.

PromptTokens int64

Number of prompt tokens used over the course of the run step.

TotalTokens int64

Total number of tokens used (prompt + completion).

Event ThreadRunStepCompleted
type AssistantStreamEventThreadRunStepFailed struct{…}

Occurs when a run step fails.

Data RunStep

Represents a step in execution of a run.

ID string

The identifier of the run step, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant associated with the run step.

CancelledAt int64

The Unix timestamp (in seconds) for when the run step was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run step completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run step was created.

ExpiredAt int64

The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.

FailedAt int64

The Unix timestamp (in seconds) for when the run step failed.

LastError RunStepLastError

The last error associated with this run step. Will be null if there are no errors.

Code string

One of server_error or rate_limit_exceeded.

Accepts one of the following:
const RunStepLastErrorCodeServerError RunStepLastErrorCode = "server_error"
const RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
Message string

A human-readable description of the error.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadRunStep

The object type, which is always thread.run.step.

RunID string

The ID of the run that this run step is a part of.

Status RunStepStatus

The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.

Accepts one of the following:
const RunStepStatusInProgress RunStepStatus = "in_progress"
const RunStepStatusCancelled RunStepStatus = "cancelled"
const RunStepStatusFailed RunStepStatus = "failed"
const RunStepStatusCompleted RunStepStatus = "completed"
const RunStepStatusExpired RunStepStatus = "expired"
StepDetails RunStepStepDetailsUnion

The details of the run step.

Accepts one of the following:
type MessageCreationStepDetails struct{…}

Details of the message creation by the run step.

MessageCreation MessageCreationStepDetailsMessageCreation
MessageID string

The ID of the message that was created by this run step.

Type MessageCreation

Always message_creation.

type ToolCallsStepDetails struct{…}

Details of the tool call.

ToolCalls []ToolCallUnion

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCall struct{…}

Details of the Code Interpreter tool call the run step was involved in.

ID string

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallCodeInterpreter

The Code Interpreter tool call definition.

Input string

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallCodeInterpreterOutputUnion

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterToolCallCodeInterpreterOutputLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Logs string

The text output from the Code Interpreter tool call.

Type Logs

Always logs.

type CodeInterpreterToolCallCodeInterpreterOutputImage struct{…}
Image CodeInterpreterToolCallCodeInterpreterOutputImageImage
FileID string

The file ID of the image.

Type Image

Always image.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

type FileSearchToolCall struct{…}
ID string

The ID of the tool call object.

Accepts one of the following:
Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

type FunctionToolCall struct{…}
ID string

The ID of the tool call object.

Function FunctionToolCallFunction

The definition of the function that was called.

Arguments string

The arguments passed to the function.

Name string

The name of the function.

Output string

The output of the function. This will be null if the outputs have not been submitted yet.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

Type ToolCalls

Always tool_calls.

ThreadID string

The ID of the thread that was run.

Type RunStepType

The type of run step, which can be either message_creation or tool_calls.

Accepts one of the following:
const RunStepTypeMessageCreation RunStepType = "message_creation"
const RunStepTypeToolCalls RunStepType = "tool_calls"
Usage RunStepUsage

Usage statistics related to the run step. This value will be null while the run step's status is in_progress.

CompletionTokens int64

Number of completion tokens used over the course of the run step.

PromptTokens int64

Number of prompt tokens used over the course of the run step.

TotalTokens int64

Total number of tokens used (prompt + completion).

Event ThreadRunStepFailed
type AssistantStreamEventThreadRunStepCancelled struct{…}

Occurs when a run step is cancelled.

Data RunStep

Represents a step in execution of a run.

ID string

The identifier of the run step, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant associated with the run step.

CancelledAt int64

The Unix timestamp (in seconds) for when the run step was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run step completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run step was created.

ExpiredAt int64

The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.

FailedAt int64

The Unix timestamp (in seconds) for when the run step failed.

LastError RunStepLastError

The last error associated with this run step. Will be null if there are no errors.

Code string

One of server_error or rate_limit_exceeded.

Accepts one of the following:
const RunStepLastErrorCodeServerError RunStepLastErrorCode = "server_error"
const RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
Message string

A human-readable description of the error.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadRunStep

The object type, which is always thread.run.step.

RunID string

The ID of the run that this run step is a part of.

Status RunStepStatus

The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.

Accepts one of the following:
const RunStepStatusInProgress RunStepStatus = "in_progress"
const RunStepStatusCancelled RunStepStatus = "cancelled"
const RunStepStatusFailed RunStepStatus = "failed"
const RunStepStatusCompleted RunStepStatus = "completed"
const RunStepStatusExpired RunStepStatus = "expired"
StepDetails RunStepStepDetailsUnion

The details of the run step.

Accepts one of the following:
type MessageCreationStepDetails struct{…}

Details of the message creation by the run step.

MessageCreation MessageCreationStepDetailsMessageCreation
MessageID string

The ID of the message that was created by this run step.

Type MessageCreation

Always message_creation.

type ToolCallsStepDetails struct{…}

Details of the tool call.

ToolCalls []ToolCallUnion

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCall struct{…}

Details of the Code Interpreter tool call the run step was involved in.

ID string

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallCodeInterpreter

The Code Interpreter tool call definition.

Input string

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallCodeInterpreterOutputUnion

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterToolCallCodeInterpreterOutputLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Logs string

The text output from the Code Interpreter tool call.

Type Logs

Always logs.

type CodeInterpreterToolCallCodeInterpreterOutputImage struct{…}
Image CodeInterpreterToolCallCodeInterpreterOutputImageImage
FileID string

The file ID of the image.

Type Image

Always image.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

type FileSearchToolCall struct{…}
ID string

The ID of the tool call object.

Accepts one of the following:
Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

type FunctionToolCall struct{…}
ID string

The ID of the tool call object.

Function FunctionToolCallFunction

The definition of the function that was called.

Arguments string

The arguments passed to the function.

Name string

The name of the function.

Output string

The output of the function. This will be null if the outputs have not been submitted yet.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

Type ToolCalls

Always tool_calls.

ThreadID string

The ID of the thread that was run.

Type RunStepType

The type of run step, which can be either message_creation or tool_calls.

Accepts one of the following:
const RunStepTypeMessageCreation RunStepType = "message_creation"
const RunStepTypeToolCalls RunStepType = "tool_calls"
Usage RunStepUsage

Usage statistics related to the run step. This value will be null while the run step's status is in_progress.

CompletionTokens int64

Number of completion tokens used over the course of the run step.

PromptTokens int64

Number of prompt tokens used over the course of the run step.

TotalTokens int64

Total number of tokens used (prompt + completion).

Event ThreadRunStepCancelled
type AssistantStreamEventThreadRunStepExpired struct{…}

Occurs when a run step expires.

Data RunStep

Represents a step in execution of a run.

ID string

The identifier of the run step, which can be referenced in API endpoints.

AssistantID string

The ID of the assistant associated with the run step.

CancelledAt int64

The Unix timestamp (in seconds) for when the run step was cancelled.

CompletedAt int64

The Unix timestamp (in seconds) for when the run step completed.

CreatedAt int64

The Unix timestamp (in seconds) for when the run step was created.

ExpiredAt int64

The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired.

FailedAt int64

The Unix timestamp (in seconds) for when the run step failed.

LastError RunStepLastError

The last error associated with this run step. Will be null if there are no errors.

Code string

One of server_error or rate_limit_exceeded.

Accepts one of the following:
const RunStepLastErrorCodeServerError RunStepLastErrorCode = "server_error"
const RunStepLastErrorCodeRateLimitExceeded RunStepLastErrorCode = "rate_limit_exceeded"
Message string

A human-readable description of the error.

Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadRunStep

The object type, which is always thread.run.step.

RunID string

The ID of the run that this run step is a part of.

Status RunStepStatus

The status of the run step, which can be either in_progress, cancelled, failed, completed, or expired.

Accepts one of the following:
const RunStepStatusInProgress RunStepStatus = "in_progress"
const RunStepStatusCancelled RunStepStatus = "cancelled"
const RunStepStatusFailed RunStepStatus = "failed"
const RunStepStatusCompleted RunStepStatus = "completed"
const RunStepStatusExpired RunStepStatus = "expired"
StepDetails RunStepStepDetailsUnion

The details of the run step.

Accepts one of the following:
type MessageCreationStepDetails struct{…}

Details of the message creation by the run step.

MessageCreation MessageCreationStepDetailsMessageCreation
MessageID string

The ID of the message that was created by this run step.

Type MessageCreation

Always message_creation.

type ToolCallsStepDetails struct{…}

Details of the tool call.

ToolCalls []ToolCallUnion

An array of tool calls the run step was involved in. These can be associated with one of three types of tools: code_interpreter, file_search, or function.

Accepts one of the following:
type CodeInterpreterToolCall struct{…}

Details of the Code Interpreter tool call the run step was involved in.

ID string

The ID of the tool call.

CodeInterpreter CodeInterpreterToolCallCodeInterpreter

The Code Interpreter tool call definition.

Input string

The input to the Code Interpreter tool call.

Outputs []CodeInterpreterToolCallCodeInterpreterOutputUnion

The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (logs) or images (image). Each of these are represented by a different object type.

Accepts one of the following:
type CodeInterpreterToolCallCodeInterpreterOutputLogs struct{…}

Text output from the Code Interpreter tool call as part of a run step.

Logs string

The text output from the Code Interpreter tool call.

Type Logs

Always logs.

type CodeInterpreterToolCallCodeInterpreterOutputImage struct{…}
Image CodeInterpreterToolCallCodeInterpreterOutputImageImage
FileID string

The file ID of the image.

Type Image

Always image.

Type CodeInterpreter

The type of tool call. This is always going to be code_interpreter for this type of tool call.

type FileSearchToolCall struct{…}
ID string

The ID of the tool call object.

Accepts one of the following:
Type FileSearch

The type of tool call. This is always going to be file_search for this type of tool call.

type FunctionToolCall struct{…}
ID string

The ID of the tool call object.

Function FunctionToolCallFunction

The definition of the function that was called.

Arguments string

The arguments passed to the function.

Name string

The name of the function.

Output string

The output of the function. This will be null if the outputs have not been submitted yet.

Type Function

The type of tool call. This is always going to be function for this type of tool call.

Type ToolCalls

Always tool_calls.

ThreadID string

The ID of the thread that was run.

Type RunStepType

The type of run step, which can be either message_creation or tool_calls.

Accepts one of the following:
const RunStepTypeMessageCreation RunStepType = "message_creation"
const RunStepTypeToolCalls RunStepType = "tool_calls"
Usage RunStepUsage

Usage statistics related to the run step. This value will be null while the run step's status is in_progress.

CompletionTokens int64

Number of completion tokens used over the course of the run step.

PromptTokens int64

Number of prompt tokens used over the course of the run step.

TotalTokens int64

Total number of tokens used (prompt + completion).

Event ThreadRunStepExpired
type AssistantStreamEventThreadMessageCreated struct{…}

Occurs when a message is created.

Data Message

Represents a message within a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

If applicable, the ID of the assistant that authored this message.

Attachments []MessageAttachment

A list of files attached to the message, and the tools they were added to.

FileID stringoptional

The ID of the file to attach to the message.

Tools []MessageAttachmentToolUnionoptional

The tools to add this file to.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type MessageAttachmentToolAssistantToolsFileSearchTypeOnly struct{…}
Type FileSearch

The type of tool being defined: file_search

CompletedAt int64

The Unix timestamp (in seconds) for when the message was completed.

The content of the message, in an array of text and/or images.

Accepts one of the following:
type ImageFileContentBlock struct{…}

References an image File in the content of a message.

ImageFile ImageFile
FileID string

The File ID of the image in the message content. Set purpose="vision" when uploading the File if you need to later display the file content.

Detail ImageFileDetailoptional

Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageFileDetailAuto ImageFileDetail = "auto"
const ImageFileDetailLow ImageFileDetail = "low"
const ImageFileDetailHigh ImageFileDetail = "high"
Type ImageFile

Always image_file.

type ImageURLContentBlock struct{…}

References an image URL in the content of a message.

ImageURL ImageURL
URL string

The external URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, webp.

formaturi
Detail ImageURLDetailoptional

Specifies the detail level of the image. low uses fewer tokens; you can opt in to high resolution using high. The default value is auto.

Accepts one of the following:
const ImageURLDetailAuto ImageURLDetail = "auto"
const ImageURLDetailLow ImageURLDetail = "low"
const ImageURLDetailHigh ImageURLDetail = "high"
Type ImageURL

The type of the content part.

type TextContentBlock struct{…}

The text content that is part of a message.

Text Text
Annotations []AnnotationUnion
Accepts one of the following:
type FileCitationAnnotation struct{…}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

EndIndex int64
minimum0
FileCitation FileCitationAnnotationFileCitation
FileID string

The ID of the specific File the citation is from.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FileCitation

Always file_citation.

type FilePathAnnotation struct{…}

A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.

EndIndex int64
minimum0
FilePath FilePathAnnotationFilePath
FileID string

The ID of the file that was generated.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FilePath

Always file_path.

Value string

The data that makes up the text.

Type Text

Always text.

type RefusalContentBlock struct{…}

The refusal content generated by the assistant.

Refusal string
Type Refusal

Always refusal.

CreatedAt int64

The Unix timestamp (in seconds) for when the message was created.

IncompleteAt int64

The Unix timestamp (in seconds) for when the message was marked as incomplete.

IncompleteDetails MessageIncompleteDetails

On an incomplete message, details about why the message is incomplete.

Reason string

The reason the message is incomplete.

Accepts one of the following:
const MessageIncompleteDetailsReasonContentFilter MessageIncompleteDetailsReason = "content_filter"
const MessageIncompleteDetailsReasonMaxTokens MessageIncompleteDetailsReason = "max_tokens"
const MessageIncompleteDetailsReasonRunCancelled MessageIncompleteDetailsReason = "run_cancelled"
const MessageIncompleteDetailsReasonRunExpired MessageIncompleteDetailsReason = "run_expired"
const MessageIncompleteDetailsReasonRunFailed MessageIncompleteDetailsReason = "run_failed"
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadMessage

The object type, which is always thread.message.

Role MessageRole

The entity that produced the message. One of user or assistant.

Accepts one of the following:
const MessageRoleUser MessageRole = "user"
const MessageRoleAssistant MessageRole = "assistant"
RunID string

The ID of the run associated with the creation of this message. Value is null when messages are created manually using the create message or create thread endpoints.

Status MessageStatus

The status of the message, which can be either in_progress, incomplete, or completed.

Accepts one of the following:
const MessageStatusInProgress MessageStatus = "in_progress"
const MessageStatusIncomplete MessageStatus = "incomplete"
const MessageStatusCompleted MessageStatus = "completed"
ThreadID string

The thread ID that this message belongs to.

Event ThreadMessageCreated
type AssistantStreamEventThreadMessageInProgress struct{…}

Occurs when a message moves to an in_progress state.

Data Message

Represents a message within a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

If applicable, the ID of the assistant that authored this message.

Attachments []MessageAttachment

A list of files attached to the message, and the tools they were added to.

FileID stringoptional

The ID of the file to attach to the message.

Tools []MessageAttachmentToolUnionoptional

The tools to add this file to.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type MessageAttachmentToolAssistantToolsFileSearchTypeOnly struct{…}
Type FileSearch

The type of tool being defined: file_search

CompletedAt int64

The Unix timestamp (in seconds) for when the message was completed.

The content of the message, in an array of text and/or images.

Accepts one of the following:
type ImageFileContentBlock struct{…}

References an image File in the content of a message.

ImageFile ImageFile
FileID string

The File ID of the image in the message content. Set purpose="vision" when uploading the File if you need to later display the file content.

Detail ImageFileDetailoptional

Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageFileDetailAuto ImageFileDetail = "auto"
const ImageFileDetailLow ImageFileDetail = "low"
const ImageFileDetailHigh ImageFileDetail = "high"
Type ImageFile

Always image_file.

type ImageURLContentBlock struct{…}

References an image URL in the content of a message.

ImageURL ImageURL
URL string

The external URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, webp.

formaturi
Detail ImageURLDetailoptional

Specifies the detail level of the image. low uses fewer tokens; you can opt in to high resolution using high. The default value is auto.

Accepts one of the following:
const ImageURLDetailAuto ImageURLDetail = "auto"
const ImageURLDetailLow ImageURLDetail = "low"
const ImageURLDetailHigh ImageURLDetail = "high"
Type ImageURL

The type of the content part.

type TextContentBlock struct{…}

The text content that is part of a message.

Text Text
Annotations []AnnotationUnion
Accepts one of the following:
type FileCitationAnnotation struct{…}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

EndIndex int64
minimum0
FileCitation FileCitationAnnotationFileCitation
FileID string

The ID of the specific File the citation is from.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FileCitation

Always file_citation.

type FilePathAnnotation struct{…}

A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.

EndIndex int64
minimum0
FilePath FilePathAnnotationFilePath
FileID string

The ID of the file that was generated.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FilePath

Always file_path.

Value string

The data that makes up the text.

Type Text

Always text.

type RefusalContentBlock struct{…}

The refusal content generated by the assistant.

Refusal string
Type Refusal

Always refusal.

CreatedAt int64

The Unix timestamp (in seconds) for when the message was created.

IncompleteAt int64

The Unix timestamp (in seconds) for when the message was marked as incomplete.

IncompleteDetails MessageIncompleteDetails

On an incomplete message, details about why the message is incomplete.

Reason string

The reason the message is incomplete.

Accepts one of the following:
const MessageIncompleteDetailsReasonContentFilter MessageIncompleteDetailsReason = "content_filter"
const MessageIncompleteDetailsReasonMaxTokens MessageIncompleteDetailsReason = "max_tokens"
const MessageIncompleteDetailsReasonRunCancelled MessageIncompleteDetailsReason = "run_cancelled"
const MessageIncompleteDetailsReasonRunExpired MessageIncompleteDetailsReason = "run_expired"
const MessageIncompleteDetailsReasonRunFailed MessageIncompleteDetailsReason = "run_failed"
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadMessage

The object type, which is always thread.message.

Role MessageRole

The entity that produced the message. One of user or assistant.

Accepts one of the following:
const MessageRoleUser MessageRole = "user"
const MessageRoleAssistant MessageRole = "assistant"
RunID string

The ID of the run associated with the creation of this message. Value is null when messages are created manually using the create message or create thread endpoints.

Status MessageStatus

The status of the message, which can be either in_progress, incomplete, or completed.

Accepts one of the following:
const MessageStatusInProgress MessageStatus = "in_progress"
const MessageStatusIncomplete MessageStatus = "incomplete"
const MessageStatusCompleted MessageStatus = "completed"
ThreadID string

The thread ID that this message belongs to.

Event ThreadMessageInProgress
type AssistantStreamEventThreadMessageDelta struct{…}

Occurs when parts of a Message are being streamed.

Represents a message delta, i.e., any changed fields on a message during streaming.

ID string

The identifier of the message, which can be referenced in API endpoints.

The delta containing the fields that have changed on the Message.

Content []MessageContentDeltaUnionoptional

The content of the message, in an array of text and/or images.

Accepts one of the following:
type ImageFileDeltaBlock struct{…}

References an image File in the content of a message.

Index int64

The index of the content part in the message.

Type ImageFile

Always image_file.

ImageFile ImageFileDeltaoptional
Detail ImageFileDeltaDetailoptional

Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageFileDeltaDetailAuto ImageFileDeltaDetail = "auto"
const ImageFileDeltaDetailLow ImageFileDeltaDetail = "low"
const ImageFileDeltaDetailHigh ImageFileDeltaDetail = "high"
FileID stringoptional

The File ID of the image in the message content. Set purpose="vision" when uploading the File if you need to later display the file content.

type TextDeltaBlock struct{…}

The text content that is part of a message.

Index int64

The index of the content part in the message.

Type Text

Always text.

Text TextDeltaoptional
Annotations []AnnotationDeltaUnionoptional
Accepts one of the following:
type FileCitationDeltaAnnotation struct{…}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

Index int64

The index of the annotation in the text content part.

Type FileCitation

Always file_citation.

EndIndex int64optional
minimum0
FileCitation FileCitationDeltaAnnotationFileCitationoptional
FileID stringoptional

The ID of the specific File the citation is from.

Quote stringoptional

The specific quote in the file.

StartIndex int64optional
minimum0
Text stringoptional

The text in the message content that needs to be replaced.

type FilePathDeltaAnnotation struct{…}

A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.

Index int64

The index of the annotation in the text content part.

Type FilePath

Always file_path.

EndIndex int64optional
minimum0
FilePath FilePathDeltaAnnotationFilePathoptional
FileID stringoptional

The ID of the file that was generated.

StartIndex int64optional
minimum0
Text stringoptional

The text in the message content that needs to be replaced.

Value stringoptional

The data that makes up the text.

type RefusalDeltaBlock struct{…}

The refusal content that is part of a message.

Index int64

The index of the refusal part in the message.

Type Refusal

Always refusal.

Refusal stringoptional
type ImageURLDeltaBlock struct{…}

References an image URL in the content of a message.

Index int64

The index of the content part in the message.

Type ImageURL

Always image_url.

ImageURL ImageURLDeltaoptional
Detail ImageURLDeltaDetailoptional

Specifies the detail level of the image. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageURLDeltaDetailAuto ImageURLDeltaDetail = "auto"
const ImageURLDeltaDetailLow ImageURLDeltaDetail = "low"
const ImageURLDeltaDetailHigh ImageURLDeltaDetail = "high"
URL stringoptional

The URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, webp.

Role MessageDeltaRoleoptional

The entity that produced the message. One of user or assistant.

Accepts one of the following:
const MessageDeltaRoleUser MessageDeltaRole = "user"
const MessageDeltaRoleAssistant MessageDeltaRole = "assistant"
Object ThreadMessageDelta

The object type, which is always thread.message.delta.

Event ThreadMessageDelta
type AssistantStreamEventThreadMessageCompleted struct{…}

Occurs when a message is completed.

Data Message

Represents a message within a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

If applicable, the ID of the assistant that authored this message.

Attachments []MessageAttachment

A list of files attached to the message, and the tools they were added to.

FileID stringoptional

The ID of the file to attach to the message.

Tools []MessageAttachmentToolUnionoptional

The tools to add this file to.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type MessageAttachmentToolAssistantToolsFileSearchTypeOnly struct{…}
Type FileSearch

The type of tool being defined: file_search

CompletedAt int64

The Unix timestamp (in seconds) for when the message was completed.

The content of the message, in an array of text and/or images.

Accepts one of the following:
type ImageFileContentBlock struct{…}

References an image File in the content of a message.

ImageFile ImageFile
FileID string

The File ID of the image in the message content. Set purpose="vision" when uploading the File if you need to later display the file content.

Detail ImageFileDetailoptional

Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageFileDetailAuto ImageFileDetail = "auto"
const ImageFileDetailLow ImageFileDetail = "low"
const ImageFileDetailHigh ImageFileDetail = "high"
Type ImageFile

Always image_file.

type ImageURLContentBlock struct{…}

References an image URL in the content of a message.

ImageURL ImageURL
URL string

The external URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, webp.

formaturi
Detail ImageURLDetailoptional

Specifies the detail level of the image. low uses fewer tokens; you can opt in to high resolution using high. The default value is auto.

Accepts one of the following:
const ImageURLDetailAuto ImageURLDetail = "auto"
const ImageURLDetailLow ImageURLDetail = "low"
const ImageURLDetailHigh ImageURLDetail = "high"
Type ImageURL

The type of the content part.

type TextContentBlock struct{…}

The text content that is part of a message.

Text Text
Annotations []AnnotationUnion
Accepts one of the following:
type FileCitationAnnotation struct{…}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

EndIndex int64
minimum0
FileCitation FileCitationAnnotationFileCitation
FileID string

The ID of the specific File the citation is from.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FileCitation

Always file_citation.

type FilePathAnnotation struct{…}

A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.

EndIndex int64
minimum0
FilePath FilePathAnnotationFilePath
FileID string

The ID of the file that was generated.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FilePath

Always file_path.

Value string

The data that makes up the text.

Type Text

Always text.

type RefusalContentBlock struct{…}

The refusal content generated by the assistant.

Refusal string
Type Refusal

Always refusal.

CreatedAt int64

The Unix timestamp (in seconds) for when the message was created.

IncompleteAt int64

The Unix timestamp (in seconds) for when the message was marked as incomplete.

IncompleteDetails MessageIncompleteDetails

On an incomplete message, details about why the message is incomplete.

Reason string

The reason the message is incomplete.

Accepts one of the following:
const MessageIncompleteDetailsReasonContentFilter MessageIncompleteDetailsReason = "content_filter"
const MessageIncompleteDetailsReasonMaxTokens MessageIncompleteDetailsReason = "max_tokens"
const MessageIncompleteDetailsReasonRunCancelled MessageIncompleteDetailsReason = "run_cancelled"
const MessageIncompleteDetailsReasonRunExpired MessageIncompleteDetailsReason = "run_expired"
const MessageIncompleteDetailsReasonRunFailed MessageIncompleteDetailsReason = "run_failed"
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadMessage

The object type, which is always thread.message.

Role MessageRole

The entity that produced the message. One of user or assistant.

Accepts one of the following:
const MessageRoleUser MessageRole = "user"
const MessageRoleAssistant MessageRole = "assistant"
RunID string

The ID of the run associated with the creation of this message. Value is null when messages are created manually using the create message or create thread endpoints.

Status MessageStatus

The status of the message, which can be either in_progress, incomplete, or completed.

Accepts one of the following:
const MessageStatusInProgress MessageStatus = "in_progress"
const MessageStatusIncomplete MessageStatus = "incomplete"
const MessageStatusCompleted MessageStatus = "completed"
ThreadID string

The thread ID that this message belongs to.

Event ThreadMessageCompleted
type AssistantStreamEventThreadMessageIncomplete struct{…}

Occurs when a message ends before it is completed.

Data Message

Represents a message within a thread.

ID string

The identifier, which can be referenced in API endpoints.

AssistantID string

If applicable, the ID of the assistant that authored this message.

Attachments []MessageAttachment

A list of files attached to the message, and the tools they were added to.

FileID stringoptional

The ID of the file to attach to the message.

Tools []MessageAttachmentToolUnionoptional

The tools to add this file to.

Accepts one of the following:
type CodeInterpreterTool struct{…}
Type CodeInterpreter

The type of tool being defined: code_interpreter

type MessageAttachmentToolAssistantToolsFileSearchTypeOnly struct{…}
Type FileSearch

The type of tool being defined: file_search

CompletedAt int64

The Unix timestamp (in seconds) for when the message was completed.

The content of the message, in an array of text and/or images.

Accepts one of the following:
type ImageFileContentBlock struct{…}

References an image File in the content of a message.

ImageFile ImageFile
FileID string

The File ID of the image in the message content. Set purpose="vision" when uploading the File if you need to later display the file content.

Detail ImageFileDetailoptional

Specifies the detail level of the image if specified by the user. low uses fewer tokens, you can opt in to high resolution using high.

Accepts one of the following:
const ImageFileDetailAuto ImageFileDetail = "auto"
const ImageFileDetailLow ImageFileDetail = "low"
const ImageFileDetailHigh ImageFileDetail = "high"
Type ImageFile

Always image_file.

type ImageURLContentBlock struct{…}

References an image URL in the content of a message.

ImageURL ImageURL
URL string

The external URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, webp.

formaturi
Detail ImageURLDetailoptional

Specifies the detail level of the image. low uses fewer tokens; you can opt in to high resolution using high. The default value is auto.

Accepts one of the following:
const ImageURLDetailAuto ImageURLDetail = "auto"
const ImageURLDetailLow ImageURLDetail = "low"
const ImageURLDetailHigh ImageURLDetail = "high"
Type ImageURL

The type of the content part.

type TextContentBlock struct{…}

The text content that is part of a message.

Text Text
Annotations []AnnotationUnion
Accepts one of the following:
type FileCitationAnnotation struct{…}

A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.

EndIndex int64
minimum0
FileCitation FileCitationAnnotationFileCitation
FileID string

The ID of the specific File the citation is from.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FileCitation

Always file_citation.

type FilePathAnnotation struct{…}

A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.

EndIndex int64
minimum0
FilePath FilePathAnnotationFilePath
FileID string

The ID of the file that was generated.

StartIndex int64
minimum0
Text string

The text in the message content that needs to be replaced.

Type FilePath

Always file_path.

Value string

The data that makes up the text.

Type Text

Always text.

type RefusalContentBlock struct{…}

The refusal content generated by the assistant.

Refusal string
Type Refusal

Always refusal.

CreatedAt int64

The Unix timestamp (in seconds) for when the message was created.

IncompleteAt int64

The Unix timestamp (in seconds) for when the message was marked as incomplete.

IncompleteDetails MessageIncompleteDetails

On an incomplete message, details about why the message is incomplete.

Reason string

The reason the message is incomplete.

Accepts one of the following:
const MessageIncompleteDetailsReasonContentFilter MessageIncompleteDetailsReason = "content_filter"
const MessageIncompleteDetailsReasonMaxTokens MessageIncompleteDetailsReason = "max_tokens"
const MessageIncompleteDetailsReasonRunCancelled MessageIncompleteDetailsReason = "run_cancelled"
const MessageIncompleteDetailsReasonRunExpired MessageIncompleteDetailsReason = "run_expired"
const MessageIncompleteDetailsReasonRunFailed MessageIncompleteDetailsReason = "run_failed"
Metadata Metadata

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Object ThreadMessage

The object type, which is always thread.message.

Role MessageRole

The entity that produced the message. One of user or assistant.

Accepts one of the following:
const MessageRoleUser MessageRole = "user"
const MessageRoleAssistant MessageRole = "assistant"
RunID string

The ID of the run associated with the creation of this message. Value is null when messages are created manually using the create message or create thread endpoints.

Status MessageStatus

The status of the message, which can be either in_progress, incomplete, or completed.

Accepts one of the following:
const MessageStatusInProgress MessageStatus = "in_progress"
const MessageStatusIncomplete MessageStatus = "incomplete"
const MessageStatusCompleted MessageStatus = "completed"
ThreadID string

The thread ID that this message belongs to.

Event ThreadMessageIncomplete
type AssistantStreamEventErrorEvent struct{…}

Occurs when an error occurs. This can happen due to an internal server error or a timeout.

Code string
Message string
Param string
Type string
Event Error

Create thread and run

package main

import (
	"context"
	"fmt"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
)

// Example: create a thread and execute a run against it in a single request.
func main() {
	// Construct a client authenticated with an API key.
	oaiClient := openai.NewClient(
		option.WithAPIKey("My API Key"),
	)

	// Only the assistant ID is required; all other run parameters are optional.
	params := openai.BetaThreadNewAndRunParams{
		AssistantID: "assistant_id",
	}

	createdRun, err := oaiClient.Beta.Threads.NewAndRun(context.TODO(), params)
	if err != nil {
		panic(err.Error())
	}
	fmt.Printf("%+v\n", createdRun.ID)
}
{
  "id": "id",
  "assistant_id": "assistant_id",
  "cancelled_at": 0,
  "completed_at": 0,
  "created_at": 0,
  "expires_at": 0,
  "failed_at": 0,
  "incomplete_details": {
    "reason": "max_completion_tokens"
  },
  "instructions": "instructions",
  "last_error": {
    "code": "server_error",
    "message": "message"
  },
  "max_completion_tokens": 256,
  "max_prompt_tokens": 256,
  "metadata": {
    "foo": "string"
  },
  "model": "model",
  "object": "thread.run",
  "parallel_tool_calls": true,
  "required_action": {
    "submit_tool_outputs": {
      "tool_calls": [
        {
          "id": "id",
          "function": {
            "arguments": "arguments",
            "name": "name"
          },
          "type": "function"
        }
      ]
    },
    "type": "submit_tool_outputs"
  },
  "response_format": "auto",
  "started_at": 0,
  "status": "queued",
  "thread_id": "thread_id",
  "tool_choice": "none",
  "tools": [
    {
      "type": "code_interpreter"
    }
  ],
  "truncation_strategy": {
    "type": "auto",
    "last_messages": 1
  },
  "usage": {
    "completion_tokens": 0,
    "prompt_tokens": 0,
    "total_tokens": 0
  },
  "temperature": 0,
  "top_p": 0
}
Returns Examples
{
  "id": "id",
  "assistant_id": "assistant_id",
  "cancelled_at": 0,
  "completed_at": 0,
  "created_at": 0,
  "expires_at": 0,
  "failed_at": 0,
  "incomplete_details": {
    "reason": "max_completion_tokens"
  },
  "instructions": "instructions",
  "last_error": {
    "code": "server_error",
    "message": "message"
  },
  "max_completion_tokens": 256,
  "max_prompt_tokens": 256,
  "metadata": {
    "foo": "string"
  },
  "model": "model",
  "object": "thread.run",
  "parallel_tool_calls": true,
  "required_action": {
    "submit_tool_outputs": {
      "tool_calls": [
        {
          "id": "id",
          "function": {
            "arguments": "arguments",
            "name": "name"
          },
          "type": "function"
        }
      ]
    },
    "type": "submit_tool_outputs"
  },
  "response_format": "auto",
  "started_at": 0,
  "status": "queued",
  "thread_id": "thread_id",
  "tool_choice": "none",
  "tools": [
    {
      "type": "code_interpreter"
    }
  ],
  "truncation_strategy": {
    "type": "auto",
    "last_messages": 1
  },
  "usage": {
    "completion_tokens": 0,
    "prompt_tokens": 0,
    "total_tokens": 0
  },
  "temperature": 0,
  "top_p": 0
}