// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

package openai

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"

	"github.com/openai/openai-go/internal/apijson"
	"github.com/openai/openai-go/internal/apiquery"
	"github.com/openai/openai-go/internal/requestconfig"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/pagination"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/packages/respjson"
	"github.com/openai/openai-go/packages/ssestream"
	"github.com/openai/openai-go/shared"
	"github.com/openai/openai-go/shared/constant"
)

// BetaThreadRunService contains methods and other services that help with
// interacting with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly; use the
// [NewBetaThreadRunService] method instead.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
type BetaThreadRunService struct {
	Options []option.RequestOption
	// Deprecated: The Assistants API is deprecated in favor of the Responses API
	Steps BetaThreadRunStepService
}

// NewBetaThreadRunService generates a new service that applies the given options
// to each request. These options are applied after the parent client's options (if
// there is one), and before any request-specific options.
func NewBetaThreadRunService(opts ...option.RequestOption) (r BetaThreadRunService) {
	r = BetaThreadRunService{}
	r.Options = opts
	r.Steps = NewBetaThreadRunStepService(opts...)
	return
}

// Create a run.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) New(ctx context.Context, threadID string, params BetaThreadRunNewParams, opts ...option.RequestOption) (res *Run, err error) {
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs", threadID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &res, opts...)
	return
}
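
// The sketch below (illustrative, not generated code) shows one way to create a
// run on an existing thread; client, ctx, threadID, and assistantID are assumed
// to already exist in the caller's code.
//
//	run, err := client.Beta.Threads.Runs.New(ctx, threadID, openai.BetaThreadRunNewParams{
//		AssistantID: assistantID,
//	})
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(run.ID, run.Status)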

// Create a run.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) NewStreaming(ctx context.Context, threadID string, params BetaThreadRunNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEventUnion]) {
	var (
		raw *http.Response
		err error
	)
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2"), option.WithJSONSet("stream", true)}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs", threadID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &raw, opts...)
	return ssestream.NewStream[AssistantStreamEventUnion](ssestream.NewDecoder(raw), err)
}
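
// A minimal streaming sketch (illustrative): drain the SSE stream and check the
// error after the loop. How each AssistantStreamEventUnion variant is handled is
// left to the caller.
//
//	stream := client.Beta.Threads.Runs.NewStreaming(ctx, threadID, openai.BetaThreadRunNewParams{
//		AssistantID: assistantID,
//	})
//	defer stream.Close()
//	for stream.Next() {
//		event := stream.Current()
//		_ = event // switch on the event variant as needed
//	}
//	if err := stream.Err(); err != nil {
//		// handle the error
//	}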

// Retrieves a run.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) Get(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error) {
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	if runID == "" {
		err = errors.New("missing required run_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs/%s", threadID, runID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...)
	return
}
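
// A rough polling sketch (illustrative): fetch the run until it leaves the
// `queued` and `in_progress` states. The interval and error handling are
// assumptions of this sketch, not SDK behavior.
//
//	for {
//		run, err := client.Beta.Threads.Runs.Get(ctx, threadID, runID)
//		if err != nil {
//			break // handle the error
//		}
//		if run.Status != openai.RunStatusQueued && run.Status != openai.RunStatusInProgress {
//			break
//		}
//		time.Sleep(500 * time.Millisecond)
//	}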

// Modifies a run.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) Update(ctx context.Context, threadID string, runID string, body BetaThreadRunUpdateParams, opts ...option.RequestOption) (res *Run, err error) {
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	if runID == "" {
		err = errors.New("missing required run_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs/%s", threadID, runID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}

// Returns a list of runs belonging to a thread.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) List(ctx context.Context, threadID string, query BetaThreadRunListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Run], err error) {
	var raw *http.Response
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2"), option.WithResponseInto(&raw)}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs", threadID)
	cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...)
	if err != nil {
		return nil, err
	}
	err = cfg.Execute()
	if err != nil {
		return nil, err
	}
	res.SetPageConfig(cfg, raw)
	return res, nil
}

// Returns a list of runs belonging to a thread.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) ListAutoPaging(ctx context.Context, threadID string, query BetaThreadRunListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[Run] {
	return pagination.NewCursorPageAutoPager(r.List(ctx, threadID, query, opts...))
}
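
// An iteration sketch (illustrative): walk every run in a thread with the
// auto-pager. The Limit value is just an example page size.
//
//	iter := client.Beta.Threads.Runs.ListAutoPaging(ctx, threadID, openai.BetaThreadRunListParams{
//		Limit: openai.Int(20),
//	})
//	for iter.Next() {
//		run := iter.Current()
//		fmt.Println(run.ID, run.Status)
//	}
//	if err := iter.Err(); err != nil {
//		// handle the error
//	}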

// Cancels a run that is `in_progress`.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) Cancel(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error) {
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	if runID == "" {
		err = errors.New("missing required run_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs/%s/cancel", threadID, runID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...)
	return
}

// When a run has the `status: "requires_action"` and `required_action.type` is
// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
// tool calls once they're all completed. All outputs must be submitted in a single
// request.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) SubmitToolOutputs(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (res *Run, err error) {
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	if runID == "" {
		err = errors.New("missing required run_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs/%s/submit_tool_outputs", threadID, runID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}
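
// A sketch of answering a `requires_action` run (illustrative). run is assumed to
// be a previously retrieved *Run in that state, and handleToolCall is a
// hypothetical application-side dispatcher; the SDK only transports the outputs.
//
//	var outputs []openai.BetaThreadRunSubmitToolOutputsParamsToolOutput
//	for _, tc := range run.RequiredAction.SubmitToolOutputs.ToolCalls {
//		result := handleToolCall(tc.Function.Name, tc.Function.Arguments)
//		outputs = append(outputs, openai.BetaThreadRunSubmitToolOutputsParamsToolOutput{
//			ToolCallID: openai.String(tc.ID),
//			Output:     openai.String(result),
//		})
//	}
//	run, err = client.Beta.Threads.Runs.SubmitToolOutputs(ctx, threadID, run.ID,
//		openai.BetaThreadRunSubmitToolOutputsParams{ToolOutputs: outputs})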

// When a run has the `status: "requires_action"` and `required_action.type` is
// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
// tool calls once they're all completed. All outputs must be submitted in a single
// request.
//
// Deprecated: The Assistants API is deprecated in favor of the Responses API
func (r *BetaThreadRunService) SubmitToolOutputsStreaming(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEventUnion]) {
	var (
		raw *http.Response
		err error
	)
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2"), option.WithJSONSet("stream", true)}, opts...)
	if threadID == "" {
		err = errors.New("missing required thread_id parameter")
		return
	}
	if runID == "" {
		err = errors.New("missing required run_id parameter")
		return
	}
	path := fmt.Sprintf("threads/%s/runs/%s/submit_tool_outputs", threadID, runID)
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
	return ssestream.NewStream[AssistantStreamEventUnion](ssestream.NewDecoder(raw), err)
}

// Tool call objects
type RequiredActionFunctionToolCall struct {
	// The ID of the tool call. This ID must be referenced when you submit the tool
	// outputs using the
	// [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
	// endpoint.
	ID string `json:"id,required"`
	// The function definition.
	Function RequiredActionFunctionToolCallFunction `json:"function,required"`
	// The type of tool call the output is required for. For now, this is always
	// `function`.
	Type constant.Function `json:"type,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ID respjson.Field
		Function respjson.Field
		Type respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RequiredActionFunctionToolCall) RawJSON() string { return r.JSON.raw }
func (r *RequiredActionFunctionToolCall) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// The function definition.
type RequiredActionFunctionToolCallFunction struct {
	// The arguments that the model expects you to pass to the function.
	Arguments string `json:"arguments,required"`
	// The name of the function.
	Name string `json:"name,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		Arguments respjson.Field
		Name respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RequiredActionFunctionToolCallFunction) RawJSON() string { return r.JSON.raw }
func (r *RequiredActionFunctionToolCallFunction) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Represents an execution run on a
// [thread](https://platform.openai.com/docs/api-reference/threads).
type Run struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// execution of this run.
	AssistantID string `json:"assistant_id,required"`
	// The Unix timestamp (in seconds) for when the run was cancelled.
	CancelledAt int64 `json:"cancelled_at,required"`
	// The Unix timestamp (in seconds) for when the run was completed.
	CompletedAt int64 `json:"completed_at,required"`
	// The Unix timestamp (in seconds) for when the run was created.
	CreatedAt int64 `json:"created_at,required"`
	// The Unix timestamp (in seconds) for when the run will expire.
	ExpiresAt int64 `json:"expires_at,required"`
	// The Unix timestamp (in seconds) for when the run failed.
	FailedAt int64 `json:"failed_at,required"`
	// Details on why the run is incomplete. Will be `null` if the run is not
	// incomplete.
	IncompleteDetails RunIncompleteDetails `json:"incomplete_details,required"`
	// The instructions that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Instructions string `json:"instructions,required"`
	// The last error associated with this run. Will be `null` if there are no errors.
	LastError RunLastError `json:"last_error,required"`
	// The maximum number of completion tokens specified to have been used over the
	// course of the run.
	MaxCompletionTokens int64 `json:"max_completion_tokens,required"`
	// The maximum number of prompt tokens specified to have been used over the course
	// of the run.
	MaxPromptTokens int64 `json:"max_prompt_tokens,required"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.Metadata `json:"metadata,required"`
	// The model that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Model string `json:"model,required"`
	// The object type, which is always `thread.run`.
	Object constant.ThreadRun `json:"object,required"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
	// during tool use.
	ParallelToolCalls bool `json:"parallel_tool_calls,required"`
	// Details on the action required to continue the run. Will be `null` if no action
	// is required.
	RequiredAction RunRequiredAction `json:"required_action,required"`
	// Specifies the format that the model must output. Compatible with
	// [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
	// [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
	// message the model generates is valid JSON.
	//
	// **Important:** when using JSON mode, you **must** also instruct the model to
	// produce JSON yourself via a system or user message. Without this, the model may
	// generate an unending stream of whitespace until the generation reaches the token
	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
	// the message content may be partially cut off if `finish_reason="length"`, which
	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
	// max context length.
	ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,required"`
	// The Unix timestamp (in seconds) for when the run was started.
	StartedAt int64 `json:"started_at,required"`
	// The status of the run, which can be either `queued`, `in_progress`,
	// `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
	// `incomplete`, or `expired`.
	//
	// Any of "queued", "in_progress", "requires_action", "cancelling", "cancelled",
	// "failed", "completed", "incomplete", "expired".
	Status RunStatus `json:"status,required"`
	// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
	// that was executed on as a part of this run.
	ThreadID string `json:"thread_id,required"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice AssistantToolChoiceOptionUnion `json:"tool_choice,required"`
	// The list of tools that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Tools []AssistantToolUnion `json:"tools,required"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy RunTruncationStrategy `json:"truncation_strategy,required"`
	// Usage statistics related to the run. This value will be `null` if the run is not
	// in a terminal state (e.g. `in_progress`, `queued`, etc.).
	Usage RunUsage `json:"usage,required"`
	// The sampling temperature used for this run. If not set, defaults to 1.
	Temperature float64 `json:"temperature,nullable"`
	// The nucleus sampling value used for this run. If not set, defaults to 1.
	TopP float64 `json:"top_p,nullable"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ID respjson.Field
		AssistantID respjson.Field
		CancelledAt respjson.Field
		CompletedAt respjson.Field
		CreatedAt respjson.Field
		ExpiresAt respjson.Field
		FailedAt respjson.Field
		IncompleteDetails respjson.Field
		Instructions respjson.Field
		LastError respjson.Field
		MaxCompletionTokens respjson.Field
		MaxPromptTokens respjson.Field
		Metadata respjson.Field
		Model respjson.Field
		Object respjson.Field
		ParallelToolCalls respjson.Field
		RequiredAction respjson.Field
		ResponseFormat respjson.Field
		StartedAt respjson.Field
		Status respjson.Field
		ThreadID respjson.Field
		ToolChoice respjson.Field
		Tools respjson.Field
		TruncationStrategy respjson.Field
		Usage respjson.Field
		Temperature respjson.Field
		TopP respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r Run) RawJSON() string { return r.JSON.raw }
func (r *Run) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Details on why the run is incomplete. Will be `null` if the run is not
// incomplete.
type RunIncompleteDetails struct {
	// The reason why the run is incomplete. This will point to which specific token
	// limit was reached over the course of the run.
	//
	// Any of "max_completion_tokens", "max_prompt_tokens".
	Reason string `json:"reason"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		Reason respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunIncompleteDetails) RawJSON() string { return r.JSON.raw }
func (r *RunIncompleteDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// The last error associated with this run. Will be `null` if there are no errors.
type RunLastError struct {
	// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
	//
	// Any of "server_error", "rate_limit_exceeded", "invalid_prompt".
	Code string `json:"code,required"`
	// A human-readable description of the error.
	Message string `json:"message,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		Code respjson.Field
		Message respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunLastError) RawJSON() string { return r.JSON.raw }
func (r *RunLastError) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Details on the action required to continue the run. Will be `null` if no action
// is required.
type RunRequiredAction struct {
	// Details on the tool outputs needed for this run to continue.
	SubmitToolOutputs RunRequiredActionSubmitToolOutputs `json:"submit_tool_outputs,required"`
	// For now, this is always `submit_tool_outputs`.
	Type constant.SubmitToolOutputs `json:"type,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		SubmitToolOutputs respjson.Field
		Type respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunRequiredAction) RawJSON() string { return r.JSON.raw }
func (r *RunRequiredAction) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Details on the tool outputs needed for this run to continue.
type RunRequiredActionSubmitToolOutputs struct {
	// A list of the relevant tool calls.
	ToolCalls []RequiredActionFunctionToolCall `json:"tool_calls,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ToolCalls respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunRequiredActionSubmitToolOutputs) RawJSON() string { return r.JSON.raw }
func (r *RunRequiredActionSubmitToolOutputs) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Controls for how a thread will be truncated prior to the run. Use this to
// control the initial context window of the run.
type RunTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	//
	// Any of "auto", "last_messages".
	Type string `json:"type,required"`
	// The number of most recent messages from the thread to include when constructing
	// the context for the run.
	LastMessages int64 `json:"last_messages,nullable"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		Type respjson.Field
		LastMessages respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunTruncationStrategy) RawJSON() string { return r.JSON.raw }
func (r *RunTruncationStrategy) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Usage statistics related to the run. This value will be `null` if the run is not
// in a terminal state (e.g. `in_progress`, `queued`, etc.).
type RunUsage struct {
	// Number of completion tokens used over the course of the run.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of prompt tokens used over the course of the run.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used (prompt + completion).
	TotalTokens int64 `json:"total_tokens,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		CompletionTokens respjson.Field
		PromptTokens respjson.Field
		TotalTokens respjson.Field
		ExtraFields map[string]respjson.Field
		raw string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunUsage) RawJSON() string { return r.JSON.raw }
func (r *RunUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// The status of the run, which can be either `queued`, `in_progress`,
// `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
// `incomplete`, or `expired`.
type RunStatus string

const (
	RunStatusQueued RunStatus = "queued"
	RunStatusInProgress RunStatus = "in_progress"
	RunStatusRequiresAction RunStatus = "requires_action"
	RunStatusCancelling RunStatus = "cancelling"
	RunStatusCancelled RunStatus = "cancelled"
	RunStatusFailed RunStatus = "failed"
	RunStatusCompleted RunStatus = "completed"
	RunStatusIncomplete RunStatus = "incomplete"
	RunStatusExpired RunStatus = "expired"
)
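
// A run is finished once it reaches `cancelled`, `failed`, `completed`,
// `incomplete`, or `expired`. A small helper like this (illustrative, not part of
// the generated API) is convenient when polling:
//
//	func runIsTerminal(status openai.RunStatus) bool {
//		switch status {
//		case openai.RunStatusCancelled, openai.RunStatusFailed, openai.RunStatusCompleted,
//			openai.RunStatusIncomplete, openai.RunStatusExpired:
//			return true
//		}
//		return false
//	}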

type BetaThreadRunNewParams struct {
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
	// execute this run.
	AssistantID string `json:"assistant_id,required"`
	// Appends additional instructions at the end of the instructions for the run. This
	// is useful for modifying the behavior on a per-run basis without overriding other
	// instructions.
	AdditionalInstructions param.Opt[string] `json:"additional_instructions,omitzero"`
	// Overrides the
	// [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
	// of the assistant. This is useful for modifying the behavior on a per-run basis.
	Instructions param.Opt[string] `json:"instructions,omitzero"`
	// The maximum number of completion tokens that may be used over the course of the
	// run. The run will make a best effort to use only the number of completion tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// completion tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxCompletionTokens param.Opt[int64] `json:"max_completion_tokens,omitzero"`
	// The maximum number of prompt tokens that may be used over the course of the run.
	// The run will make a best effort to use only the number of prompt tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// prompt tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxPromptTokens param.Opt[int64] `json:"max_prompt_tokens,omitzero"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature param.Opt[float64] `json:"temperature,omitzero"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP param.Opt[float64] `json:"top_p,omitzero"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
	// during tool use.
	ParallelToolCalls param.Opt[bool] `json:"parallel_tool_calls,omitzero"`
	// Adds additional messages to the thread before creating the run.
	AdditionalMessages []BetaThreadRunNewParamsAdditionalMessage `json:"additional_messages,omitzero"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.Metadata `json:"metadata,omitzero"`
	// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
	// be used to execute this run. If a value is provided here, it will override the
	// model associated with the assistant. If not, the model associated with the
	// assistant will be used.
	Model shared.ChatModel `json:"model,omitzero"`
	// **o-series models only**
	//
	// Constrains effort on reasoning for
	// [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
	// supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
	// result in faster responses and fewer tokens used on reasoning in a response.
	//
	// Any of "low", "medium", "high".
	ReasoningEffort shared.ReasoningEffort `json:"reasoning_effort,omitzero"`
	// Override the tools the assistant can use for this run. This is useful for
	// modifying the behavior on a per-run basis.
	Tools []AssistantToolUnionParam `json:"tools,omitzero"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy BetaThreadRunNewParamsTruncationStrategy `json:"truncation_strategy,omitzero"`
	// A list of additional fields to include in the response. Currently the only
	// supported value is `step_details.tool_calls[*].file_search.results[*].content`
	// to fetch the file search result content.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
	// for more information.
	Include []RunStepInclude `query:"include,omitzero" json:"-"`
	// Specifies the format that the model must output. Compatible with
	// [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
	// [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
	// message the model generates is valid JSON.
	//
	// **Important:** when using JSON mode, you **must** also instruct the model to
	// produce JSON yourself via a system or user message. Without this, the model may
	// generate an unending stream of whitespace until the generation reaches the token
	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
	// the message content may be partially cut off if `finish_reason="length"`, which
	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
	// max context length.
	ResponseFormat AssistantResponseFormatOptionUnionParam `json:"response_format,omitzero"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice AssistantToolChoiceOptionUnionParam `json:"tool_choice,omitzero"`
	paramObj
}

func (r BetaThreadRunNewParams) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunNewParams
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunNewParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// URLQuery serializes [BetaThreadRunNewParams]'s query parameters as `url.Values`.
func (r BetaThreadRunNewParams) URLQuery() (v url.Values, err error) {
	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
		ArrayFormat: apiquery.ArrayQueryFormatBrackets,
		NestedFormat: apiquery.NestedQueryFormatBrackets,
	})
}

// The properties Content, Role are required.
type BetaThreadRunNewParamsAdditionalMessage struct {
	// The text contents of the message.
	Content BetaThreadRunNewParamsAdditionalMessageContentUnion `json:"content,omitzero,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	//   - `user`: Indicates the message is sent by an actual user and should be used in
	//     most cases to represent user-generated messages.
	//   - `assistant`: Indicates the message is generated by the assistant. Use this
	//     value to insert messages from the assistant into the conversation.
	//
	// Any of "user", "assistant".
	Role string `json:"role,omitzero,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments []BetaThreadRunNewParamsAdditionalMessageAttachment `json:"attachments,omitzero"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.Metadata `json:"metadata,omitzero"`
	paramObj
}

func (r BetaThreadRunNewParamsAdditionalMessage) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunNewParamsAdditionalMessage
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunNewParamsAdditionalMessage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

func init() {
	apijson.RegisterFieldValidator[BetaThreadRunNewParamsAdditionalMessage](
		"role", "user", "assistant",
	)
}

// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type BetaThreadRunNewParamsAdditionalMessageContentUnion struct {
	OfString param.Opt[string] `json:",omitzero,inline"`
	OfArrayOfContentParts []MessageContentPartParamUnion `json:",omitzero,inline"`
	paramUnion
}

func (u BetaThreadRunNewParamsAdditionalMessageContentUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfString, u.OfArrayOfContentParts)
}
func (u *BetaThreadRunNewParamsAdditionalMessageContentUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

func (u *BetaThreadRunNewParamsAdditionalMessageContentUnion) asAny() any {
	if !param.IsOmitted(u.OfString) {
		return &u.OfString.Value
	} else if !param.IsOmitted(u.OfArrayOfContentParts) {
		return &u.OfArrayOfContentParts
	}
	return nil
}
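
// A construction sketch (illustrative): an additional message whose content is a
// plain string uses the OfString variant; richer content would use
// OfArrayOfContentParts instead.
//
//	msg := openai.BetaThreadRunNewParamsAdditionalMessage{
//		Role: "user",
//		Content: openai.BetaThreadRunNewParamsAdditionalMessageContentUnion{
//			OfString: openai.String("Please summarize the thread so far."),
//		},
//	}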

type BetaThreadRunNewParamsAdditionalMessageAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Opt[string] `json:"file_id,omitzero"`
	// The tools to add this file to.
	Tools []BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion `json:"tools,omitzero"`
	paramObj
}

func (r BetaThreadRunNewParamsAdditionalMessageAttachment) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunNewParamsAdditionalMessageAttachment
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunNewParamsAdditionalMessageAttachment) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion struct {
	OfCodeInterpreter *CodeInterpreterToolParam `json:",omitzero,inline"`
	OfFileSearch *BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch `json:",omitzero,inline"`
	paramUnion
}

func (u BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfCodeInterpreter, u.OfFileSearch)
}
func (u *BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

func (u *BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) asAny() any {
	if !param.IsOmitted(u.OfCodeInterpreter) {
		return u.OfCodeInterpreter
	} else if !param.IsOmitted(u.OfFileSearch) {
		return u.OfFileSearch
	}
	return nil
}

// Returns a pointer to the underlying variant's property, if present.
func (u BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) GetType() *string {
	if vt := u.OfCodeInterpreter; vt != nil {
		return (*string)(&vt.Type)
	} else if vt := u.OfFileSearch; vt != nil {
		return (*string)(&vt.Type)
	}
	return nil
}

func init() {
	apijson.RegisterUnion[BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion](
		"type",
		apijson.Discriminator[CodeInterpreterToolParam]("code_interpreter"),
		apijson.Discriminator[BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch]("file_search"),
	)
}
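
// A construction sketch (illustrative): attaching a file to an additional message
// and routing it to the file_search tool. The file ID is a placeholder.
//
//	fileSearch := openai.NewBetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch()
//	attachment := openai.BetaThreadRunNewParamsAdditionalMessageAttachment{
//		FileID: openai.String("file_..."),
//		Tools: []openai.BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion{
//			{OfFileSearch: &fileSearch},
//		},
//	}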

func NewBetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch() BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch {
	return BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch{
		Type: "file_search",
	}
}

// This struct has a constant value, construct it with
// [NewBetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch].
type BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch struct {
	// The type of tool being defined: `file_search`
	Type constant.FileSearch `json:"type,required"`
	paramObj
}

func (r BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Controls for how a thread will be truncated prior to the run. Use this to
// control the initial context window of the run.
//
// The property Type is required.
type BetaThreadRunNewParamsTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	//
	// Any of "auto", "last_messages".
	Type string `json:"type,omitzero,required"`
	// The number of most recent messages from the thread to include when constructing
	// the context for the run.
	LastMessages param.Opt[int64] `json:"last_messages,omitzero"`
	paramObj
}

func (r BetaThreadRunNewParamsTruncationStrategy) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunNewParamsTruncationStrategy
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunNewParamsTruncationStrategy) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

func init() {
	apijson.RegisterFieldValidator[BetaThreadRunNewParamsTruncationStrategy](
		"type", "auto", "last_messages",
	)
}

type BetaThreadRunUpdateParams struct {
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.Metadata `json:"metadata,omitzero"`
	paramObj
}

func (r BetaThreadRunUpdateParams) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunUpdateParams
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunUpdateParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

type BetaThreadRunListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Opt[string] `query:"after,omitzero" json:"-"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// starting with obj_foo, your subsequent call can include before=obj_foo in order
	// to fetch the previous page of the list.
	Before param.Opt[string] `query:"before,omitzero" json:"-"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Opt[int64] `query:"limit,omitzero" json:"-"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	//
	// Any of "asc", "desc".
	Order BetaThreadRunListParamsOrder `query:"order,omitzero" json:"-"`
	paramObj
}

// URLQuery serializes [BetaThreadRunListParams]'s query parameters as
// `url.Values`.
func (r BetaThreadRunListParams) URLQuery() (v url.Values, err error) {
	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
		ArrayFormat: apiquery.ArrayQueryFormatBrackets,
		NestedFormat: apiquery.NestedQueryFormatBrackets,
	})
}

// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
// order and `desc` for descending order.
type BetaThreadRunListParamsOrder string

const (
	BetaThreadRunListParamsOrderAsc BetaThreadRunListParamsOrder = "asc"
	BetaThreadRunListParamsOrderDesc BetaThreadRunListParamsOrder = "desc"
)

type BetaThreadRunSubmitToolOutputsParams struct {
	// A list of tools for which the outputs are being submitted.
	ToolOutputs []BetaThreadRunSubmitToolOutputsParamsToolOutput `json:"tool_outputs,omitzero,required"`
	paramObj
}

func (r BetaThreadRunSubmitToolOutputsParams) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunSubmitToolOutputsParams
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunSubmitToolOutputsParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

type BetaThreadRunSubmitToolOutputsParamsToolOutput struct {
	// The output of the tool call to be submitted to continue the run.
	Output param.Opt[string] `json:"output,omitzero"`
	// The ID of the tool call in the `required_action` object within the run object
	// for which the output is being submitted.
	ToolCallID param.Opt[string] `json:"tool_call_id,omitzero"`
	paramObj
}

func (r BetaThreadRunSubmitToolOutputsParamsToolOutput) MarshalJSON() (data []byte, err error) {
	type shadow BetaThreadRunSubmitToolOutputsParamsToolOutput
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *BetaThreadRunSubmitToolOutputsParamsToolOutput) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}