betathreadrun.go

  1// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  2
  3package openai
  4
  5import (
  6	"context"
  7	"errors"
  8	"fmt"
  9	"net/http"
 10	"net/url"
 11	"reflect"
 12
 13	"github.com/openai/openai-go/internal/apijson"
 14	"github.com/openai/openai-go/internal/apiquery"
 15	"github.com/openai/openai-go/internal/requestconfig"
 16	"github.com/openai/openai-go/option"
 17	"github.com/openai/openai-go/packages/pagination"
 18	"github.com/openai/openai-go/packages/param"
 19	"github.com/openai/openai-go/packages/resp"
 20	"github.com/openai/openai-go/packages/ssestream"
 21	"github.com/openai/openai-go/shared"
 22	"github.com/openai/openai-go/shared/constant"
 23	"github.com/tidwall/gjson"
 24)
 25
// BetaThreadRunService contains methods and other services that help with
// interacting with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly; use the
// [NewBetaThreadRunService] method instead.
type BetaThreadRunService struct {
	Options []option.RequestOption
	Steps   BetaThreadRunStepService
}
 36
 37// NewBetaThreadRunService generates a new service that applies the given options
 38// to each request. These options are applied after the parent client's options (if
 39// there is one), and before any request-specific options.
 40func NewBetaThreadRunService(opts ...option.RequestOption) (r BetaThreadRunService) {
 41	r = BetaThreadRunService{}
 42	r.Options = opts
 43	r.Steps = NewBetaThreadRunStepService(opts...)
 44	return
 45}
 46
 47// Create a run.
 48func (r *BetaThreadRunService) New(ctx context.Context, threadID string, params BetaThreadRunNewParams, opts ...option.RequestOption) (res *Run, err error) {
 49	opts = append(r.Options[:], opts...)
 50	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
 51	if threadID == "" {
 52		err = errors.New("missing required thread_id parameter")
 53		return
 54	}
 55	path := fmt.Sprintf("threads/%s/runs", threadID)
 56	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &res, opts...)
 57	return
 58}
 59
 60// Create a run and poll until task is completed.
 61// Pass 0 to pollIntervalMs to use the default polling interval.
 62func (r *BetaThreadRunService) NewAndPoll(ctx context.Context, threadID string, params BetaThreadRunNewParams, pollIntervalMs int, opts ...option.RequestOption) (res *Run, err error) {
 63	run, err := r.New(ctx, threadID, params, opts...)
 64	if err != nil {
 65		return nil, err
 66	}
 67	return r.PollStatus(ctx, threadID, run.ID, pollIntervalMs, opts...)
 68}
 69
 70// Create a run.
 71func (r *BetaThreadRunService) NewStreaming(ctx context.Context, threadID string, params BetaThreadRunNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEventUnion]) {
 72	var (
 73		raw *http.Response
 74		err error
 75	)
 76	opts = append(r.Options[:], opts...)
 77	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2"), option.WithJSONSet("stream", true)}, opts...)
 78	if threadID == "" {
 79		err = errors.New("missing required thread_id parameter")
 80		return
 81	}
 82	path := fmt.Sprintf("threads/%s/runs", threadID)
 83	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &raw, opts...)
 84	return ssestream.NewStream[AssistantStreamEventUnion](ssestream.NewDecoder(raw), err)
 85}
 86
 87// Retrieves a run.
 88func (r *BetaThreadRunService) Get(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error) {
 89	opts = append(r.Options[:], opts...)
 90	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
 91	if threadID == "" {
 92		err = errors.New("missing required thread_id parameter")
 93		return
 94	}
 95	if runID == "" {
 96		err = errors.New("missing required run_id parameter")
 97		return
 98	}
 99	path := fmt.Sprintf("threads/%s/runs/%s", threadID, runID)
100	err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...)
101	return
102}
103
104// Modifies a run.
105func (r *BetaThreadRunService) Update(ctx context.Context, threadID string, runID string, body BetaThreadRunUpdateParams, opts ...option.RequestOption) (res *Run, err error) {
106	opts = append(r.Options[:], opts...)
107	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
108	if threadID == "" {
109		err = errors.New("missing required thread_id parameter")
110		return
111	}
112	if runID == "" {
113		err = errors.New("missing required run_id parameter")
114		return
115	}
116	path := fmt.Sprintf("threads/%s/runs/%s", threadID, runID)
117	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
118	return
119}
120
121// Returns a list of runs belonging to a thread.
122func (r *BetaThreadRunService) List(ctx context.Context, threadID string, query BetaThreadRunListParams, opts ...option.RequestOption) (res *pagination.CursorPage[Run], err error) {
123	var raw *http.Response
124	opts = append(r.Options[:], opts...)
125	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2"), option.WithResponseInto(&raw)}, opts...)
126	if threadID == "" {
127		err = errors.New("missing required thread_id parameter")
128		return
129	}
130	path := fmt.Sprintf("threads/%s/runs", threadID)
131	cfg, err := requestconfig.NewRequestConfig(ctx, http.MethodGet, path, query, &res, opts...)
132	if err != nil {
133		return nil, err
134	}
135	err = cfg.Execute()
136	if err != nil {
137		return nil, err
138	}
139	res.SetPageConfig(cfg, raw)
140	return res, nil
141}
142
143// Returns a list of runs belonging to a thread.
144func (r *BetaThreadRunService) ListAutoPaging(ctx context.Context, threadID string, query BetaThreadRunListParams, opts ...option.RequestOption) *pagination.CursorPageAutoPager[Run] {
145	return pagination.NewCursorPageAutoPager(r.List(ctx, threadID, query, opts...))
146}
147
148// Cancels a run that is `in_progress`.
149func (r *BetaThreadRunService) Cancel(ctx context.Context, threadID string, runID string, opts ...option.RequestOption) (res *Run, err error) {
150	opts = append(r.Options[:], opts...)
151	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
152	if threadID == "" {
153		err = errors.New("missing required thread_id parameter")
154		return
155	}
156	if runID == "" {
157		err = errors.New("missing required run_id parameter")
158		return
159	}
160	path := fmt.Sprintf("threads/%s/runs/%s/cancel", threadID, runID)
161	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...)
162	return
163}
164
165// When a run has the `status: "requires_action"` and `required_action.type` is
166// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
167// tool calls once they're all completed. All outputs must be submitted in a single
168// request.
169func (r *BetaThreadRunService) SubmitToolOutputs(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (res *Run, err error) {
170	opts = append(r.Options[:], opts...)
171	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2")}, opts...)
172	if threadID == "" {
173		err = errors.New("missing required thread_id parameter")
174		return
175	}
176	if runID == "" {
177		err = errors.New("missing required run_id parameter")
178		return
179	}
180	path := fmt.Sprintf("threads/%s/runs/%s/submit_tool_outputs", threadID, runID)
181	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
182	return
183}
184
185// A helper to submit a tool output to a run and poll for a terminal run state.
186// Pass 0 to pollIntervalMs to use the default polling interval.
187// More information on Run lifecycles can be found here:
188// https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
189func (r *BetaThreadRunService) SubmitToolOutputsAndPoll(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, pollIntervalMs int, opts ...option.RequestOption) (*Run, error) {
190	run, err := r.SubmitToolOutputs(ctx, threadID, runID, body, opts...)
191	if err != nil {
192		return nil, err
193	}
194	return r.PollStatus(ctx, threadID, run.ID, pollIntervalMs, opts...)
195}
196
197// When a run has the `status: "requires_action"` and `required_action.type` is
198// `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
199// tool calls once they're all completed. All outputs must be submitted in a single
200// request.
201func (r *BetaThreadRunService) SubmitToolOutputsStreaming(ctx context.Context, threadID string, runID string, body BetaThreadRunSubmitToolOutputsParams, opts ...option.RequestOption) (stream *ssestream.Stream[AssistantStreamEventUnion]) {
202	var (
203		raw *http.Response
204		err error
205	)
206	opts = append(r.Options[:], opts...)
207	opts = append([]option.RequestOption{option.WithHeader("OpenAI-Beta", "assistants=v2"), option.WithJSONSet("stream", true)}, opts...)
208	if threadID == "" {
209		err = errors.New("missing required thread_id parameter")
210		return
211	}
212	if runID == "" {
213		err = errors.New("missing required run_id parameter")
214		return
215	}
216	path := fmt.Sprintf("threads/%s/runs/%s/submit_tool_outputs", threadID, runID)
217	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
218	return ssestream.NewStream[AssistantStreamEventUnion](ssestream.NewDecoder(raw), err)
219}
220
// RequiredActionFunctionToolCall describes a tool call whose output the run is
// waiting on.
type RequiredActionFunctionToolCall struct {
	// The ID of the tool call. This ID must be referenced when you submit the tool
	// outputs using the
	// [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
	// endpoint.
	ID string `json:"id,required"`
	// The function definition.
	Function RequiredActionFunctionToolCallFunction `json:"function,required"`
	// The type of tool call the output is required for. For now, this is always
	// `function`.
	Type constant.Function `json:"type,required"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		ID          resp.Field
		Function    resp.Field
		Type        resp.Field
		ExtraFields map[string]resp.Field
		raw         string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RequiredActionFunctionToolCall) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RequiredActionFunctionToolCall) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
249
// The function definition.
type RequiredActionFunctionToolCallFunction struct {
	// The arguments that the model expects you to pass to the function.
	Arguments string `json:"arguments,required"`
	// The name of the function.
	Name string `json:"name,required"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		Arguments   resp.Field
		Name        resp.Field
		ExtraFields map[string]resp.Field
		raw         string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RequiredActionFunctionToolCallFunction) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RequiredActionFunctionToolCallFunction) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
271
// Represents an execution run on a
// [thread](https://platform.openai.com/docs/api-reference/threads).
type Run struct {
	// The identifier, which can be referenced in API endpoints.
	ID string `json:"id,required"`
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// execution of this run.
	AssistantID string `json:"assistant_id,required"`
	// The Unix timestamp (in seconds) for when the run was cancelled.
	CancelledAt int64 `json:"cancelled_at,required"`
	// The Unix timestamp (in seconds) for when the run was completed.
	CompletedAt int64 `json:"completed_at,required"`
	// The Unix timestamp (in seconds) for when the run was created.
	CreatedAt int64 `json:"created_at,required"`
	// The Unix timestamp (in seconds) for when the run will expire.
	ExpiresAt int64 `json:"expires_at,required"`
	// The Unix timestamp (in seconds) for when the run failed.
	FailedAt int64 `json:"failed_at,required"`
	// Details on why the run is incomplete. Will be `null` if the run is not
	// incomplete.
	IncompleteDetails RunIncompleteDetails `json:"incomplete_details,required"`
	// The instructions that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Instructions string `json:"instructions,required"`
	// The last error associated with this run. Will be `null` if there are no errors.
	LastError RunLastError `json:"last_error,required"`
	// The maximum number of completion tokens specified to have been used over the
	// course of the run.
	MaxCompletionTokens int64 `json:"max_completion_tokens,required"`
	// The maximum number of prompt tokens specified to have been used over the course
	// of the run.
	MaxPromptTokens int64 `json:"max_prompt_tokens,required"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.Metadata `json:"metadata,required"`
	// The model that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Model string `json:"model,required"`
	// The object type, which is always `thread.run`.
	Object constant.ThreadRun `json:"object,required"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
	// during tool use.
	ParallelToolCalls bool `json:"parallel_tool_calls,required"`
	// Details on the action required to continue the run. Will be `null` if no action
	// is required.
	RequiredAction RunRequiredAction `json:"required_action,required"`
	// Specifies the format that the model must output. Compatible with
	// [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
	// [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
	// message the model generates is valid JSON.
	//
	// **Important:** when using JSON mode, you **must** also instruct the model to
	// produce JSON yourself via a system or user message. Without this, the model may
	// generate an unending stream of whitespace until the generation reaches the token
	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
	// the message content may be partially cut off if `finish_reason="length"`, which
	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
	// max context length.
	ResponseFormat AssistantResponseFormatOptionUnion `json:"response_format,required"`
	// The Unix timestamp (in seconds) for when the run was started.
	StartedAt int64 `json:"started_at,required"`
	// The status of the run, which can be either `queued`, `in_progress`,
	// `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
	// `incomplete`, or `expired`.
	//
	// Any of "queued", "in_progress", "requires_action", "cancelling", "cancelled",
	// "failed", "completed", "incomplete", "expired".
	Status RunStatus `json:"status,required"`
	// The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
	// that was executed on as a part of this run.
	ThreadID string `json:"thread_id,required"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice AssistantToolChoiceOptionUnion `json:"tool_choice,required"`
	// The list of tools that the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
	// this run.
	Tools []AssistantToolUnion `json:"tools,required"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy RunTruncationStrategy `json:"truncation_strategy,required"`
	// Usage statistics related to the run. This value will be `null` if the run is not
	// in a terminal state (i.e. `in_progress`, `queued`, etc.).
	Usage RunUsage `json:"usage,required"`
	// The sampling temperature used for this run. If not set, defaults to 1.
	Temperature float64 `json:"temperature,nullable"`
	// The nucleus sampling value used for this run. If not set, defaults to 1.
	TopP float64 `json:"top_p,nullable"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		ID                  resp.Field
		AssistantID         resp.Field
		CancelledAt         resp.Field
		CompletedAt         resp.Field
		CreatedAt           resp.Field
		ExpiresAt           resp.Field
		FailedAt            resp.Field
		IncompleteDetails   resp.Field
		Instructions        resp.Field
		LastError           resp.Field
		MaxCompletionTokens resp.Field
		MaxPromptTokens     resp.Field
		Metadata            resp.Field
		Model               resp.Field
		Object              resp.Field
		ParallelToolCalls   resp.Field
		RequiredAction      resp.Field
		ResponseFormat      resp.Field
		StartedAt           resp.Field
		Status              resp.Field
		ThreadID            resp.Field
		ToolChoice          resp.Field
		Tools               resp.Field
		TruncationStrategy  resp.Field
		Usage               resp.Field
		Temperature         resp.Field
		TopP                resp.Field
		ExtraFields         map[string]resp.Field
		raw                 string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r Run) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *Run) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
421
// Details on why the run is incomplete. Will be `null` if the run is not
// incomplete.
type RunIncompleteDetails struct {
	// The reason why the run is incomplete. This will point to which specific token
	// limit was reached over the course of the run.
	//
	// Any of "max_completion_tokens", "max_prompt_tokens".
	Reason string `json:"reason"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		Reason      resp.Field
		ExtraFields map[string]resp.Field
		raw         string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunIncompleteDetails) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RunIncompleteDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
444
// The last error associated with this run. Will be `null` if there are no errors.
type RunLastError struct {
	// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
	//
	// Any of "server_error", "rate_limit_exceeded", "invalid_prompt".
	Code string `json:"code,required"`
	// A human-readable description of the error.
	Message string `json:"message,required"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		Code        resp.Field
		Message     resp.Field
		ExtraFields map[string]resp.Field
		raw         string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunLastError) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RunLastError) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
468
// Details on the action required to continue the run. Will be `null` if no action
// is required.
type RunRequiredAction struct {
	// Details on the tool outputs needed for this run to continue.
	SubmitToolOutputs RunRequiredActionSubmitToolOutputs `json:"submit_tool_outputs,required"`
	// For now, this is always `submit_tool_outputs`.
	Type constant.SubmitToolOutputs `json:"type,required"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		SubmitToolOutputs resp.Field
		Type              resp.Field
		ExtraFields       map[string]resp.Field
		raw               string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunRequiredAction) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RunRequiredAction) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
491
// Details on the tool outputs needed for this run to continue.
type RunRequiredActionSubmitToolOutputs struct {
	// A list of the relevant tool calls.
	ToolCalls []RequiredActionFunctionToolCall `json:"tool_calls,required"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		ToolCalls   resp.Field
		ExtraFields map[string]resp.Field
		raw         string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunRequiredActionSubmitToolOutputs) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RunRequiredActionSubmitToolOutputs) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
510
// Controls for how a thread will be truncated prior to the run. Use this to
// control the initial context window of the run.
type RunTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	//
	// Any of "auto", "last_messages".
	Type string `json:"type,required"`
	// The number of most recent messages from the thread when constructing the context
	// for the run.
	LastMessages int64 `json:"last_messages,nullable"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		Type         resp.Field
		LastMessages resp.Field
		ExtraFields  map[string]resp.Field
		raw          string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunTruncationStrategy) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RunTruncationStrategy) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
539
// Usage statistics related to the run. This value will be `null` if the run is not
// in a terminal state (i.e. `in_progress`, `queued`, etc.).
type RunUsage struct {
	// Number of completion tokens used over the course of the run.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of prompt tokens used over the course of the run.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used (prompt + completion).
	TotalTokens int64 `json:"total_tokens,required"`
	// Metadata for the response, check the presence of optional fields with the
	// [resp.Field.IsPresent] method.
	JSON struct {
		CompletionTokens resp.Field
		PromptTokens     resp.Field
		TotalTokens      resp.Field
		ExtraFields      map[string]resp.Field
		raw              string // unmodified JSON, exposed via RawJSON
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r RunUsage) RawJSON() string { return r.JSON.raw }

// UnmarshalJSON decodes data into r, populating per-field metadata in r.JSON.
func (r *RunUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
565
// The status of the run, which can be either `queued`, `in_progress`,
// `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
// `incomplete`, or `expired`.
type RunStatus string

// Values accepted for RunStatus.
const (
	RunStatusQueued         RunStatus = "queued"
	RunStatusInProgress     RunStatus = "in_progress"
	RunStatusRequiresAction RunStatus = "requires_action"
	RunStatusCancelling     RunStatus = "cancelling"
	RunStatusCancelled      RunStatus = "cancelled"
	RunStatusFailed         RunStatus = "failed"
	RunStatusCompleted      RunStatus = "completed"
	RunStatusIncomplete     RunStatus = "incomplete"
	RunStatusExpired        RunStatus = "expired"
)
582
// BetaThreadRunNewParams is the request body for [BetaThreadRunService.New].
type BetaThreadRunNewParams struct {
	// The ID of the
	// [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
	// execute this run.
	AssistantID string `json:"assistant_id,required"`
	// Appends additional instructions at the end of the instructions for the run. This
	// is useful for modifying the behavior on a per-run basis without overriding other
	// instructions.
	AdditionalInstructions param.Opt[string] `json:"additional_instructions,omitzero"`
	// Overrides the
	// [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
	// of the assistant. This is useful for modifying the behavior on a per-run basis.
	Instructions param.Opt[string] `json:"instructions,omitzero"`
	// The maximum number of completion tokens that may be used over the course of the
	// run. The run will make a best effort to use only the number of completion tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// completion tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxCompletionTokens param.Opt[int64] `json:"max_completion_tokens,omitzero"`
	// The maximum number of prompt tokens that may be used over the course of the run.
	// The run will make a best effort to use only the number of prompt tokens
	// specified, across multiple turns of the run. If the run exceeds the number of
	// prompt tokens specified, the run will end with status `incomplete`. See
	// `incomplete_details` for more info.
	MaxPromptTokens param.Opt[int64] `json:"max_prompt_tokens,omitzero"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	Temperature param.Opt[float64] `json:"temperature,omitzero"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or temperature but not both.
	TopP param.Opt[float64] `json:"top_p,omitzero"`
	// Whether to enable
	// [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
	// during tool use.
	ParallelToolCalls param.Opt[bool] `json:"parallel_tool_calls,omitzero"`
	// Adds additional messages to the thread before creating the run.
	AdditionalMessages []BetaThreadRunNewParamsAdditionalMessage `json:"additional_messages,omitzero"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.MetadataParam `json:"metadata,omitzero"`
	// The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
	// be used to execute this run. If a value is provided here, it will override the
	// model associated with the assistant. If not, the model associated with the
	// assistant will be used.
	Model shared.ChatModel `json:"model,omitzero"`
	// **o-series models only**
	//
	// Constrains effort on reasoning for
	// [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
	// supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
	// result in faster responses and fewer tokens used on reasoning in a response.
	//
	// Any of "low", "medium", "high".
	ReasoningEffort shared.ReasoningEffort `json:"reasoning_effort,omitzero"`
	// Override the tools the assistant can use for this run. This is useful for
	// modifying the behavior on a per-run basis.
	Tools []AssistantToolUnionParam `json:"tools,omitzero"`
	// Controls for how a thread will be truncated prior to the run. Use this to
	// control the initial context window of the run.
	TruncationStrategy BetaThreadRunNewParamsTruncationStrategy `json:"truncation_strategy,omitzero"`
	// A list of additional fields to include in the response. Currently the only
	// supported value is `step_details.tool_calls[*].file_search.results[*].content`
	// to fetch the file search result content.
	//
	// See the
	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
	// for more information.
	//
	// NOTE: serialized as a query parameter, not in the JSON body.
	Include []RunStepInclude `query:"include,omitzero" json:"-"`
	// Specifies the format that the model must output. Compatible with
	// [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
	// [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
	// and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
	//
	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
	// Outputs which ensures the model will match your supplied JSON schema. Learn more
	// in the
	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
	//
	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
	// message the model generates is valid JSON.
	//
	// **Important:** when using JSON mode, you **must** also instruct the model to
	// produce JSON yourself via a system or user message. Without this, the model may
	// generate an unending stream of whitespace until the generation reaches the token
	// limit, resulting in a long-running and seemingly "stuck" request. Also note that
	// the message content may be partially cut off if `finish_reason="length"`, which
	// indicates the generation exceeded `max_tokens` or the conversation exceeded the
	// max context length.
	ResponseFormat AssistantResponseFormatOptionUnionParam `json:"response_format,omitzero"`
	// Controls which (if any) tool is called by the model. `none` means the model will
	// not call any tools and instead generates a message. `auto` is the default value
	// and means the model can pick between generating a message or calling one or more
	// tools. `required` means the model must call one or more tools before responding
	// to the user. Specifying a particular tool like `{"type": "file_search"}` or
	// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
	// call that tool.
	ToolChoice AssistantToolChoiceOptionUnionParam `json:"tool_choice,omitzero"`
	paramObj
}
690
691// IsPresent returns true if the field's value is not omitted and not the JSON
692// "null". To check if this field is omitted, use [param.IsOmitted].
693func (f BetaThreadRunNewParams) IsPresent() bool { return !param.IsOmitted(f) && !f.IsNull() }
694
695func (r BetaThreadRunNewParams) MarshalJSON() (data []byte, err error) {
696	type shadow BetaThreadRunNewParams
697	return param.MarshalObject(r, (*shadow)(&r))
698}
699
700// URLQuery serializes [BetaThreadRunNewParams]'s query parameters as `url.Values`.
701func (r BetaThreadRunNewParams) URLQuery() (v url.Values) {
702	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
703		ArrayFormat:  apiquery.ArrayQueryFormatBrackets,
704		NestedFormat: apiquery.NestedQueryFormatBrackets,
705	})
706}
707
// An additional message supplied when creating a run.
//
// The properties Content, Role are required.
type BetaThreadRunNewParamsAdditionalMessage struct {
	// The text contents of the message.
	Content BetaThreadRunNewParamsAdditionalMessageContentUnion `json:"content,omitzero,required"`
	// The role of the entity that is creating the message. Allowed values include:
	//
	//   - `user`: Indicates the message is sent by an actual user and should be used in
	//     most cases to represent user-generated messages.
	//   - `assistant`: Indicates the message is generated by the assistant. Use this
	//     value to insert messages from the assistant into the conversation.
	//
	// Any of "user", "assistant".
	Role string `json:"role,omitzero,required"`
	// A list of files attached to the message, and the tools they should be added to.
	Attachments []BetaThreadRunNewParamsAdditionalMessageAttachment `json:"attachments,omitzero"`
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.MetadataParam `json:"metadata,omitzero"`
	paramObj
}
732
733// IsPresent returns true if the field's value is not omitted and not the JSON
734// "null". To check if this field is omitted, use [param.IsOmitted].
735func (f BetaThreadRunNewParamsAdditionalMessage) IsPresent() bool {
736	return !param.IsOmitted(f) && !f.IsNull()
737}
738func (r BetaThreadRunNewParamsAdditionalMessage) MarshalJSON() (data []byte, err error) {
739	type shadow BetaThreadRunNewParamsAdditionalMessage
740	return param.MarshalObject(r, (*shadow)(&r))
741}
742
func init() {
	// Register a field validator for Role with the allowed enum values
	// "user" and "assistant".
	apijson.RegisterFieldValidator[BetaThreadRunNewParamsAdditionalMessage](
		"Role", false, "user", "assistant",
	)
}
748
// Union for the message content: either a plain text string or a list of
// content parts. Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type BetaThreadRunNewParamsAdditionalMessageContentUnion struct {
	OfString              param.Opt[string]              `json:",omitzero,inline"`
	OfArrayOfContentParts []MessageContentPartParamUnion `json:",omitzero,inline"`
	paramUnion
}
757
758// IsPresent returns true if the field's value is not omitted and not the JSON
759// "null". To check if this field is omitted, use [param.IsOmitted].
760func (u BetaThreadRunNewParamsAdditionalMessageContentUnion) IsPresent() bool {
761	return !param.IsOmitted(u) && !u.IsNull()
762}
// MarshalJSON delegates to param.MarshalUnion over the union's two variants.
func (u BetaThreadRunNewParamsAdditionalMessageContentUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion[BetaThreadRunNewParamsAdditionalMessageContentUnion](u.OfString, u.OfArrayOfContentParts)
}
766
767func (u *BetaThreadRunNewParamsAdditionalMessageContentUnion) asAny() any {
768	if !param.IsOmitted(u.OfString) {
769		return &u.OfString.Value
770	} else if !param.IsOmitted(u.OfArrayOfContentParts) {
771		return &u.OfArrayOfContentParts
772	}
773	return nil
774}
775
// A file attached to the message together with the tools it should be added
// to.
type BetaThreadRunNewParamsAdditionalMessageAttachment struct {
	// The ID of the file to attach to the message.
	FileID param.Opt[string] `json:"file_id,omitzero"`
	// The tools to add this file to.
	Tools []BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion `json:"tools,omitzero"`
	paramObj
}
783
784// IsPresent returns true if the field's value is not omitted and not the JSON
785// "null". To check if this field is omitted, use [param.IsOmitted].
786func (f BetaThreadRunNewParamsAdditionalMessageAttachment) IsPresent() bool {
787	return !param.IsOmitted(f) && !f.IsNull()
788}
789func (r BetaThreadRunNewParamsAdditionalMessageAttachment) MarshalJSON() (data []byte, err error) {
790	type shadow BetaThreadRunNewParamsAdditionalMessageAttachment
791	return param.MarshalObject(r, (*shadow)(&r))
792}
793
// Union of the tools an attached file can be added to: a code interpreter
// tool or a file search tool. Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion struct {
	OfCodeInterpreter *CodeInterpreterToolParam                                        `json:",omitzero,inline"`
	OfFileSearch      *BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch `json:",omitzero,inline"`
	paramUnion
}
802
803// IsPresent returns true if the field's value is not omitted and not the JSON
804// "null". To check if this field is omitted, use [param.IsOmitted].
805func (u BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) IsPresent() bool {
806	return !param.IsOmitted(u) && !u.IsNull()
807}
// MarshalJSON delegates to param.MarshalUnion over the union's two variants.
func (u BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion[BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion](u.OfCodeInterpreter, u.OfFileSearch)
}
811
812func (u *BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) asAny() any {
813	if !param.IsOmitted(u.OfCodeInterpreter) {
814		return u.OfCodeInterpreter
815	} else if !param.IsOmitted(u.OfFileSearch) {
816		return u.OfFileSearch
817	}
818	return nil
819}
820
821// Returns a pointer to the underlying variant's property, if present.
822func (u BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion) GetType() *string {
823	if vt := u.OfCodeInterpreter; vt != nil {
824		return (*string)(&vt.Type)
825	} else if vt := u.OfFileSearch; vt != nil {
826		return (*string)(&vt.Type)
827	}
828	return nil
829}
830
func init() {
	// Register the JSON "type" discriminator mapping for the tool union:
	// "code_interpreter" selects CodeInterpreterToolParam and "file_search"
	// selects BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch.
	apijson.RegisterUnion[BetaThreadRunNewParamsAdditionalMessageAttachmentToolUnion](
		"type",
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(CodeInterpreterToolParam{}),
			DiscriminatorValue: "code_interpreter",
		},
		apijson.UnionVariant{
			TypeFilter:         gjson.JSON,
			Type:               reflect.TypeOf(BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch{}),
			DiscriminatorValue: "file_search",
		},
	)
}
846
// A file_search tool reference for a message attachment.
//
// The property Type is required.
type BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch struct {
	// The type of tool being defined: `file_search`
	//
	// This field can be elided, and will marshal its zero value as "file_search".
	Type constant.FileSearch `json:"type,required"`
	paramObj
}
855
856// IsPresent returns true if the field's value is not omitted and not the JSON
857// "null". To check if this field is omitted, use [param.IsOmitted].
858func (f BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch) IsPresent() bool {
859	return !param.IsOmitted(f) && !f.IsNull()
860}
861func (r BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch) MarshalJSON() (data []byte, err error) {
862	type shadow BetaThreadRunNewParamsAdditionalMessageAttachmentToolFileSearch
863	return param.MarshalObject(r, (*shadow)(&r))
864}
865
// Controls for how a thread will be truncated prior to the run. Use this to
// control the initial context window of the run.
//
// The property Type is required.
type BetaThreadRunNewParamsTruncationStrategy struct {
	// The truncation strategy to use for the thread. The default is `auto`. If set to
	// `last_messages`, the thread will be truncated to the n most recent messages in
	// the thread. When set to `auto`, messages in the middle of the thread will be
	// dropped to fit the context length of the model, `max_prompt_tokens`.
	//
	// Any of "auto", "last_messages".
	Type string `json:"type,omitzero,required"`
	// The number of most recent messages from the thread when constructing the context
	// for the run.
	LastMessages param.Opt[int64] `json:"last_messages,omitzero"`
	paramObj
}
883
884// IsPresent returns true if the field's value is not omitted and not the JSON
885// "null". To check if this field is omitted, use [param.IsOmitted].
886func (f BetaThreadRunNewParamsTruncationStrategy) IsPresent() bool {
887	return !param.IsOmitted(f) && !f.IsNull()
888}
889func (r BetaThreadRunNewParamsTruncationStrategy) MarshalJSON() (data []byte, err error) {
890	type shadow BetaThreadRunNewParamsTruncationStrategy
891	return param.MarshalObject(r, (*shadow)(&r))
892}
893
func init() {
	// Register a field validator for Type with the allowed enum values
	// "auto" and "last_messages".
	apijson.RegisterFieldValidator[BetaThreadRunNewParamsTruncationStrategy](
		"Type", false, "auto", "last_messages",
	)
}
899
// Request body for updating a run; Metadata is the only property exposed
// here.
type BetaThreadRunUpdateParams struct {
	// Set of 16 key-value pairs that can be attached to an object. This can be useful
	// for storing additional information about the object in a structured format, and
	// querying for objects via API or the dashboard.
	//
	// Keys are strings with a maximum length of 64 characters. Values are strings with
	// a maximum length of 512 characters.
	Metadata shared.MetadataParam `json:"metadata,omitzero"`
	paramObj
}
910
911// IsPresent returns true if the field's value is not omitted and not the JSON
912// "null". To check if this field is omitted, use [param.IsOmitted].
913func (f BetaThreadRunUpdateParams) IsPresent() bool { return !param.IsOmitted(f) && !f.IsNull() }
914
915func (r BetaThreadRunUpdateParams) MarshalJSON() (data []byte, err error) {
916	type shadow BetaThreadRunUpdateParams
917	return param.MarshalObject(r, (*shadow)(&r))
918}
919
// Query parameters for listing runs: cursor pagination via After/Before, a
// page-size Limit, and a created_at sort Order.
type BetaThreadRunListParams struct {
	// A cursor for use in pagination. `after` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// ending with obj_foo, your subsequent call can include after=obj_foo in order to
	// fetch the next page of the list.
	After param.Opt[string] `query:"after,omitzero" json:"-"`
	// A cursor for use in pagination. `before` is an object ID that defines your place
	// in the list. For instance, if you make a list request and receive 100 objects,
	// starting with obj_foo, your subsequent call can include before=obj_foo in order
	// to fetch the previous page of the list.
	Before param.Opt[string] `query:"before,omitzero" json:"-"`
	// A limit on the number of objects to be returned. Limit can range between 1 and
	// 100, and the default is 20.
	Limit param.Opt[int64] `query:"limit,omitzero" json:"-"`
	// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
	// order and `desc` for descending order.
	//
	// Any of "asc", "desc".
	Order BetaThreadRunListParamsOrder `query:"order,omitzero" json:"-"`
	paramObj
}
941
942// IsPresent returns true if the field's value is not omitted and not the JSON
943// "null". To check if this field is omitted, use [param.IsOmitted].
944func (f BetaThreadRunListParams) IsPresent() bool { return !param.IsOmitted(f) && !f.IsNull() }
945
946// URLQuery serializes [BetaThreadRunListParams]'s query parameters as
947// `url.Values`.
948func (r BetaThreadRunListParams) URLQuery() (v url.Values) {
949	return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{
950		ArrayFormat:  apiquery.ArrayQueryFormatBrackets,
951		NestedFormat: apiquery.NestedQueryFormatBrackets,
952	})
953}
954
// Sort order by the `created_at` timestamp of the objects. `asc` for ascending
// order and `desc` for descending order.
type BetaThreadRunListParamsOrder string

// Valid values of [BetaThreadRunListParamsOrder].
const (
	BetaThreadRunListParamsOrderAsc  BetaThreadRunListParamsOrder = "asc"
	BetaThreadRunListParamsOrderDesc BetaThreadRunListParamsOrder = "desc"
)
963
// Request body carrying the tool outputs to submit for a run; ToolOutputs is
// required.
type BetaThreadRunSubmitToolOutputsParams struct {
	// A list of tools for which the outputs are being submitted.
	ToolOutputs []BetaThreadRunSubmitToolOutputsParamsToolOutput `json:"tool_outputs,omitzero,required"`
	paramObj
}
969
970// IsPresent returns true if the field's value is not omitted and not the JSON
971// "null". To check if this field is omitted, use [param.IsOmitted].
972func (f BetaThreadRunSubmitToolOutputsParams) IsPresent() bool {
973	return !param.IsOmitted(f) && !f.IsNull()
974}
975
976func (r BetaThreadRunSubmitToolOutputsParams) MarshalJSON() (data []byte, err error) {
977	type shadow BetaThreadRunSubmitToolOutputsParams
978	return param.MarshalObject(r, (*shadow)(&r))
979}
980
// Pairs a tool call ID with the output produced for it.
type BetaThreadRunSubmitToolOutputsParamsToolOutput struct {
	// The output of the tool call to be submitted to continue the run.
	Output param.Opt[string] `json:"output,omitzero"`
	// The ID of the tool call in the `required_action` object within the run object
	// the output is being submitted for.
	ToolCallID param.Opt[string] `json:"tool_call_id,omitzero"`
	paramObj
}
989
990// IsPresent returns true if the field's value is not omitted and not the JSON
991// "null". To check if this field is omitted, use [param.IsOmitted].
992func (f BetaThreadRunSubmitToolOutputsParamsToolOutput) IsPresent() bool {
993	return !param.IsOmitted(f) && !f.IsNull()
994}
995func (r BetaThreadRunSubmitToolOutputsParamsToolOutput) MarshalJSON() (data []byte, err error) {
996	type shadow BetaThreadRunSubmitToolOutputsParamsToolOutput
997	return param.MarshalObject(r, (*shadow)(&r))
998}