// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

package openai

import (
	"context"
	"net/http"

	"github.com/openai/openai-go/internal/apijson"
	"github.com/openai/openai-go/internal/requestconfig"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/packages/respjson"
	"github.com/openai/openai-go/packages/ssestream"
	"github.com/openai/openai-go/shared/constant"
)

// CompletionService contains methods and other services that help with interacting
// with the openai API.
//
// Note that, unlike clients, this service does not read variables from the
// environment automatically. You should not instantiate this service directly;
// use the [NewCompletionService] method instead.
type CompletionService struct {
	Options []option.RequestOption
}

// NewCompletionService generates a new service that applies the given options to
// each request. These options are applied after the parent client's options (if
// there is one), and before any request-specific options.
func NewCompletionService(opts ...option.RequestOption) (r CompletionService) {
	r = CompletionService{}
	r.Options = opts
	return
}

// Creates a completion for the provided prompt and parameters.
func (r *CompletionService) New(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (res *Completion, err error) {
	opts = append(r.Options[:], opts...)
	path := "completions"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}
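
// A minimal usage sketch for [CompletionService.New] (not part of the generated
// API). It assumes an API key is available as OPENAI_API_KEY in the environment;
// the request-specific option shown here is applied after the client- and
// service-level options, per the ordering described on [NewCompletionService].
//
//	client := openai.NewClient() // reads OPENAI_API_KEY from the environment
//	completion, err := client.Completions.New(
//		context.TODO(),
//		openai.CompletionNewParams{
//			Model:     openai.CompletionNewParamsModelGPT3_5TurboInstruct,
//			Prompt:    openai.CompletionNewParamsPromptUnion{OfString: openai.String("Say this is a test")},
//			MaxTokens: openai.Int(16),
//		},
//		option.WithMaxRetries(2), // request-specific option, applied last
//	)
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(completion.Choices[0].Text)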

// Creates a completion for the provided prompt and parameters.
func (r *CompletionService) NewStreaming(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[Completion]) {
	var (
		raw *http.Response
		err error
	)
	opts = append(r.Options[:], opts...)
	opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...)
	path := "completions"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
	return ssestream.NewStream[Completion](ssestream.NewDecoder(raw), err)
}
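
// A sketch of consuming the stream returned by [CompletionService.NewStreaming],
// given a client and params as in the example above. Errors are surfaced through
// [ssestream.Stream.Err] only after iteration stops, so check it after the loop.
//
//	stream := client.Completions.NewStreaming(context.TODO(), params)
//	defer stream.Close()
//	for stream.Next() {
//		chunk := stream.Current() // each streamed event decodes into a Completion
//		if len(chunk.Choices) > 0 {
//			fmt.Print(chunk.Choices[0].Text)
//		}
//	}
//	if err := stream.Err(); err != nil {
//		panic(err)
//	}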

// Represents a completion response from the API. Note: both the streamed and
// non-streamed response objects share the same shape (unlike the chat endpoint).
type Completion struct {
	// A unique identifier for the completion.
	ID string `json:"id,required"`
	// The list of completion choices the model generated for the input prompt.
	Choices []CompletionChoice `json:"choices,required"`
	// The Unix timestamp (in seconds) of when the completion was created.
	Created int64 `json:"created,required"`
	// The model used for completion.
	Model string `json:"model,required"`
	// The object type, which is always "text_completion".
	Object constant.TextCompletion `json:"object,required"`
	// This fingerprint represents the backend configuration that the model runs with.
	//
	// Can be used in conjunction with the `seed` request parameter to understand when
	// backend changes have been made that might impact determinism.
	SystemFingerprint string `json:"system_fingerprint"`
	// Usage statistics for the completion request.
	Usage CompletionUsage `json:"usage"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ID                respjson.Field
		Choices           respjson.Field
		Created           respjson.Field
		Model             respjson.Field
		Object            respjson.Field
		SystemFingerprint respjson.Field
		Usage             respjson.Field
		ExtraFields       map[string]respjson.Field
		raw               string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r Completion) RawJSON() string { return r.JSON.raw }
func (r *Completion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
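
// A sketch of distinguishing absent optional fields from genuine zero values,
// given a completion returned by [CompletionService.New], using the JSON
// metadata noted on the struct.
//
//	if completion.JSON.SystemFingerprint.Valid() {
//		fmt.Println("fingerprint:", completion.SystemFingerprint)
//	} else {
//		fmt.Println("system_fingerprint was absent from the response")
//	}
//	fmt.Println(completion.RawJSON()) // the exact JSON received from the API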

type CompletionChoice struct {
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, or `content_filter` if
	// content was omitted due to a flag from our content filters.
	//
	// Any of "stop", "length", "content_filter".
	FinishReason CompletionChoiceFinishReason `json:"finish_reason,required"`
	Index        int64                        `json:"index,required"`
	Logprobs     CompletionChoiceLogprobs     `json:"logprobs,required"`
	Text         string                       `json:"text,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		FinishReason respjson.Field
		Index        respjson.Field
		Logprobs     respjson.Field
		Text         respjson.Field
		ExtraFields  map[string]respjson.Field
		raw          string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r CompletionChoice) RawJSON() string { return r.JSON.raw }
func (r *CompletionChoice) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// The reason the model stopped generating tokens. This will be `stop` if the model
// hit a natural stop point or a provided stop sequence, `length` if the maximum
// number of tokens specified in the request was reached, or `content_filter` if
// content was omitted due to a flag from our content filters.
type CompletionChoiceFinishReason string

const (
	CompletionChoiceFinishReasonStop          CompletionChoiceFinishReason = "stop"
	CompletionChoiceFinishReasonLength        CompletionChoiceFinishReason = "length"
	CompletionChoiceFinishReasonContentFilter CompletionChoiceFinishReason = "content_filter"
)
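
// A sketch of branching on the finish reason for a choice taken from
// completion.Choices; the default arm guards against values introduced by the
// API after this SDK was generated.
//
//	switch choice.FinishReason {
//	case CompletionChoiceFinishReasonStop:
//		// hit a natural stop point or a provided stop sequence
//	case CompletionChoiceFinishReasonLength:
//		// truncated at max_tokens; consider raising the limit
//	case CompletionChoiceFinishReasonContentFilter:
//		// content was omitted by the content filter
//	default:
//		// unrecognized finish reason from a newer API version
//	}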

type CompletionChoiceLogprobs struct {
	TextOffset    []int64              `json:"text_offset"`
	TokenLogprobs []float64            `json:"token_logprobs"`
	Tokens        []string             `json:"tokens"`
	TopLogprobs   []map[string]float64 `json:"top_logprobs"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		TextOffset    respjson.Field
		TokenLogprobs respjson.Field
		Tokens        respjson.Field
		TopLogprobs   respjson.Field
		ExtraFields   map[string]respjson.Field
		raw           string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r CompletionChoiceLogprobs) RawJSON() string { return r.JSON.raw }
func (r *CompletionChoiceLogprobs) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
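
// The slices above are parallel: index i of Tokens, TokenLogprobs, and
// TextOffset all describe the same sampled token. A sketch of walking them
// together, assuming the request set the `logprobs` parameter:
//
//	lp := choice.Logprobs
//	for i, tok := range lp.Tokens {
//		fmt.Printf("%q at offset %d: logprob %.4f\n", tok, lp.TextOffset[i], lp.TokenLogprobs[i])
//	}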

// Usage statistics for the completion request.
type CompletionUsage struct {
	// Number of tokens in the generated completion.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of tokens in the prompt.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used in the request (prompt + completion).
	TotalTokens int64 `json:"total_tokens,required"`
	// Breakdown of tokens used in a completion.
	CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
	// Breakdown of tokens used in the prompt.
	PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		CompletionTokens        respjson.Field
		PromptTokens            respjson.Field
		TotalTokens             respjson.Field
		CompletionTokensDetails respjson.Field
		PromptTokensDetails     respjson.Field
		ExtraFields             map[string]respjson.Field
		raw                     string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r CompletionUsage) RawJSON() string { return r.JSON.raw }
func (r *CompletionUsage) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Breakdown of tokens used in a completion.
type CompletionUsageCompletionTokensDetails struct {
	// When using Predicted Outputs, the number of tokens in the prediction that
	// appeared in the completion.
	AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
	// Audio input tokens generated by the model.
	AudioTokens int64 `json:"audio_tokens"`
	// Tokens generated by the model for reasoning.
	ReasoningTokens int64 `json:"reasoning_tokens"`
	// When using Predicted Outputs, the number of tokens in the prediction that did
	// not appear in the completion. However, like reasoning tokens, these tokens are
	// still counted in the total completion tokens for purposes of billing, output,
	// and context window limits.
	RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		AcceptedPredictionTokens respjson.Field
		AudioTokens              respjson.Field
		ReasoningTokens          respjson.Field
		RejectedPredictionTokens respjson.Field
		ExtraFields              map[string]respjson.Field
		raw                      string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r CompletionUsageCompletionTokensDetails) RawJSON() string { return r.JSON.raw }
func (r *CompletionUsageCompletionTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}

// Breakdown of tokens used in the prompt.
type CompletionUsagePromptTokensDetails struct {
	// Audio input tokens present in the prompt.
	AudioTokens int64 `json:"audio_tokens"`
	// Cached tokens present in the prompt.
	CachedTokens int64 `json:"cached_tokens"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		AudioTokens  respjson.Field
		CachedTokens respjson.Field
		ExtraFields  map[string]respjson.Field
		raw          string
	} `json:"-"`
}

// Returns the unmodified JSON received from the API
func (r CompletionUsagePromptTokensDetails) RawJSON() string { return r.JSON.raw }
func (r *CompletionUsagePromptTokensDetails) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
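
// A sketch of reading usage figures. The detail breakdowns are optional and
// omitted for models that do not report them, so check presence via the JSON
// metadata before trusting their zero values.
//
//	u := completion.Usage
//	fmt.Printf("%d prompt + %d completion = %d total tokens\n",
//		u.PromptTokens, u.CompletionTokens, u.TotalTokens)
//	if u.JSON.PromptTokensDetails.Valid() {
//		fmt.Println("cached prompt tokens:", u.PromptTokensDetails.CachedTokens)
//	}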

type CompletionNewParams struct {
	// The prompt(s) to generate completions for, encoded as a string, array of
	// strings, array of tokens, or array of token arrays.
	//
	// Note that <|endoftext|> is the document separator that the model sees during
	// training, so if a prompt is not specified the model will generate as if from the
	// beginning of a new document.
	Prompt CompletionNewParamsPromptUnion `json:"prompt,omitzero,required"`
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models) for descriptions of
	// them.
	Model CompletionNewParamsModel `json:"model,omitzero,required"`
	// Generates `best_of` completions server-side and returns the "best" (the one with
	// the highest log probability per token). Results cannot be streamed.
	//
	// When used with `n`, `best_of` controls the number of candidate completions and
	// `n` specifies how many to return – `best_of` must be greater than `n`.
	//
	// **Note:** Because this parameter generates many completions, it can quickly
	// consume your token quota. Use carefully and ensure that you have reasonable
	// settings for `max_tokens` and `stop`.
	BestOf param.Opt[int64] `json:"best_of,omitzero"`
	// Echo back the prompt in addition to the completion.
	Echo param.Opt[bool] `json:"echo,omitzero"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
	// existing frequency in the text so far, decreasing the model's likelihood to
	// repeat the same line verbatim.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
	FrequencyPenalty param.Opt[float64] `json:"frequency_penalty,omitzero"`
	// Include the log probabilities on the `logprobs` most likely output tokens, as
	// well as the chosen tokens. For example, if `logprobs` is 5, the API will return
	// a list of the 5 most likely tokens. The API will always return the `logprob` of
	// the sampled token, so there may be up to `logprobs+1` elements in the response.
	//
	// The maximum value for `logprobs` is 5.
	Logprobs param.Opt[int64] `json:"logprobs,omitzero"`
	// The maximum number of [tokens](/tokenizer) that can be generated in the
	// completion.
	//
	// The token count of your prompt plus `max_tokens` cannot exceed the model's
	// context length.
	// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
	// for counting tokens.
	MaxTokens param.Opt[int64] `json:"max_tokens,omitzero"`
	// How many completions to generate for each prompt.
	//
	// **Note:** Because this parameter generates many completions, it can quickly
	// consume your token quota. Use carefully and ensure that you have reasonable
	// settings for `max_tokens` and `stop`.
	N param.Opt[int64] `json:"n,omitzero"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on
	// whether they appear in the text so far, increasing the model's likelihood to
	// talk about new topics.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
	PresencePenalty param.Opt[float64] `json:"presence_penalty,omitzero"`
	// If specified, our system will make a best effort to sample deterministically,
	// such that repeated requests with the same `seed` and parameters should return
	// the same result.
	//
	// Determinism is not guaranteed, and you should refer to the `system_fingerprint`
	// response parameter to monitor changes in the backend.
	Seed param.Opt[int64] `json:"seed,omitzero"`
	// The suffix that comes after a completion of inserted text.
	//
	// This parameter is only supported for `gpt-3.5-turbo-instruct`.
	Suffix param.Opt[string] `json:"suffix,omitzero"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	//
	// We generally recommend altering this or `top_p` but not both.
	Temperature param.Opt[float64] `json:"temperature,omitzero"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or `temperature` but not both.
	TopP param.Opt[float64] `json:"top_p,omitzero"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Opt[string] `json:"user,omitzero"`
	// Modify the likelihood of specified tokens appearing in the completion.
	//
	// Accepts a JSON object that maps tokens (specified by their token ID in the GPT
	// tokenizer) to an associated bias value from -100 to 100. You can use this
	// [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
	// Mathematically, the bias is added to the logits generated by the model prior to
	// sampling. The exact effect will vary per model, but values between -1 and 1
	// should decrease or increase likelihood of selection; values like -100 or 100
	// should result in a ban or exclusive selection of the relevant token.
	//
	// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
	// from being generated.
	LogitBias map[string]int64 `json:"logit_bias,omitzero"`
	// Not supported with latest reasoning models `o3` and `o4-mini`.
	//
	// Up to 4 sequences where the API will stop generating further tokens. The
	// returned text will not contain the stop sequence.
	Stop CompletionNewParamsStopUnion `json:"stop,omitzero"`
	// Options for streaming response. Only set this when you set `stream: true`.
	StreamOptions ChatCompletionStreamOptionsParam `json:"stream_options,omitzero"`
	paramObj
}

func (r CompletionNewParams) MarshalJSON() (data []byte, err error) {
	type shadow CompletionNewParams
	return param.MarshalObject(r, (*shadow)(&r))
}
func (r *CompletionNewParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
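
// A sketch of building the params. Optional scalar fields take [param.Opt]
// values, so anything left unset is omitted from the request body rather than
// sent as a zero; the openai.String, openai.Int, and openai.Float helpers wrap
// plain values.
//
//	params := openai.CompletionNewParams{
//		Model:       openai.CompletionNewParamsModelGPT3_5TurboInstruct,
//		Prompt:      openai.CompletionNewParamsPromptUnion{OfString: openai.String("Once upon a time")},
//		MaxTokens:   openai.Int(64),
//		Temperature: openai.Float(0.2),
//		LogitBias:   map[string]int64{"50256": -100}, // forbid <|endoftext|>, per the field docs
//	}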

// ID of the model to use. You can use the
// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
// see all of your available models, or see our
// [Model overview](https://platform.openai.com/docs/models) for descriptions of
// them.
type CompletionNewParamsModel string

const (
	CompletionNewParamsModelGPT3_5TurboInstruct CompletionNewParamsModel = "gpt-3.5-turbo-instruct"
	CompletionNewParamsModelDavinci002          CompletionNewParamsModel = "davinci-002"
	CompletionNewParamsModelBabbage002          CompletionNewParamsModel = "babbage-002"
)
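
// Because the type is a plain string, models not listed above can still be
// requested by converting directly (the model name here is hypothetical):
//
//	params.Model = CompletionNewParamsModel("my-fine-tuned-model")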

// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type CompletionNewParamsPromptUnion struct {
	OfString             param.Opt[string] `json:",omitzero,inline"`
	OfArrayOfStrings     []string          `json:",omitzero,inline"`
	OfArrayOfTokens      []int64           `json:",omitzero,inline"`
	OfArrayOfTokenArrays [][]int64         `json:",omitzero,inline"`
	paramUnion
}

func (u CompletionNewParamsPromptUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfString, u.OfArrayOfStrings, u.OfArrayOfTokens, u.OfArrayOfTokenArrays)
}
func (u *CompletionNewParamsPromptUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

func (u *CompletionNewParamsPromptUnion) asAny() any {
	if !param.IsOmitted(u.OfString) {
		return &u.OfString.Value
	} else if !param.IsOmitted(u.OfArrayOfStrings) {
		return &u.OfArrayOfStrings
	} else if !param.IsOmitted(u.OfArrayOfTokens) {
		return &u.OfArrayOfTokens
	} else if !param.IsOmitted(u.OfArrayOfTokenArrays) {
		return &u.OfArrayOfTokenArrays
	}
	return nil
}
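
// A sketch of the four ways to populate the prompt union; set exactly one
// variant, and it alone is marshaled inline as the `prompt` value (the token
// IDs below are illustrative only).
//
//	p1 := CompletionNewParamsPromptUnion{OfString: openai.String("one prompt")}
//	p2 := CompletionNewParamsPromptUnion{OfArrayOfStrings: []string{"first", "second"}}
//	p3 := CompletionNewParamsPromptUnion{OfArrayOfTokens: []int64{9906, 1917}}
//	p4 := CompletionNewParamsPromptUnion{OfArrayOfTokenArrays: [][]int64{{9906}, {1917}}}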

// Only one field can be non-zero.
//
// Use [param.IsOmitted] to confirm if a field is set.
type CompletionNewParamsStopUnion struct {
	OfString      param.Opt[string] `json:",omitzero,inline"`
	OfStringArray []string          `json:",omitzero,inline"`
	paramUnion
}

func (u CompletionNewParamsStopUnion) MarshalJSON() ([]byte, error) {
	return param.MarshalUnion(u, u.OfString, u.OfStringArray)
}
func (u *CompletionNewParamsStopUnion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, u)
}

func (u *CompletionNewParamsStopUnion) asAny() any {
	if !param.IsOmitted(u.OfString) {
		return &u.OfString.Value
	} else if !param.IsOmitted(u.OfStringArray) {
		return &u.OfStringArray
	}
	return nil
}
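
// A sketch of setting stop sequences, either as a single string or as up to
// four strings.
//
//	params.Stop = CompletionNewParamsStopUnion{OfString: openai.String("\n")}
//	params.Stop = CompletionNewParamsStopUnion{OfStringArray: []string{"\n", "END"}}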