1// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
3package anthropic
4
5import (
6 "context"
7 "fmt"
8 "net/http"
9
10 "github.com/anthropics/anthropic-sdk-go/internal/apijson"
11 "github.com/anthropics/anthropic-sdk-go/internal/requestconfig"
12 "github.com/anthropics/anthropic-sdk-go/option"
13 "github.com/anthropics/anthropic-sdk-go/packages/param"
14 "github.com/anthropics/anthropic-sdk-go/packages/respjson"
15 "github.com/anthropics/anthropic-sdk-go/packages/ssestream"
16 "github.com/anthropics/anthropic-sdk-go/shared/constant"
17)
18
// CompletionService contains methods and other services that help with interacting
// with the anthropic API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly; use
// [NewCompletionService] instead.
type CompletionService struct {
	Options []option.RequestOption
}
28
29// NewCompletionService generates a new service that applies the given options to
30// each request. These options are applied after the parent client's options (if
31// there is one), and before any request-specific options.
32func NewCompletionService(opts ...option.RequestOption) (r CompletionService) {
33 r = CompletionService{}
34 r.Options = opts
35 return
36}
37
38// [Legacy] Create a Text Completion.
39//
40// The Text Completions API is a legacy API. We recommend using the
41// [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
42//
43// Future models and features will not be compatible with Text Completions. See our
44// [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
45// for guidance in migrating from Text Completions to Messages.
46//
47// Note: If you choose to set a timeout for this request, we recommend 10 minutes.
48func (r *CompletionService) New(ctx context.Context, params CompletionNewParams, opts ...option.RequestOption) (res *Completion, err error) {
49 for _, v := range params.Betas {
50 opts = append(opts, option.WithHeaderAdd("anthropic-beta", fmt.Sprintf("%s", v)))
51 }
52 opts = append(r.Options[:], opts...)
53 path := "v1/complete"
54 err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &res, opts...)
55 return
56}
57
58// [Legacy] Create a Text Completion.
59//
60// The Text Completions API is a legacy API. We recommend using the
61// [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
62//
63// Future models and features will not be compatible with Text Completions. See our
64// [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
65// for guidance in migrating from Text Completions to Messages.
66//
67// Note: If you choose to set a timeout for this request, we recommend 10 minutes.
68func (r *CompletionService) NewStreaming(ctx context.Context, params CompletionNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[Completion]) {
69 var (
70 raw *http.Response
71 err error
72 )
73 for _, v := range params.Betas {
74 opts = append(opts, option.WithHeaderAdd("anthropic-beta", fmt.Sprintf("%s", v)))
75 }
76 opts = append(r.Options[:], opts...)
77 opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...)
78 path := "v1/complete"
79 err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &raw, opts...)
80 return ssestream.NewStream[Completion](ssestream.NewDecoder(raw), err)
81}
82
// Completion is the response payload of a [Legacy] Text Completions request.
type Completion struct {
	// Unique object identifier.
	//
	// The format and length of IDs may change over time.
	ID string `json:"id,required"`
	// The resulting completion up to and excluding the stop sequences.
	Completion string `json:"completion,required"`
	// The model that will complete your prompt.
	//
	// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
	// details and options.
	Model Model `json:"model,required"`
	// The reason that we stopped.
	//
	// This may be one of the following values:
	//
	// - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
	//   `stop_sequences` parameter, or a stop sequence built into the model
	// - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
	StopReason string `json:"stop_reason,required"`
	// Object type.
	//
	// For Text Completions, this is always `"completion"`.
	Type constant.Completion `json:"type,required"`
	// JSON contains metadata for fields, check presence with [respjson.Field.Valid].
	JSON struct {
		ID          respjson.Field
		Completion  respjson.Field
		Model       respjson.Field
		StopReason  respjson.Field
		Type        respjson.Field
		ExtraFields map[string]respjson.Field
		raw         string
	} `json:"-"`
}
117
// RawJSON returns the unmodified JSON received from the API.
func (r Completion) RawJSON() string { return r.JSON.raw }
// UnmarshalJSON implements [encoding/json.Unmarshaler] by delegating to the
// SDK's apijson decoder.
func (r *Completion) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}
123
// CompletionNewParams holds the request body and headers for
// [CompletionService.New] and [CompletionService.NewStreaming].
type CompletionNewParams struct {
	// The maximum number of tokens to generate before stopping.
	//
	// Note that our models may stop _before_ reaching this maximum. This parameter
	// only specifies the absolute maximum number of tokens to generate.
	MaxTokensToSample int64 `json:"max_tokens_to_sample,required"`
	// The model that will complete your prompt.
	//
	// See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
	// details and options.
	Model Model `json:"model,omitzero,required"`
	// The prompt that you want Claude to complete.
	//
	// For proper response generation you will need to format your prompt using
	// alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:
	//
	// ```
	// "\n\nHuman: {userQuestion}\n\nAssistant:"
	// ```
	//
	// See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
	// our guide to
	// [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
	// details.
	Prompt string `json:"prompt,required"`
	// Amount of randomness injected into the response.
	//
	// Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
	// for analytical / multiple choice, and closer to `1.0` for creative and
	// generative tasks.
	//
	// Note that even with `temperature` of `0.0`, the results will not be fully
	// deterministic.
	Temperature param.Opt[float64] `json:"temperature,omitzero"`
	// Only sample from the top K options for each subsequent token.
	//
	// Used to remove "long tail" low probability responses.
	// [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
	//
	// Recommended for advanced use cases only. You usually only need to use
	// `temperature`.
	TopK param.Opt[int64] `json:"top_k,omitzero"`
	// Use nucleus sampling.
	//
	// In nucleus sampling, we compute the cumulative distribution over all the options
	// for each subsequent token in decreasing probability order and cut it off once it
	// reaches a particular probability specified by `top_p`. You should either alter
	// `temperature` or `top_p`, but not both.
	//
	// Recommended for advanced use cases only. You usually only need to use
	// `temperature`.
	TopP param.Opt[float64] `json:"top_p,omitzero"`
	// An object describing metadata about the request.
	Metadata MetadataParam `json:"metadata,omitzero"`
	// Sequences that will cause the model to stop generating.
	//
	// Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
	// sequences in the future. By providing the stop_sequences parameter, you may
	// include additional strings that will cause the model to stop generating.
	StopSequences []string `json:"stop_sequences,omitzero"`
	// Optional header to specify the beta version(s) you want to use.
	Betas []AnthropicBeta `header:"anthropic-beta,omitzero" json:"-"`
	paramObj
}
187
188func (r CompletionNewParams) MarshalJSON() (data []byte, err error) {
189 type shadow CompletionNewParams
190 return param.MarshalObject(r, (*shadow)(&r))
191}
// UnmarshalJSON implements [encoding/json.Unmarshaler] by delegating to the
// SDK's apijson decoder.
func (r *CompletionNewParams) UnmarshalJSON(data []byte) error {
	return apijson.UnmarshalRoot(data, r)
}