package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/shared"
	"github.com/opencode-ai/opencode/internal/config"
	"github.com/opencode-ai/opencode/internal/llm/tools"
	"github.com/opencode-ai/opencode/internal/logging"
	"github.com/opencode-ai/opencode/internal/message"
)

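// openaiOptions holds OpenAI-specific configuration collected from
// OpenAIOption functions.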
type openaiOptions struct {
	baseURL         string
	disableCache    bool
	reasoningEffort string
}

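// OpenAIOption configures openaiOptions.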
type OpenAIOption func(*openaiOptions)

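// openaiClient implements ProviderClient against the OpenAI chat
// completions API.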
type openaiClient struct {
	providerOptions providerClientOptions
	options         openaiOptions
	client          openai.Client
}

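// OpenAIClient is the ProviderClient type exposed for OpenAI-backed clients.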
type OpenAIClient ProviderClient

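// newOpenAIClient builds an OpenAI provider client. Reasoning effort defaults
// to "medium" before caller-supplied options are applied; the API key and base
// URL are only forwarded to the SDK when set. A minimal construction sketch
// (field values are illustrative, not defaults):
//
//	client := newOpenAIClient(providerClientOptions{
//		apiKey:        "sk-...",
//		openaiOptions: []OpenAIOption{WithReasoningEffort("high")},
//	})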
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	openaiOpts := openaiOptions{
		reasoningEffort: "medium",
	}
	for _, o := range opts.openaiOptions {
		o(&openaiOpts)
	}

	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if openaiOpts.baseURL != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL))
	}

	client := openai.NewClient(openaiClientOptions...)
	return &openaiClient{
		providerOptions: opts,
		options:         openaiOpts,
		client:          client,
	}
}

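// convertMessages maps internal messages into OpenAI chat parameters,
// prepending the configured system message and expanding tool results into
// individual tool messages.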
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	// Add the system message first
	openaiMessages = append(openaiMessages, openai.SystemMessage(o.providerOptions.systemMessage))

	for _, msg := range messages {
		switch msg.Role {
		case message.User:
			openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			if msg.Content().String() != "" {
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: openai.String(msg.Content().String()),
				}
			}

			if len(msg.ToolCalls()) > 0 {
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

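// convertTools maps internal tool definitions onto OpenAI function
// declarations, wrapping each tool's parameters in a JSON Schema object.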
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

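// finishReason translates an OpenAI finish reason into the provider-agnostic
// equivalent.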
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

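// preparedParams assembles the completion request, choosing the token-limit
// field and reasoning effort based on whether the model can reason.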
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(o.providerOptions.model.APIModel),
		Messages: messages,
		Tools:    tools,
	}

	if o.providerOptions.model.CanReason {
		params.MaxCompletionTokens = openai.Int(o.providerOptions.maxTokens)
		switch o.options.reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		default:
			params.ReasoningEffort = shared.ReasoningEffortMedium
		}
	} else {
		params.MaxTokens = openai.Int(o.providerOptions.maxTokens)
	}

	return params
}

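// send issues a single non-streaming completion request, retrying with
// backoff when shouldRetry allows it.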
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	cfg := config.Get()
	if cfg.Debug {
		jsonData, _ := json.Marshal(params)
		logging.Debug("Prepared messages", "messages", string(jsonData))
	}
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If there is an error, check whether the call can be retried
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				logging.WarnPersist(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		content := openaiResponse.Choices[0].Message.Content

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    o.toolCalls(*openaiResponse),
			Usage:        o.usage(*openaiResponse),
			FinishReason: o.finishReason(string(openaiResponse.Choices[0].FinishReason)),
		}, nil
	}
}

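// stream issues a streaming completion request and emits content deltas, a
// final EventComplete, or an EventError on the returned channel, retrying
// with backoff when shouldRetry allows it.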
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	cfg := config.Get()
	if cfg.Debug {
		jsonData, _ := json.Marshal(params)
		logging.Debug("Prepared messages", "messages", string(jsonData))
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)

			for openaiStream.Next() {
				chunk := openaiStream.Current()
				acc.AddChunk(chunk)

				if tool, ok := acc.JustFinishedToolCall(); ok {
					toolCalls = append(toolCalls, message.ToolCall{
						ID:    tool.Id,
						Name:  tool.Name,
						Input: tool.Arguments,
						Type:  "function",
					})
				}

				for _, choice := range chunk.Choices {
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					}
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				// Stream completed successfully
				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason)),
					},
				}
				close(eventChan)
				return
			}

			// If there is an error, check whether the call can be retried
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				logging.WarnPersist(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
				select {
				case <-ctx.Done():
					// context cancelled
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

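// shouldRetry reports whether a failed request should be retried and the
// delay in milliseconds before the next attempt. Only HTTP 429 and 500
// responses are retried, up to maxRetries attempts; a parseable Retry-After
// header overrides the exponential backoff.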
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	var apierr *openai.Error
	if !errors.As(err, &apierr) {
		return false, 0, err
	}

	if apierr.StatusCode != 429 && apierr.StatusCode != 500 {
		return false, 0, err
	}

	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}

	// Exponential backoff plus a fixed 20% margin; a parseable Retry-After
	// header (in seconds) takes precedence over the computed delay.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs := backoffMs + jitterMs
	if retryAfterValues := apierr.Response.Header.Values("Retry-After"); len(retryAfterValues) > 0 {
		if _, scanErr := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); scanErr == nil {
			retryMs *= 1000
		}
	}
	return true, int64(retryMs), nil
}

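// toolCalls extracts the tool calls from the first choice of a completed
// response.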
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

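// usage converts OpenAI token accounting into TokenUsage, reporting cached
// prompt tokens as cache reads and excluding them from the input count.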
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't report cache writes separately
		CacheReadTokens:     cachedTokens,
	}
}

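// WithOpenAIBaseURL overrides the API base URL, e.g. to target an
// OpenAI-compatible endpoint.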
func WithOpenAIBaseURL(baseURL string) OpenAIOption {
	return func(options *openaiOptions) {
		options.baseURL = baseURL
	}
}

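// WithOpenAIDisableCache records that prompt caching should be disabled for
// this provider.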
func WithOpenAIDisableCache() OpenAIOption {
	return func(options *openaiOptions) {
		options.disableCache = true
	}
}

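// WithReasoningEffort sets the reasoning effort for reasoning-capable models;
// values other than "low", "medium", or "high" fall back to "medium" with a
// warning.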
func WithReasoningEffort(effort string) OpenAIOption {
	return func(options *openaiOptions) {
		defaultReasoningEffort := "medium"
		switch effort {
		case "low", "medium", "high":
			defaultReasoningEffort = effort
		default:
			logging.Warn("Invalid reasoning effort, using default: medium")
		}
		options.reasoningEffort = defaultReasoningEffort
	}
}