diff --git a/internal/llm/provider/anthropic.go b/internal/llm/provider/anthropic.go
index e626f37d998c72cdcc3602236f906cbb41ac1cc0..8e0e3cc623557e1098f4a31fcafcae79805f205e 100644
--- a/internal/llm/provider/anthropic.go
+++ b/internal/llm/provider/anthropic.go
@@ -332,7 +332,7 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message
 
 	// Prepare messages on each attempt in case max_tokens was adjusted
 	preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))
-	opts := []option.RequestOption{option.WithRequestTimeout(time.Minute)}
+	var opts []option.RequestOption
 	if a.isThinkingEnabled() {
 		opts = append(opts, option.WithHeaderAdd("anthropic-beta", "interleaved-thinking-2025-05-14"))
 	}
diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 38497a73a06297976db7361dcf122ec80eb6bca8..4fe96d2b7e11c5e3953781653e8004c72d365226 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -337,7 +337,6 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 	openaiStream := o.client.Chat.Completions.NewStreaming(
 		ctx,
 		params,
-		option.WithRequestTimeout(time.Minute),
 	)
 
 	acc := openai.ChatCompletionAccumulator{}