diff --git a/internal/llm/provider/anthropic.go b/internal/llm/provider/anthropic.go
index 6b655f7f7741a658ea3e7108e1fba0032c95e563..e626f37d998c72cdcc3602236f906cbb41ac1cc0 100644
--- a/internal/llm/provider/anthropic.go
+++ b/internal/llm/provider/anthropic.go
@@ -153,7 +153,6 @@ func (a *anthropicClient) convertMessages(messages []message.Message) (anthropic
 			}
 
 			if len(blocks) == 0 {
-				slog.Warn("There is a message without content, investigate, this should not happen")
 				continue
 			}
 			anthropicMessages = append(anthropicMessages, anthropic.NewAssistantMessage(blocks...))
@@ -333,7 +332,7 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message
 		// Prepare messages on each attempt in case max_tokens was adjusted
 		preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))
 
-		var opts []option.RequestOption
+		opts := []option.RequestOption{option.WithRequestTimeout(time.Minute)}
 		if a.isThinkingEnabled() {
 			opts = append(opts, option.WithHeaderAdd("anthropic-beta", "interleaved-thinking-2025-05-14"))
 		}
diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 8433282fc0b5e771a0a28184895a864b30b0e389..38497a73a06297976db7361dcf122ec80eb6bca8 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -166,7 +166,6 @@ func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessag
 				}
 			}
 			if !hasContent {
-				slog.Warn("There is a message without content, investigate, this should not happen")
 				continue
 			}
 
@@ -338,6 +337,7 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 	openaiStream := o.client.Chat.Completions.NewStreaming(
 		ctx,
 		params,
+		option.WithRequestTimeout(time.Minute),
 	)
 
 	acc := openai.ChatCompletionAccumulator{}