diff --git a/loop/loop.go b/loop/loop.go
index 3a3b58e30867d5da9e68ba57a4a8b18b81f1f1ee..26fabbb7b7a5da072c844aa78e2e8db04d350990 100644
--- a/loop/loop.go
+++ b/loop/loop.go
@@ -388,6 +388,16 @@ func (l *Loop) insertMissingToolResults(req *llm.Request) {
 
 		msg := req.Messages[i]
 		if msg.Role == llm.MessageRoleAssistant {
+			// Handle empty assistant messages - add placeholder content if not the last message
+			// The API requires all messages to have non-empty content except for the optional
+			// final assistant message. Empty content can happen when the model ends its turn
+			// without producing any output.
+			if len(msg.Content) == 0 && i < len(req.Messages)-1 {
+				req.Messages[i].Content = []llm.Content{{Type: llm.ContentTypeText, Text: "(no response)"}}
+				msg = req.Messages[i] // update local copy for subsequent processing
+				l.logger.Debug("added placeholder content to empty assistant message", "index", i)
+			}
+
 			// Track all tool_use IDs in this assistant message
 			prevAssistantToolUseIDs = make(map[string]bool)
 			for _, c := range msg.Content {
diff --git a/loop/loop_test.go b/loop/loop_test.go
index e150a11e5200cb0782588e001893e46027722f81..311d63309514d53d201b31df731ce596ce8cf81f 100644
--- a/loop/loop_test.go
+++ b/loop/loop_test.go
@@ -841,3 +841,126 @@ func TestInsertMissingToolResultsWithEdgeCases(t *testing.T) {
 		}
 	})
 }
+
+func TestInsertMissingToolResults_EmptyAssistantContent(t *testing.T) {
+	// Test for the bug: when an assistant message has empty content (can happen when
+	// the model ends its turn without producing any output), we need to add placeholder
+	// content if it's not the last message. Otherwise the API will reject with:
+	// "messages.N: all messages must have non-empty content except for the optional
+	// final assistant message"
+
+	t.Run("empty assistant content in middle of conversation", func(t *testing.T) {
+		loop := NewLoop(Config{
+			LLM:     NewPredictableService(),
+			History: []llm.Message{},
+		})
+
+		req := &llm.Request{
+			Messages: []llm.Message{
+				{
+					Role:    llm.MessageRoleUser,
+					Content: []llm.Content{{Type: llm.ContentTypeText, Text: "run git fetch"}},
+				},
+				{
+					Role:    llm.MessageRoleAssistant,
+					Content: []llm.Content{{Type: llm.ContentTypeToolUse, ID: "tool1", ToolName: "bash"}},
+				},
+				{
+					Role: llm.MessageRoleUser,
+					Content: []llm.Content{{
+						Type:       llm.ContentTypeToolResult,
+						ToolUseID:  "tool1",
+						ToolResult: []llm.Content{{Type: llm.ContentTypeText, Text: "success"}},
+					}},
+				},
+				{
+					// Empty assistant message - this can happen when model ends turn without output
+					Role:      llm.MessageRoleAssistant,
+					Content:   []llm.Content{},
+					EndOfTurn: true,
+				},
+				{
+					Role:    llm.MessageRoleUser,
+					Content: []llm.Content{{Type: llm.ContentTypeText, Text: "next question"}},
+				},
+			},
+		}
+
+		loop.insertMissingToolResults(req)
+
+		// The empty assistant message (index 3) should now have placeholder content
+		if len(req.Messages[3].Content) == 0 {
+			t.Error("expected placeholder content to be added to empty assistant message")
+		}
+		if req.Messages[3].Content[0].Type != llm.ContentTypeText {
+			t.Error("expected placeholder to be text content")
+		}
+		if req.Messages[3].Content[0].Text != "(no response)" {
+			t.Errorf("expected placeholder text '(no response)', got %q", req.Messages[3].Content[0].Text)
+		}
+	})
+
+	t.Run("empty assistant content at end of conversation - no modification needed", func(t *testing.T) {
+		loop := NewLoop(Config{
+			LLM:     NewPredictableService(),
+			History: []llm.Message{},
+		})
+
+		req := &llm.Request{
+			Messages: []llm.Message{
+				{
+					Role:    llm.MessageRoleUser,
+					Content: []llm.Content{{Type: llm.ContentTypeText, Text: "hello"}},
+				},
+				{
+					// Empty assistant message at end is allowed by the API
+					Role:      llm.MessageRoleAssistant,
+					Content:   []llm.Content{},
+					EndOfTurn: true,
+				},
+			},
+		}
+
+		loop.insertMissingToolResults(req)
+
+		// The empty assistant message at the end should NOT be modified
+		// because the API allows empty content for the final assistant message
+		if len(req.Messages[1].Content) != 0 {
+			t.Error("expected final empty assistant message to remain empty")
+		}
+	})
+
+	t.Run("non-empty assistant content - no modification needed", func(t *testing.T) {
+		loop := NewLoop(Config{
+			LLM:     NewPredictableService(),
+			History: []llm.Message{},
+		})
+
+		req := &llm.Request{
+			Messages: []llm.Message{
+				{
+					Role:    llm.MessageRoleUser,
+					Content: []llm.Content{{Type: llm.ContentTypeText, Text: "hello"}},
+				},
+				{
+					Role:    llm.MessageRoleAssistant,
+					Content: []llm.Content{{Type: llm.ContentTypeText, Text: "hi there"}},
+				},
+				{
+					Role:    llm.MessageRoleUser,
+					Content: []llm.Content{{Type: llm.ContentTypeText, Text: "goodbye"}},
+				},
+			},
+		}
+
+		loop.insertMissingToolResults(req)
+
+		// The assistant message should not be modified
+		if len(req.Messages[1].Content) != 1 {
+			t.Errorf("expected assistant message to have 1 content item, got %d", len(req.Messages[1].Content))
+		}
+		if req.Messages[1].Content[0].Text != "hi there" {
+			t.Errorf("expected assistant message text 'hi there', got %q", req.Messages[1].Content[0].Text)
+		}
+	})
+}