shelley: fix agent still showing working after LLM error

Created by Philip Zeyliger and Claude

Prompt: when the API errors out though, the agent is still listed as working; is that a bug?

The error message recorded when an LLM request fails was missing
EndOfTurn: true. This meant the server never called SetAgentWorking(false)
and the agent stayed in the "working" state indefinitely.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

Change summary

loop/loop.go      | 2 ++
loop/loop_test.go | 5 +++++
2 files changed, 7 insertions(+)

Detailed changes

loop/loop.go 🔗

@@ -245,6 +245,7 @@ func (l *Loop) processLLMRequest(ctx context.Context) error {
 	resp, err := llmService.Do(llmCtx, req)
 	if err != nil {
 		// Record the error as a message so it can be displayed in the UI
+		// EndOfTurn must be true so the agent working state is properly updated
 		errorMessage := llm.Message{
 			Role: llm.MessageRoleAssistant,
 			Content: []llm.Content{
@@ -253,6 +254,7 @@ func (l *Loop) processLLMRequest(ctx context.Context) error {
 					Text: fmt.Sprintf("LLM request failed: %v", err),
 				},
 			},
+			EndOfTurn: true,
 		}
 		if recordErr := l.recordMessage(ctx, errorMessage, llm.Usage{}); recordErr != nil {
 			l.logger.Error("failed to record error message", "error", recordErr)

loop/loop_test.go 🔗

@@ -1548,6 +1548,11 @@ func TestProcessLLMRequestError(t *testing.T) {
 	if !strings.Contains(recordedMessages[0].Content[0].Text, "LLM request failed") {
 		t.Errorf("expected error message to contain 'LLM request failed', got: %s", recordedMessages[0].Content[0].Text)
 	}
+
+	// Verify EndOfTurn is set so the agent working state is properly updated
+	if !recordedMessages[0].EndOfTurn {
+		t.Error("expected error message to have EndOfTurn=true so agent working state is updated")
+	}
 }
 
 // errorLLMService is a test LLM service that always returns an error