1use crate::{
2 agent_profile::AgentProfile,
3 context::{AgentContext, AgentContextHandle, ContextLoadResult, LoadedContext},
4 thread_store::{
5 SerializedCrease, SerializedLanguageModel, SerializedMessage, SerializedMessageSegment,
6 SerializedThread, SerializedToolResult, SerializedToolUse, SharedProjectContext,
7 ThreadStore,
8 },
9 tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
10};
11use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
12use anyhow::{Result, anyhow};
13use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
14use chrono::{DateTime, Utc};
15use client::{ModelRequestUsage, RequestUsage};
16use collections::HashMap;
17use feature_flags::{self, FeatureFlagAppExt};
18use futures::{FutureExt, StreamExt as _, future::Shared};
19use git::repository::DiffType;
20use gpui::{
21 AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
22 WeakEntity, Window,
23};
24use http_client::StatusCode;
25use language_model::{
26 ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
27 LanguageModelExt as _, LanguageModelId, LanguageModelRegistry, LanguageModelRequest,
28 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
29 LanguageModelToolResultContent, LanguageModelToolUse, LanguageModelToolUseId, MessageContent,
30 ModelRequestLimitReachedError, PaymentRequiredError, Role, SelectedModel, StopReason,
31 TokenUsage,
32};
33use postage::stream::Stream as _;
34use project::{
35 Project,
36 git_store::{GitStore, GitStoreCheckpoint, RepositoryState},
37};
38use prompt_store::{ModelContext, PromptBuilder};
39use proto::Plan;
40use schemars::JsonSchema;
41use serde::{Deserialize, Serialize};
42use settings::Settings;
43use std::{
44 io::Write,
45 ops::Range,
46 sync::Arc,
47 time::{Duration, Instant},
48};
49use thiserror::Error;
50use util::{ResultExt as _, debug_panic, post_inc};
51use uuid::Uuid;
52use zed_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
53
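// Defaults for retrying failed completion requests (see `RetryStrategy` and
// `handle_retryable_error_with_delay`).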
54const MAX_RETRY_ATTEMPTS: u8 = 4;
55const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
56
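/// How a failed completion request should be retried: either with a delay that grows on
/// each attempt or with a fixed delay, in both cases up to `max_attempts` attempts.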
57#[derive(Debug, Clone)]
58enum RetryStrategy {
59 ExponentialBackoff {
60 initial_delay: Duration,
61 max_attempts: u8,
62 },
63 Fixed {
64 delay: Duration,
65 max_attempts: u8,
66 },
67}
68
69#[derive(
70 Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
71)]
72pub struct ThreadId(Arc<str>);
73
74impl ThreadId {
75 pub fn new() -> Self {
76 Self(Uuid::new_v4().to_string().into())
77 }
78}
79
80impl std::fmt::Display for ThreadId {
81 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
82 write!(f, "{}", self.0)
83 }
84}
85
86impl From<&str> for ThreadId {
87 fn from(value: &str) -> Self {
88 Self(value.into())
89 }
90}
91
92/// The ID of the user prompt that initiated a request.
93///
94/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
95#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
96pub struct PromptId(Arc<str>);
97
98impl PromptId {
99 pub fn new() -> Self {
100 Self(Uuid::new_v4().to_string().into())
101 }
102}
103
104impl std::fmt::Display for PromptId {
105 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
106 write!(f, "{}", self.0)
107 }
108}
109
110#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
111pub struct MessageId(pub(crate) usize);
112
113impl MessageId {
114 fn post_inc(&mut self) -> Self {
115 Self(post_inc(&mut self.0))
116 }
117
118 pub fn as_usize(&self) -> usize {
119 self.0
120 }
121}
122
123/// Stored information that can be used to resurrect a context crease when creating an editor for a past message.
124#[derive(Clone, Debug)]
125pub struct MessageCrease {
126 pub range: Range<usize>,
127 pub icon_path: SharedString,
128 pub label: SharedString,
129 /// None for a deserialized message, Some otherwise.
130 pub context: Option<AgentContextHandle>,
131}
132
133/// A message in a [`Thread`].
134#[derive(Debug, Clone)]
135pub struct Message {
136 pub id: MessageId,
137 pub role: Role,
138 pub segments: Vec<MessageSegment>,
139 pub loaded_context: LoadedContext,
140 pub creases: Vec<MessageCrease>,
141 pub is_hidden: bool,
142 pub ui_only: bool,
143}
144
145impl Message {
    /// Returns whether the message contains any meaningful text that should be displayed.
    /// The model sometimes runs a tool without producing any text, or emits only a marker ([`USING_TOOL_MARKER`]).
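    ///
    /// For example, an Assistant message whose only segment is an empty `Text("")`
    /// placeholder reports `false`, so the UI can skip rendering it.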
148 pub fn should_display_content(&self) -> bool {
149 self.segments.iter().all(|segment| segment.should_display())
150 }
151
152 pub fn push_thinking(&mut self, text: &str, signature: Option<String>) {
153 if let Some(MessageSegment::Thinking {
154 text: segment,
155 signature: current_signature,
156 }) = self.segments.last_mut()
157 {
158 if let Some(signature) = signature {
159 *current_signature = Some(signature);
160 }
161 segment.push_str(text);
162 } else {
163 self.segments.push(MessageSegment::Thinking {
164 text: text.to_string(),
165 signature,
166 });
167 }
168 }
169
170 pub fn push_redacted_thinking(&mut self, data: String) {
171 self.segments.push(MessageSegment::RedactedThinking(data));
172 }
173
174 pub fn push_text(&mut self, text: &str) {
175 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
176 segment.push_str(text);
177 } else {
178 self.segments.push(MessageSegment::Text(text.to_string()));
179 }
180 }
181
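    /// Renders the message as plain text: the loaded context first, then each segment in
    /// order, with thinking segments wrapped in `<think>…</think>` tags and redacted
    /// thinking omitted.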
182 pub fn to_string(&self) -> String {
183 let mut result = String::new();
184
185 if !self.loaded_context.text.is_empty() {
186 result.push_str(&self.loaded_context.text);
187 }
188
189 for segment in &self.segments {
190 match segment {
191 MessageSegment::Text(text) => result.push_str(text),
192 MessageSegment::Thinking { text, .. } => {
193 result.push_str("<think>\n");
194 result.push_str(text);
195 result.push_str("\n</think>");
196 }
197 MessageSegment::RedactedThinking(_) => {}
198 }
199 }
200
201 result
202 }
203}
204
205#[derive(Debug, Clone, PartialEq, Eq)]
206pub enum MessageSegment {
207 Text(String),
208 Thinking {
209 text: String,
210 signature: Option<String>,
211 },
212 RedactedThinking(String),
213}
214
215impl MessageSegment {
    pub fn should_display(&self) -> bool {
        match self {
            Self::Text(text) => !text.is_empty(),
            Self::Thinking { text, .. } => !text.is_empty(),
            Self::RedactedThinking(_) => false,
        }
    }
223
224 pub fn text(&self) -> Option<&str> {
225 match self {
226 MessageSegment::Text(text) => Some(text),
227 _ => None,
228 }
229 }
230}
231
232#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
233pub struct ProjectSnapshot {
234 pub worktree_snapshots: Vec<WorktreeSnapshot>,
235 pub unsaved_buffer_paths: Vec<String>,
236 pub timestamp: DateTime<Utc>,
237}
238
239#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
240pub struct WorktreeSnapshot {
241 pub worktree_path: String,
242 pub git_state: Option<GitState>,
243}
244
245#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
246pub struct GitState {
247 pub remote_url: Option<String>,
248 pub head_sha: Option<String>,
249 pub current_branch: Option<String>,
250 pub diff: Option<String>,
251}
252
253#[derive(Clone, Debug)]
254pub struct ThreadCheckpoint {
255 message_id: MessageId,
256 git_checkpoint: GitStoreCheckpoint,
257}
258
259#[derive(Copy, Clone, Debug, PartialEq, Eq)]
260pub enum ThreadFeedback {
261 Positive,
262 Negative,
263}
264
265pub enum LastRestoreCheckpoint {
266 Pending {
267 message_id: MessageId,
268 },
269 Error {
270 message_id: MessageId,
271 error: String,
272 },
273}
274
275impl LastRestoreCheckpoint {
276 pub fn message_id(&self) -> MessageId {
277 match self {
278 LastRestoreCheckpoint::Pending { message_id } => *message_id,
279 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
280 }
281 }
282}
283
284#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
285pub enum DetailedSummaryState {
286 #[default]
287 NotGenerated,
288 Generating {
289 message_id: MessageId,
290 },
291 Generated {
292 text: SharedString,
293 message_id: MessageId,
294 },
295}
296
297impl DetailedSummaryState {
298 fn text(&self) -> Option<SharedString> {
299 if let Self::Generated { text, .. } = self {
300 Some(text.clone())
301 } else {
302 None
303 }
304 }
305}
306
307#[derive(Default, Debug)]
308pub struct TotalTokenUsage {
309 pub total: u64,
310 pub max: u64,
311}
312
313impl TotalTokenUsage {
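    /// Classifies the current usage against the model's context window. For example,
    /// with `max: 1000`, a `total` of 1000 or more is `Exceeded`, 800 through 999 is
    /// `Warning` (the default 0.8 threshold), and anything lower is `Normal`; when
    /// `max` is 0 (no model is selected) the result is always `Normal`.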
314 pub fn ratio(&self) -> TokenUsageRatio {
315 #[cfg(debug_assertions)]
316 let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
317 .unwrap_or("0.8".to_string())
318 .parse()
319 .unwrap();
320 #[cfg(not(debug_assertions))]
321 let warning_threshold: f32 = 0.8;
322
323 // When the maximum is unknown because there is no selected model,
324 // avoid showing the token limit warning.
325 if self.max == 0 {
326 TokenUsageRatio::Normal
327 } else if self.total >= self.max {
328 TokenUsageRatio::Exceeded
329 } else if self.total as f32 / self.max as f32 >= warning_threshold {
330 TokenUsageRatio::Warning
331 } else {
332 TokenUsageRatio::Normal
333 }
334 }
335
336 pub fn add(&self, tokens: u64) -> TotalTokenUsage {
337 TotalTokenUsage {
338 total: self.total + tokens,
339 max: self.max,
340 }
341 }
342}
343
344#[derive(Debug, Default, PartialEq, Eq)]
345pub enum TokenUsageRatio {
346 #[default]
347 Normal,
348 Warning,
349 Exceeded,
350}
351
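/// Where a pending completion currently sits in the provider's queue, as reported by
/// `CompletionRequestStatus` updates while the request is streaming.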
352#[derive(Debug, Clone, Copy)]
353pub enum QueueState {
354 Sending,
355 Queued { position: usize },
356 Started,
357}
358
359/// A thread of conversation with the LLM.
360pub struct Thread {
361 id: ThreadId,
362 updated_at: DateTime<Utc>,
363 summary: ThreadSummary,
364 pending_summary: Task<Option<()>>,
365 detailed_summary_task: Task<Option<()>>,
366 detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
367 detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
368 completion_mode: agent_settings::CompletionMode,
369 messages: Vec<Message>,
370 next_message_id: MessageId,
371 last_prompt_id: PromptId,
372 project_context: SharedProjectContext,
373 checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
374 completion_count: usize,
375 pending_completions: Vec<PendingCompletion>,
376 project: Entity<Project>,
377 prompt_builder: Arc<PromptBuilder>,
378 tools: Entity<ToolWorkingSet>,
379 tool_use: ToolUseState,
380 action_log: Entity<ActionLog>,
381 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
382 pending_checkpoint: Option<ThreadCheckpoint>,
383 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
384 request_token_usage: Vec<TokenUsage>,
385 cumulative_token_usage: TokenUsage,
386 exceeded_window_error: Option<ExceededWindowError>,
387 tool_use_limit_reached: bool,
388 feedback: Option<ThreadFeedback>,
389 retry_state: Option<RetryState>,
390 message_feedback: HashMap<MessageId, ThreadFeedback>,
391 last_auto_capture_at: Option<Instant>,
392 last_received_chunk_at: Option<Instant>,
393 request_callback: Option<
394 Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
395 >,
396 remaining_turns: u32,
397 configured_model: Option<ConfiguredModel>,
398 profile: AgentProfile,
399 last_error_context: Option<(Arc<dyn LanguageModel>, CompletionIntent)>,
400}
401
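/// Tracks retries of the current completion: the attempt we are on, the maximum number
/// of attempts allowed, and the intent to resend the request with.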
402#[derive(Clone, Debug)]
403struct RetryState {
404 attempt: u8,
405 max_attempts: u8,
406 intent: CompletionIntent,
407}
408
409#[derive(Clone, Debug, PartialEq, Eq)]
410pub enum ThreadSummary {
411 Pending,
412 Generating,
413 Ready(SharedString),
414 Error,
415}
416
417impl ThreadSummary {
418 pub const DEFAULT: SharedString = SharedString::new_static("New Thread");
419
420 pub fn or_default(&self) -> SharedString {
421 self.unwrap_or(Self::DEFAULT)
422 }
423
424 pub fn unwrap_or(&self, message: impl Into<SharedString>) -> SharedString {
425 self.ready().unwrap_or_else(|| message.into())
426 }
427
428 pub fn ready(&self) -> Option<SharedString> {
429 match self {
430 ThreadSummary::Ready(summary) => Some(summary.clone()),
431 ThreadSummary::Pending | ThreadSummary::Generating | ThreadSummary::Error => None,
432 }
433 }
434}
435
436#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
437pub struct ExceededWindowError {
438 /// Model used when last message exceeded context window
439 model_id: LanguageModelId,
440 /// Token count including last message
441 token_count: u64,
442}
443
444impl Thread {
445 pub fn new(
446 project: Entity<Project>,
447 tools: Entity<ToolWorkingSet>,
448 prompt_builder: Arc<PromptBuilder>,
449 system_prompt: SharedProjectContext,
450 cx: &mut Context<Self>,
451 ) -> Self {
452 let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
453 let configured_model = LanguageModelRegistry::read_global(cx).default_model();
454 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
455
456 Self {
457 id: ThreadId::new(),
458 updated_at: Utc::now(),
459 summary: ThreadSummary::Pending,
460 pending_summary: Task::ready(None),
461 detailed_summary_task: Task::ready(None),
462 detailed_summary_tx,
463 detailed_summary_rx,
464 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
465 messages: Vec::new(),
466 next_message_id: MessageId(0),
467 last_prompt_id: PromptId::new(),
468 project_context: system_prompt,
469 checkpoints_by_message: HashMap::default(),
470 completion_count: 0,
471 pending_completions: Vec::new(),
472 project: project.clone(),
473 prompt_builder,
474 tools: tools.clone(),
475 last_restore_checkpoint: None,
476 pending_checkpoint: None,
477 tool_use: ToolUseState::new(tools.clone()),
478 action_log: cx.new(|_| ActionLog::new(project.clone())),
479 initial_project_snapshot: {
480 let project_snapshot = Self::project_snapshot(project, cx);
481 cx.foreground_executor()
482 .spawn(async move { Some(project_snapshot.await) })
483 .shared()
484 },
485 request_token_usage: Vec::new(),
486 cumulative_token_usage: TokenUsage::default(),
487 exceeded_window_error: None,
488 tool_use_limit_reached: false,
489 feedback: None,
490 retry_state: None,
491 message_feedback: HashMap::default(),
492 last_auto_capture_at: None,
493 last_error_context: None,
494 last_received_chunk_at: None,
495 request_callback: None,
496 remaining_turns: u32::MAX,
497 configured_model: configured_model.clone(),
498 profile: AgentProfile::new(profile_id, tools),
499 }
500 }
501
502 pub fn deserialize(
503 id: ThreadId,
504 serialized: SerializedThread,
505 project: Entity<Project>,
506 tools: Entity<ToolWorkingSet>,
507 prompt_builder: Arc<PromptBuilder>,
508 project_context: SharedProjectContext,
509 window: Option<&mut Window>, // None in headless mode
510 cx: &mut Context<Self>,
511 ) -> Self {
512 let next_message_id = MessageId(
513 serialized
514 .messages
515 .last()
516 .map(|message| message.id.0 + 1)
517 .unwrap_or(0),
518 );
519 let tool_use = ToolUseState::from_serialized_messages(
520 tools.clone(),
521 &serialized.messages,
522 project.clone(),
523 window,
524 cx,
525 );
526 let (detailed_summary_tx, detailed_summary_rx) =
527 postage::watch::channel_with(serialized.detailed_summary_state);
528
529 let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
530 serialized
531 .model
532 .and_then(|model| {
533 let model = SelectedModel {
534 provider: model.provider.clone().into(),
535 model: model.model.clone().into(),
536 };
537 registry.select_model(&model, cx)
538 })
539 .or_else(|| registry.default_model())
540 });
541
542 let completion_mode = serialized
543 .completion_mode
544 .unwrap_or_else(|| AgentSettings::get_global(cx).preferred_completion_mode);
545 let profile_id = serialized
546 .profile
547 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
548
549 Self {
550 id,
551 updated_at: serialized.updated_at,
552 summary: ThreadSummary::Ready(serialized.summary),
553 pending_summary: Task::ready(None),
554 detailed_summary_task: Task::ready(None),
555 detailed_summary_tx,
556 detailed_summary_rx,
557 completion_mode,
558 retry_state: None,
559 messages: serialized
560 .messages
561 .into_iter()
562 .map(|message| Message {
563 id: message.id,
564 role: message.role,
565 segments: message
566 .segments
567 .into_iter()
568 .map(|segment| match segment {
569 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
570 SerializedMessageSegment::Thinking { text, signature } => {
571 MessageSegment::Thinking { text, signature }
572 }
573 SerializedMessageSegment::RedactedThinking { data } => {
574 MessageSegment::RedactedThinking(data)
575 }
576 })
577 .collect(),
578 loaded_context: LoadedContext {
579 contexts: Vec::new(),
580 text: message.context,
581 images: Vec::new(),
582 },
583 creases: message
584 .creases
585 .into_iter()
586 .map(|crease| MessageCrease {
587 range: crease.start..crease.end,
588 icon_path: crease.icon_path,
589 label: crease.label,
590 context: None,
591 })
592 .collect(),
593 is_hidden: message.is_hidden,
594 ui_only: false, // UI-only messages are not persisted
595 })
596 .collect(),
597 next_message_id,
598 last_prompt_id: PromptId::new(),
599 project_context,
600 checkpoints_by_message: HashMap::default(),
601 completion_count: 0,
602 pending_completions: Vec::new(),
603 last_restore_checkpoint: None,
604 pending_checkpoint: None,
605 project: project.clone(),
606 prompt_builder,
607 tools: tools.clone(),
608 tool_use,
609 action_log: cx.new(|_| ActionLog::new(project)),
610 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
611 request_token_usage: serialized.request_token_usage,
612 cumulative_token_usage: serialized.cumulative_token_usage,
613 exceeded_window_error: None,
614 tool_use_limit_reached: serialized.tool_use_limit_reached,
615 feedback: None,
616 message_feedback: HashMap::default(),
617 last_auto_capture_at: None,
618 last_error_context: None,
619 last_received_chunk_at: None,
620 request_callback: None,
621 remaining_turns: u32::MAX,
622 configured_model,
623 profile: AgentProfile::new(profile_id, tools),
624 }
625 }
626
627 pub fn set_request_callback(
628 &mut self,
629 callback: impl 'static
630 + FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>]),
631 ) {
632 self.request_callback = Some(Box::new(callback));
633 }
634
635 pub fn id(&self) -> &ThreadId {
636 &self.id
637 }
638
639 pub fn profile(&self) -> &AgentProfile {
640 &self.profile
641 }
642
643 pub fn set_profile(&mut self, id: AgentProfileId, cx: &mut Context<Self>) {
644 if &id != self.profile.id() {
645 self.profile = AgentProfile::new(id, self.tools.clone());
646 cx.emit(ThreadEvent::ProfileChanged);
647 }
648 }
649
650 pub fn is_empty(&self) -> bool {
651 self.messages.is_empty()
652 }
653
654 pub fn updated_at(&self) -> DateTime<Utc> {
655 self.updated_at
656 }
657
658 pub fn touch_updated_at(&mut self) {
659 self.updated_at = Utc::now();
660 }
661
662 pub fn advance_prompt_id(&mut self) {
663 self.last_prompt_id = PromptId::new();
664 }
665
666 pub fn project_context(&self) -> SharedProjectContext {
667 self.project_context.clone()
668 }
669
670 pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
671 if self.configured_model.is_none() {
672 self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
673 }
674 self.configured_model.clone()
675 }
676
677 pub fn configured_model(&self) -> Option<ConfiguredModel> {
678 self.configured_model.clone()
679 }
680
681 pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
682 self.configured_model = model;
683 cx.notify();
684 }
685
686 pub fn summary(&self) -> &ThreadSummary {
687 &self.summary
688 }
689
690 pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
691 let current_summary = match &self.summary {
692 ThreadSummary::Pending | ThreadSummary::Generating => return,
693 ThreadSummary::Ready(summary) => summary,
694 ThreadSummary::Error => &ThreadSummary::DEFAULT,
695 };
696
697 let mut new_summary = new_summary.into();
698
699 if new_summary.is_empty() {
700 new_summary = ThreadSummary::DEFAULT;
701 }
702
703 if current_summary != &new_summary {
704 self.summary = ThreadSummary::Ready(new_summary);
705 cx.emit(ThreadEvent::SummaryChanged);
706 }
707 }
708
709 pub fn completion_mode(&self) -> CompletionMode {
710 self.completion_mode
711 }
712
713 pub fn set_completion_mode(&mut self, mode: CompletionMode) {
714 self.completion_mode = mode;
715 }
716
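    /// Looks up a message by ID using binary search; this relies on `messages` staying
    /// sorted by ascending `MessageId`, which holds because messages are appended with
    /// monotonically increasing IDs from `next_message_id.post_inc()`.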
717 pub fn message(&self, id: MessageId) -> Option<&Message> {
718 let index = self
719 .messages
720 .binary_search_by(|message| message.id.cmp(&id))
721 .ok()?;
722
723 self.messages.get(index)
724 }
725
726 pub fn messages(&self) -> impl ExactSizeIterator<Item = &Message> {
727 self.messages.iter()
728 }
729
730 pub fn is_generating(&self) -> bool {
731 !self.pending_completions.is_empty() || !self.all_tools_finished()
732 }
733
    /// Indicates whether streaming of language model events has stalled.
    /// Returns `None` when no completion is currently streaming.
    pub fn is_generation_stale(&self) -> Option<bool> {
        const STALE_THRESHOLD_MS: u128 = 250;

        self.last_received_chunk_at
            .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD_MS)
    }
742
743 fn received_chunk(&mut self) {
744 self.last_received_chunk_at = Some(Instant::now());
745 }
746
747 pub fn queue_state(&self) -> Option<QueueState> {
748 self.pending_completions
749 .first()
750 .map(|pending_completion| pending_completion.queue_state)
751 }
752
753 pub fn tools(&self) -> &Entity<ToolWorkingSet> {
754 &self.tools
755 }
756
757 pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
758 self.tool_use
759 .pending_tool_uses()
760 .into_iter()
761 .find(|tool_use| &tool_use.id == id)
762 }
763
764 pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
765 self.tool_use
766 .pending_tool_uses()
767 .into_iter()
768 .filter(|tool_use| tool_use.status.needs_confirmation())
769 }
770
771 pub fn has_pending_tool_uses(&self) -> bool {
772 !self.tool_use.pending_tool_uses().is_empty()
773 }
774
775 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
776 self.checkpoints_by_message.get(&id).cloned()
777 }
778
779 pub fn restore_checkpoint(
780 &mut self,
781 checkpoint: ThreadCheckpoint,
782 cx: &mut Context<Self>,
783 ) -> Task<Result<()>> {
784 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
785 message_id: checkpoint.message_id,
786 });
787 cx.emit(ThreadEvent::CheckpointChanged);
788 cx.notify();
789
790 let git_store = self.project().read(cx).git_store().clone();
791 let restore = git_store.update(cx, |git_store, cx| {
792 git_store.restore_checkpoint(checkpoint.git_checkpoint.clone(), cx)
793 });
794
795 cx.spawn(async move |this, cx| {
796 let result = restore.await;
797 this.update(cx, |this, cx| {
798 if let Err(err) = result.as_ref() {
799 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
800 message_id: checkpoint.message_id,
801 error: err.to_string(),
802 });
803 } else {
804 this.truncate(checkpoint.message_id, cx);
805 this.last_restore_checkpoint = None;
806 }
807 this.pending_checkpoint = None;
808 cx.emit(ThreadEvent::CheckpointChanged);
809 cx.notify();
810 })?;
811 result
812 })
813 }
814
815 fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
816 let pending_checkpoint = if self.is_generating() {
817 return;
818 } else if let Some(checkpoint) = self.pending_checkpoint.take() {
819 checkpoint
820 } else {
821 return;
822 };
823
824 self.finalize_checkpoint(pending_checkpoint, cx);
825 }
826
827 fn finalize_checkpoint(
828 &mut self,
829 pending_checkpoint: ThreadCheckpoint,
830 cx: &mut Context<Self>,
831 ) {
832 let git_store = self.project.read(cx).git_store().clone();
833 let final_checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
834 cx.spawn(async move |this, cx| match final_checkpoint.await {
835 Ok(final_checkpoint) => {
836 let equal = git_store
837 .update(cx, |store, cx| {
838 store.compare_checkpoints(
839 pending_checkpoint.git_checkpoint.clone(),
840 final_checkpoint.clone(),
841 cx,
842 )
843 })?
844 .await
845 .unwrap_or(false);
846
847 if !equal {
848 this.update(cx, |this, cx| {
849 this.insert_checkpoint(pending_checkpoint, cx)
850 })?;
851 }
852
853 Ok(())
854 }
855 Err(_) => this.update(cx, |this, cx| {
856 this.insert_checkpoint(pending_checkpoint, cx)
857 }),
858 })
859 .detach();
860 }
861
862 fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
863 self.checkpoints_by_message
864 .insert(checkpoint.message_id, checkpoint);
865 cx.emit(ThreadEvent::CheckpointChanged);
866 cx.notify();
867 }
868
869 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
870 self.last_restore_checkpoint.as_ref()
871 }
872
873 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
874 let Some(message_ix) = self
875 .messages
876 .iter()
877 .rposition(|message| message.id == message_id)
878 else {
879 return;
880 };
881 for deleted_message in self.messages.drain(message_ix..) {
882 self.checkpoints_by_message.remove(&deleted_message.id);
883 }
884 cx.notify();
885 }
886
887 pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AgentContext> {
888 self.messages
889 .iter()
890 .find(|message| message.id == id)
891 .into_iter()
892 .flat_map(|message| message.loaded_context.contexts.iter())
893 }
894
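    /// Returns whether the message at index `ix` ends a turn: either it is the last
    /// message of a thread that is not currently generating, or it is an Assistant
    /// message followed by a visible (non-hidden) User message.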
895 pub fn is_turn_end(&self, ix: usize) -> bool {
896 if self.messages.is_empty() {
897 return false;
898 }
899
900 if !self.is_generating() && ix == self.messages.len() - 1 {
901 return true;
902 }
903
904 let Some(message) = self.messages.get(ix) else {
905 return false;
906 };
907
908 if message.role != Role::Assistant {
909 return false;
910 }
911
        self.messages
            .get(ix + 1)
            .map(|next_message| next_message.role == Role::User && !next_message.is_hidden)
            .unwrap_or(false)
919 }
920
921 pub fn tool_use_limit_reached(&self) -> bool {
922 self.tool_use_limit_reached
923 }
924
925 /// Returns whether all of the tool uses have finished running.
926 pub fn all_tools_finished(&self) -> bool {
927 // If the only pending tool uses left are the ones with errors, then
928 // that means that we've finished running all of the pending tools.
929 self.tool_use
930 .pending_tool_uses()
931 .iter()
932 .all(|pending_tool_use| pending_tool_use.status.is_error())
933 }
934
935 /// Returns whether any pending tool uses may perform edits
936 pub fn has_pending_edit_tool_uses(&self) -> bool {
937 self.tool_use
938 .pending_tool_uses()
939 .iter()
940 .filter(|pending_tool_use| !pending_tool_use.status.is_error())
941 .any(|pending_tool_use| pending_tool_use.may_perform_edits)
942 }
943
944 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
945 self.tool_use.tool_uses_for_message(id, cx)
946 }
947
948 pub fn tool_results_for_message(
949 &self,
950 assistant_message_id: MessageId,
951 ) -> Vec<&LanguageModelToolResult> {
952 self.tool_use.tool_results_for_message(assistant_message_id)
953 }
954
955 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
956 self.tool_use.tool_result(id)
957 }
958
959 pub fn output_for_tool(&self, id: &LanguageModelToolUseId) -> Option<&Arc<str>> {
960 match &self.tool_use.tool_result(id)?.content {
961 LanguageModelToolResultContent::Text(text) => Some(text),
962 LanguageModelToolResultContent::Image(_) => {
                // TODO: We should display the image.
964 None
965 }
966 }
967 }
968
969 pub fn card_for_tool(&self, id: &LanguageModelToolUseId) -> Option<AnyToolCard> {
970 self.tool_use.tool_result_card(id).cloned()
971 }
972
    /// Returns the tools that are both enabled and supported by the model.
974 pub fn available_tools(
975 &self,
976 cx: &App,
977 model: Arc<dyn LanguageModel>,
978 ) -> Vec<LanguageModelRequestTool> {
979 if model.supports_tools() {
980 self.profile
981 .enabled_tools(cx)
982 .into_iter()
983 .filter_map(|(name, tool)| {
984 // Skip tools that cannot be supported
985 let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
986 Some(LanguageModelRequestTool {
987 name: name.into(),
988 description: tool.description(),
989 input_schema,
990 })
991 })
992 .collect()
993 } else {
994 Vec::default()
995 }
996 }
997
998 pub fn insert_user_message(
999 &mut self,
1000 text: impl Into<String>,
1001 loaded_context: ContextLoadResult,
1002 git_checkpoint: Option<GitStoreCheckpoint>,
1003 creases: Vec<MessageCrease>,
1004 cx: &mut Context<Self>,
1005 ) -> MessageId {
1006 if !loaded_context.referenced_buffers.is_empty() {
1007 self.action_log.update(cx, |log, cx| {
1008 for buffer in loaded_context.referenced_buffers {
1009 log.buffer_read(buffer, cx);
1010 }
1011 });
1012 }
1013
1014 let message_id = self.insert_message(
1015 Role::User,
1016 vec![MessageSegment::Text(text.into())],
1017 loaded_context.loaded_context,
1018 creases,
1019 false,
1020 cx,
1021 );
1022
1023 if let Some(git_checkpoint) = git_checkpoint {
1024 self.pending_checkpoint = Some(ThreadCheckpoint {
1025 message_id,
1026 git_checkpoint,
1027 });
1028 }
1029
1030 self.auto_capture_telemetry(cx);
1031
1032 message_id
1033 }
1034
1035 pub fn insert_invisible_continue_message(&mut self, cx: &mut Context<Self>) -> MessageId {
1036 let id = self.insert_message(
1037 Role::User,
1038 vec![MessageSegment::Text("Continue where you left off".into())],
1039 LoadedContext::default(),
1040 vec![],
1041 true,
1042 cx,
1043 );
1044 self.pending_checkpoint = None;
1045
1046 id
1047 }
1048
1049 pub fn insert_assistant_message(
1050 &mut self,
1051 segments: Vec<MessageSegment>,
1052 cx: &mut Context<Self>,
1053 ) -> MessageId {
1054 self.insert_message(
1055 Role::Assistant,
1056 segments,
1057 LoadedContext::default(),
1058 Vec::new(),
1059 false,
1060 cx,
1061 )
1062 }
1063
1064 pub fn insert_message(
1065 &mut self,
1066 role: Role,
1067 segments: Vec<MessageSegment>,
1068 loaded_context: LoadedContext,
1069 creases: Vec<MessageCrease>,
1070 is_hidden: bool,
1071 cx: &mut Context<Self>,
1072 ) -> MessageId {
1073 let id = self.next_message_id.post_inc();
1074 self.messages.push(Message {
1075 id,
1076 role,
1077 segments,
1078 loaded_context,
1079 creases,
1080 is_hidden,
1081 ui_only: false,
1082 });
1083 self.touch_updated_at();
1084 cx.emit(ThreadEvent::MessageAdded(id));
1085 id
1086 }
1087
1088 pub fn edit_message(
1089 &mut self,
1090 id: MessageId,
1091 new_role: Role,
1092 new_segments: Vec<MessageSegment>,
1093 creases: Vec<MessageCrease>,
1094 loaded_context: Option<LoadedContext>,
1095 checkpoint: Option<GitStoreCheckpoint>,
1096 cx: &mut Context<Self>,
1097 ) -> bool {
1098 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
1099 return false;
1100 };
1101 message.role = new_role;
1102 message.segments = new_segments;
1103 message.creases = creases;
1104 if let Some(context) = loaded_context {
1105 message.loaded_context = context;
1106 }
1107 if let Some(git_checkpoint) = checkpoint {
1108 self.checkpoints_by_message.insert(
1109 id,
1110 ThreadCheckpoint {
1111 message_id: id,
1112 git_checkpoint,
1113 },
1114 );
1115 }
1116 self.touch_updated_at();
1117 cx.emit(ThreadEvent::MessageEdited(id));
1118 true
1119 }
1120
1121 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
1122 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
1123 return false;
1124 };
1125 self.messages.remove(index);
1126 self.touch_updated_at();
1127 cx.emit(ThreadEvent::MessageDeleted(id));
1128 true
1129 }
1130
1131 /// Returns the representation of this [`Thread`] in a textual form.
1132 ///
1133 /// This is the representation we use when attaching a thread as context to another thread.
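    ///
    /// Each message is rendered as a role label on its own line followed by its
    /// segments, for example:
    ///
    /// ```text
    /// User:
    /// What does this function do?
    /// Agent:
    /// <think>recalling the call sites</think>It parses the configuration file.
    /// ```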
1134 pub fn text(&self) -> String {
1135 let mut text = String::new();
1136
1137 for message in &self.messages {
1138 text.push_str(match message.role {
1139 language_model::Role::User => "User:",
1140 language_model::Role::Assistant => "Agent:",
1141 language_model::Role::System => "System:",
1142 });
1143 text.push('\n');
1144
1145 for segment in &message.segments {
1146 match segment {
1147 MessageSegment::Text(content) => text.push_str(content),
1148 MessageSegment::Thinking { text: content, .. } => {
1149 text.push_str(&format!("<think>{}</think>", content))
1150 }
1151 MessageSegment::RedactedThinking(_) => {}
1152 }
1153 }
1154 text.push('\n');
1155 }
1156
1157 text
1158 }
1159
1160 /// Serializes this thread into a format for storage or telemetry.
1161 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
1162 let initial_project_snapshot = self.initial_project_snapshot.clone();
1163 cx.spawn(async move |this, cx| {
1164 let initial_project_snapshot = initial_project_snapshot.await;
1165 this.read_with(cx, |this, cx| SerializedThread {
1166 version: SerializedThread::VERSION.to_string(),
1167 summary: this.summary().or_default(),
1168 updated_at: this.updated_at(),
1169 messages: this
1170 .messages()
1171 .filter(|message| !message.ui_only)
1172 .map(|message| SerializedMessage {
1173 id: message.id,
1174 role: message.role,
1175 segments: message
1176 .segments
1177 .iter()
1178 .map(|segment| match segment {
1179 MessageSegment::Text(text) => {
1180 SerializedMessageSegment::Text { text: text.clone() }
1181 }
1182 MessageSegment::Thinking { text, signature } => {
1183 SerializedMessageSegment::Thinking {
1184 text: text.clone(),
1185 signature: signature.clone(),
1186 }
1187 }
1188 MessageSegment::RedactedThinking(data) => {
1189 SerializedMessageSegment::RedactedThinking {
1190 data: data.clone(),
1191 }
1192 }
1193 })
1194 .collect(),
1195 tool_uses: this
1196 .tool_uses_for_message(message.id, cx)
1197 .into_iter()
1198 .map(|tool_use| SerializedToolUse {
1199 id: tool_use.id,
1200 name: tool_use.name,
1201 input: tool_use.input,
1202 })
1203 .collect(),
1204 tool_results: this
1205 .tool_results_for_message(message.id)
1206 .into_iter()
1207 .map(|tool_result| SerializedToolResult {
1208 tool_use_id: tool_result.tool_use_id.clone(),
1209 is_error: tool_result.is_error,
1210 content: tool_result.content.clone(),
1211 output: tool_result.output.clone(),
1212 })
1213 .collect(),
1214 context: message.loaded_context.text.clone(),
1215 creases: message
1216 .creases
1217 .iter()
1218 .map(|crease| SerializedCrease {
1219 start: crease.range.start,
1220 end: crease.range.end,
1221 icon_path: crease.icon_path.clone(),
1222 label: crease.label.clone(),
1223 })
1224 .collect(),
1225 is_hidden: message.is_hidden,
1226 })
1227 .collect(),
1228 initial_project_snapshot,
1229 cumulative_token_usage: this.cumulative_token_usage,
1230 request_token_usage: this.request_token_usage.clone(),
1231 detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
1232 exceeded_window_error: this.exceeded_window_error.clone(),
1233 model: this
1234 .configured_model
1235 .as_ref()
1236 .map(|model| SerializedLanguageModel {
1237 provider: model.provider.id().0.to_string(),
1238 model: model.model.id().0.to_string(),
1239 }),
1240 completion_mode: Some(this.completion_mode),
1241 tool_use_limit_reached: this.tool_use_limit_reached,
1242 profile: Some(this.profile.id().clone()),
1243 })
1244 })
1245 }
1246
1247 pub fn remaining_turns(&self) -> u32 {
1248 self.remaining_turns
1249 }
1250
1251 pub fn set_remaining_turns(&mut self, remaining_turns: u32) {
1252 self.remaining_turns = remaining_turns;
1253 }
1254
1255 pub fn send_to_model(
1256 &mut self,
1257 model: Arc<dyn LanguageModel>,
1258 intent: CompletionIntent,
1259 window: Option<AnyWindowHandle>,
1260 cx: &mut Context<Self>,
1261 ) {
1262 if self.remaining_turns == 0 {
1263 return;
1264 }
1265
1266 self.remaining_turns -= 1;
1267
1268 self.flush_notifications(model.clone(), intent, cx);
1269
        self.finalize_pending_checkpoint(cx);
1271 self.stream_completion(
1272 self.to_completion_request(model.clone(), intent, cx),
1273 model,
1274 intent,
1275 window,
1276 cx,
1277 );
1278 }
1279
1280 pub fn retry_last_completion(
1281 &mut self,
1282 window: Option<AnyWindowHandle>,
1283 cx: &mut Context<Self>,
1284 ) {
1285 // Clear any existing error state
1286 self.retry_state = None;
1287
1288 // Use the last error context if available, otherwise fall back to configured model
1289 let (model, intent) = if let Some((model, intent)) = self.last_error_context.take() {
1290 (model, intent)
        } else if let Some(configured_model) = self.get_or_init_configured_model(cx) {
            // `get_or_init_configured_model` returns the already-configured model when one
            // is set and only falls back to the registry default otherwise.
            let model = configured_model.model.clone();
            let intent = if self.has_pending_tool_uses() {
                CompletionIntent::ToolResults
            } else {
                CompletionIntent::UserPrompt
            };
            (model, intent)
        } else {
            return;
        };
1310
1311 self.send_to_model(model, intent, window, cx);
1312 }
1313
1314 pub fn enable_burn_mode_and_retry(
1315 &mut self,
1316 window: Option<AnyWindowHandle>,
1317 cx: &mut Context<Self>,
1318 ) {
1319 self.completion_mode = CompletionMode::Burn;
1320 cx.emit(ThreadEvent::ProfileChanged);
1321 self.retry_last_completion(window, cx);
1322 }
1323
1324 pub fn used_tools_since_last_user_message(&self) -> bool {
1325 for message in self.messages.iter().rev() {
1326 if self.tool_use.message_has_tool_results(message.id) {
1327 return true;
1328 } else if message.role == Role::User {
1329 return false;
1330 }
1331 }
1332
1333 false
1334 }
1335
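    /// Builds the `LanguageModelRequest` for this thread: a cached system prompt (when
    /// the project context is ready), one request message per non-UI-only thread message
    /// with tool results folded into trailing User messages, the most recent fully
    /// resolved message marked as a prompt-cache breakpoint, and the tools enabled for
    /// `model`.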
1336 pub fn to_completion_request(
1337 &self,
1338 model: Arc<dyn LanguageModel>,
1339 intent: CompletionIntent,
1340 cx: &mut Context<Self>,
1341 ) -> LanguageModelRequest {
1342 let mut request = LanguageModelRequest {
1343 thread_id: Some(self.id.to_string()),
1344 prompt_id: Some(self.last_prompt_id.to_string()),
1345 intent: Some(intent),
1346 mode: None,
1347 messages: vec![],
1348 tools: Vec::new(),
1349 tool_choice: None,
1350 stop: Vec::new(),
1351 temperature: AgentSettings::temperature_for_model(&model, cx),
1352 thinking_allowed: true,
1353 };
1354
1355 let available_tools = self.available_tools(cx, model.clone());
1356 let available_tool_names = available_tools
1357 .iter()
1358 .map(|tool| tool.name.clone())
1359 .collect();
1360
1361 let model_context = &ModelContext {
1362 available_tools: available_tool_names,
1363 };
1364
1365 if let Some(project_context) = self.project_context.borrow().as_ref() {
1366 match self
1367 .prompt_builder
1368 .generate_assistant_system_prompt(project_context, model_context)
1369 {
1370 Err(err) => {
1371 let message = format!("{err:?}").into();
1372 log::error!("{message}");
1373 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1374 header: "Error generating system prompt".into(),
1375 message,
1376 }));
1377 }
1378 Ok(system_prompt) => {
1379 request.messages.push(LanguageModelRequestMessage {
1380 role: Role::System,
1381 content: vec![MessageContent::Text(system_prompt)],
1382 cache: true,
1383 });
1384 }
1385 }
1386 } else {
1387 let message = "Context for system prompt unexpectedly not ready.".into();
1388 log::error!("{message}");
1389 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1390 header: "Error generating system prompt".into(),
1391 message,
1392 }));
1393 }
1394
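        // Track the most recent request message that can safely serve as a prompt-cache
        // breakpoint; messages whose tool uses are still pending are excluded below.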
1395 let mut message_ix_to_cache = None;
1396 for message in &self.messages {
1397 // ui_only messages are for the UI only, not for the model
1398 if message.ui_only {
1399 continue;
1400 }
1401
1402 let mut request_message = LanguageModelRequestMessage {
1403 role: message.role,
1404 content: Vec::new(),
1405 cache: false,
1406 };
1407
1408 message
1409 .loaded_context
1410 .add_to_request_message(&mut request_message);
1411
1412 for segment in &message.segments {
1413 match segment {
1414 MessageSegment::Text(text) => {
1415 let text = text.trim_end();
1416 if !text.is_empty() {
1417 request_message
1418 .content
1419 .push(MessageContent::Text(text.into()));
1420 }
1421 }
1422 MessageSegment::Thinking { text, signature } => {
1423 if !text.is_empty() {
1424 request_message.content.push(MessageContent::Thinking {
1425 text: text.into(),
1426 signature: signature.clone(),
1427 });
1428 }
1429 }
1430 MessageSegment::RedactedThinking(data) => {
1431 request_message
1432 .content
1433 .push(MessageContent::RedactedThinking(data.clone()));
1434 }
1435 };
1436 }
1437
1438 let mut cache_message = true;
1439 let mut tool_results_message = LanguageModelRequestMessage {
1440 role: Role::User,
1441 content: Vec::new(),
1442 cache: false,
1443 };
1444 for (tool_use, tool_result) in self.tool_use.tool_results(message.id) {
1445 if let Some(tool_result) = tool_result {
1446 request_message
1447 .content
1448 .push(MessageContent::ToolUse(tool_use.clone()));
1449 tool_results_message
1450 .content
1451 .push(MessageContent::ToolResult(LanguageModelToolResult {
1452 tool_use_id: tool_use.id.clone(),
1453 tool_name: tool_result.tool_name.clone(),
1454 is_error: tool_result.is_error,
1455 content: if tool_result.content.is_empty() {
1456 // Surprisingly, the API fails if we return an empty string here.
1457 // It thinks we are sending a tool use without a tool result.
1458 "<Tool returned an empty string>".into()
1459 } else {
1460 tool_result.content.clone()
1461 },
1462 output: None,
1463 }));
1464 } else {
1465 cache_message = false;
1466 log::debug!(
1467 "skipped tool use {:?} because it is still pending",
1468 tool_use
1469 );
1470 }
1471 }
1472
1473 if cache_message {
1474 message_ix_to_cache = Some(request.messages.len());
1475 }
1476 request.messages.push(request_message);
1477
1478 if !tool_results_message.content.is_empty() {
1479 if cache_message {
1480 message_ix_to_cache = Some(request.messages.len());
1481 }
1482 request.messages.push(tool_results_message);
1483 }
1484 }
1485
1486 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
1487 if let Some(message_ix_to_cache) = message_ix_to_cache {
1488 request.messages[message_ix_to_cache].cache = true;
1489 }
1490
1491 request.tools = available_tools;
1492 request.mode = if model.supports_burn_mode() {
1493 Some(self.completion_mode.into())
1494 } else {
1495 Some(CompletionMode::Normal.into())
1496 };
1497
1498 request
1499 }
1500
1501 fn to_summarize_request(
1502 &self,
1503 model: &Arc<dyn LanguageModel>,
1504 intent: CompletionIntent,
1505 added_user_message: String,
1506 cx: &App,
1507 ) -> LanguageModelRequest {
1508 let mut request = LanguageModelRequest {
1509 thread_id: None,
1510 prompt_id: None,
1511 intent: Some(intent),
1512 mode: None,
1513 messages: vec![],
1514 tools: Vec::new(),
1515 tool_choice: None,
1516 stop: Vec::new(),
1517 temperature: AgentSettings::temperature_for_model(model, cx),
1518 thinking_allowed: false,
1519 };
1520
1521 for message in &self.messages {
1522 let mut request_message = LanguageModelRequestMessage {
1523 role: message.role,
1524 content: Vec::new(),
1525 cache: false,
1526 };
1527
1528 for segment in &message.segments {
1529 match segment {
1530 MessageSegment::Text(text) => request_message
1531 .content
1532 .push(MessageContent::Text(text.clone())),
1533 MessageSegment::Thinking { .. } => {}
1534 MessageSegment::RedactedThinking(_) => {}
1535 }
1536 }
1537
1538 if request_message.content.is_empty() {
1539 continue;
1540 }
1541
1542 request.messages.push(request_message);
1543 }
1544
1545 request.messages.push(LanguageModelRequestMessage {
1546 role: Role::User,
1547 content: vec![MessageContent::Text(added_user_message)],
1548 cache: false,
1549 });
1550
1551 request
1552 }
1553
    /// Inserts auto-generated notifications (if any) into the thread.
1555 fn flush_notifications(
1556 &mut self,
1557 model: Arc<dyn LanguageModel>,
1558 intent: CompletionIntent,
1559 cx: &mut Context<Self>,
1560 ) {
1561 match intent {
1562 CompletionIntent::UserPrompt | CompletionIntent::ToolResults => {
1563 if let Some(pending_tool_use) = self.attach_tracked_files_state(model, cx) {
1564 cx.emit(ThreadEvent::ToolFinished {
1565 tool_use_id: pending_tool_use.id.clone(),
1566 pending_tool_use: Some(pending_tool_use),
1567 });
1568 }
1569 }
1570 CompletionIntent::ThreadSummarization
1571 | CompletionIntent::ThreadContextSummarization
1572 | CompletionIntent::CreateFile
1573 | CompletionIntent::EditFile
1574 | CompletionIntent::InlineAssist
1575 | CompletionIntent::TerminalInlineAssist
1576 | CompletionIntent::GenerateGitCommitMessage => {}
1577 };
1578 }
1579
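    /// If the user has made edits the model has not been told about, simulates a
    /// `project_notifications` tool call on the most recent Assistant message so the
    /// model learns about those edits without introducing a distracting User message.
    /// Returns the resulting pending tool use, if any.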
1580 fn attach_tracked_files_state(
1581 &mut self,
1582 model: Arc<dyn LanguageModel>,
1583 cx: &mut App,
1584 ) -> Option<PendingToolUse> {
1585 let action_log = self.action_log.read(cx);
1586
1587 if !action_log.has_unnotified_user_edits() {
1588 return None;
1589 }
1590
1591 // Represent notification as a simulated `project_notifications` tool call
1592 let tool_name = Arc::from("project_notifications");
1593 let Some(tool) = self.tools.read(cx).tool(&tool_name, cx) else {
1594 debug_panic!("`project_notifications` tool not found");
1595 return None;
1596 };
1597
1598 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
1599 return None;
1600 }
1601
1602 let input = serde_json::json!({});
1603 let request = Arc::new(LanguageModelRequest::default()); // unused
1604 let window = None;
1605 let tool_result = tool.run(
1606 input,
1607 request,
1608 self.project.clone(),
1609 self.action_log.clone(),
1610 model.clone(),
1611 window,
1612 cx,
1613 );
1614
1615 let tool_use_id =
1616 LanguageModelToolUseId::from(format!("project_notifications_{}", self.messages.len()));
1617
1618 let tool_use = LanguageModelToolUse {
1619 id: tool_use_id.clone(),
1620 name: tool_name.clone(),
1621 raw_input: "{}".to_string(),
1622 input: serde_json::json!({}),
1623 is_input_complete: true,
1624 };
1625
1626 let tool_output = cx.background_executor().block(tool_result.output);
1627
        // Attach a `project_notifications` tool call to the latest existing
        // Assistant message. We cannot create a new Assistant message
        // because thinking models require a `thinking` block that we
        // cannot mock. We also cannot send the notification as a normal
        // (non-tool-use) User message because that distracts the agent
        // too much.
        let tool_message_id = self
            .messages
            .iter()
            .rfind(|message| message.role == Role::Assistant)
            .map(|message| message.id)?;
1640
1641 let tool_use_metadata = ToolUseMetadata {
1642 model: model.clone(),
1643 thread_id: self.id.clone(),
1644 prompt_id: self.last_prompt_id.clone(),
1645 };
1646
1647 self.tool_use
1648 .request_tool_use(tool_message_id, tool_use, tool_use_metadata.clone(), cx);
1649
1650 let pending_tool_use = self.tool_use.insert_tool_output(
1651 tool_use_id.clone(),
1652 tool_name,
1653 tool_output,
1654 self.configured_model.as_ref(),
1655 self.completion_mode,
1656 );
1657
1658 pending_tool_use
1659 }
1660
1661 pub fn stream_completion(
1662 &mut self,
1663 request: LanguageModelRequest,
1664 model: Arc<dyn LanguageModel>,
1665 intent: CompletionIntent,
1666 window: Option<AnyWindowHandle>,
1667 cx: &mut Context<Self>,
1668 ) {
1669 self.tool_use_limit_reached = false;
1670
1671 let pending_completion_id = post_inc(&mut self.completion_count);
1672 let mut request_callback_parameters = if self.request_callback.is_some() {
1673 Some((request.clone(), Vec::new()))
1674 } else {
1675 None
1676 };
1677 let prompt_id = self.last_prompt_id.clone();
1678 let tool_use_metadata = ToolUseMetadata {
1679 model: model.clone(),
1680 thread_id: self.id.clone(),
1681 prompt_id: prompt_id.clone(),
1682 };
1683
1684 let completion_mode = request
1685 .mode
1686 .unwrap_or(zed_llm_client::CompletionMode::Normal);
1687
1688 self.last_received_chunk_at = Some(Instant::now());
1689
1690 let task = cx.spawn(async move |thread, cx| {
1691 let stream_completion_future = model.stream_completion(request, &cx);
1692 let initial_token_usage =
1693 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage);
1694 let stream_completion = async {
1695 let mut events = stream_completion_future.await?;
1696
1697 let mut stop_reason = StopReason::EndTurn;
1698 let mut current_token_usage = TokenUsage::default();
1699
1700 thread
1701 .update(cx, |_thread, cx| {
1702 cx.emit(ThreadEvent::NewRequest);
1703 })
1704 .ok();
1705
1706 let mut request_assistant_message_id = None;
1707
1708 while let Some(event) = events.next().await {
1709 if let Some((_, response_events)) = request_callback_parameters.as_mut() {
1710 response_events
1711 .push(event.as_ref().map_err(|error| error.to_string()).cloned());
1712 }
1713
1714 thread.update(cx, |thread, cx| {
1715 match event? {
1716 LanguageModelCompletionEvent::StartMessage { .. } => {
1717 request_assistant_message_id =
1718 Some(thread.insert_assistant_message(
1719 vec![MessageSegment::Text(String::new())],
1720 cx,
1721 ));
1722 }
1723 LanguageModelCompletionEvent::Stop(reason) => {
1724 stop_reason = reason;
1725 }
1726 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1727 thread.update_token_usage_at_last_message(token_usage);
1728 thread.cumulative_token_usage = thread.cumulative_token_usage
1729 + token_usage
1730 - current_token_usage;
1731 current_token_usage = token_usage;
1732 }
1733 LanguageModelCompletionEvent::Text(chunk) => {
1734 thread.received_chunk();
1735
1736 cx.emit(ThreadEvent::ReceivedTextChunk);
1737 if let Some(last_message) = thread.messages.last_mut() {
1738 if last_message.role == Role::Assistant
1739 && !thread.tool_use.has_tool_results(last_message.id)
1740 {
1741 last_message.push_text(&chunk);
1742 cx.emit(ThreadEvent::StreamedAssistantText(
1743 last_message.id,
1744 chunk,
1745 ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1752 request_assistant_message_id =
1753 Some(thread.insert_assistant_message(
1754 vec![MessageSegment::Text(chunk.to_string())],
1755 cx,
1756 ));
1757 };
1758 }
1759 }
1760 LanguageModelCompletionEvent::Thinking {
1761 text: chunk,
1762 signature,
1763 } => {
1764 thread.received_chunk();
1765
1766 if let Some(last_message) = thread.messages.last_mut() {
1767 if last_message.role == Role::Assistant
1768 && !thread.tool_use.has_tool_results(last_message.id)
1769 {
1770 last_message.push_thinking(&chunk, signature);
1771 cx.emit(ThreadEvent::StreamedAssistantThinking(
1772 last_message.id,
1773 chunk,
1774 ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1781 request_assistant_message_id =
1782 Some(thread.insert_assistant_message(
1783 vec![MessageSegment::Thinking {
1784 text: chunk.to_string(),
1785 signature,
1786 }],
1787 cx,
1788 ));
1789 };
1790 }
1791 }
1792 LanguageModelCompletionEvent::RedactedThinking { data } => {
1793 thread.received_chunk();
1794
1795 if let Some(last_message) = thread.messages.last_mut() {
1796 if last_message.role == Role::Assistant
1797 && !thread.tool_use.has_tool_results(last_message.id)
1798 {
1799 last_message.push_redacted_thinking(data);
1800 } else {
1801 request_assistant_message_id =
1802 Some(thread.insert_assistant_message(
1803 vec![MessageSegment::RedactedThinking(data)],
1804 cx,
1805 ));
1806 };
1807 }
1808 }
1809 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1810 let last_assistant_message_id = request_assistant_message_id
1811 .unwrap_or_else(|| {
1812 let new_assistant_message_id =
1813 thread.insert_assistant_message(vec![], cx);
1814 request_assistant_message_id =
1815 Some(new_assistant_message_id);
1816 new_assistant_message_id
1817 });
1818
1819 let tool_use_id = tool_use.id.clone();
                                // Forward partial input for streaming display; once the
                                // input is complete there is nothing left to stream.
                                let streamed_input = if tool_use.is_input_complete {
                                    None
                                } else {
                                    Some(tool_use.input.clone())
                                };
1825
1826 let ui_text = thread.tool_use.request_tool_use(
1827 last_assistant_message_id,
1828 tool_use,
1829 tool_use_metadata.clone(),
1830 cx,
1831 );
1832
1833 if let Some(input) = streamed_input {
1834 cx.emit(ThreadEvent::StreamedToolUse {
1835 tool_use_id,
1836 ui_text,
1837 input,
1838 });
1839 }
1840 }
1841 LanguageModelCompletionEvent::ToolUseJsonParseError {
1842 id,
1843 tool_name,
1844 raw_input: invalid_input_json,
1845 json_parse_error,
1846 } => {
1847 thread.receive_invalid_tool_json(
1848 id,
1849 tool_name,
1850 invalid_input_json,
1851 json_parse_error,
1852 window,
1853 cx,
1854 );
1855 }
1856 LanguageModelCompletionEvent::StatusUpdate(status_update) => {
1857 if let Some(completion) = thread
1858 .pending_completions
1859 .iter_mut()
1860 .find(|completion| completion.id == pending_completion_id)
1861 {
1862 match status_update {
1863 CompletionRequestStatus::Queued { position } => {
1864 completion.queue_state =
1865 QueueState::Queued { position };
1866 }
1867 CompletionRequestStatus::Started => {
1868 completion.queue_state = QueueState::Started;
1869 }
1870 CompletionRequestStatus::Failed {
1871 code,
1872 message,
1873 request_id: _,
1874 retry_after,
1875 } => {
1876 return Err(
1877 LanguageModelCompletionError::from_cloud_failure(
1878 model.upstream_provider_name(),
1879 code,
1880 message,
1881 retry_after.map(Duration::from_secs_f64),
1882 ),
1883 );
1884 }
1885 CompletionRequestStatus::UsageUpdated { amount, limit } => {
1886 thread.update_model_request_usage(
1887 amount as u32,
1888 limit,
1889 cx,
1890 );
1891 }
1892 CompletionRequestStatus::ToolUseLimitReached => {
1893 thread.tool_use_limit_reached = true;
1894 cx.emit(ThreadEvent::ToolUseLimitReached);
1895 }
1896 }
1897 }
1898 }
1899 }
1900
1901 thread.touch_updated_at();
1902 cx.emit(ThreadEvent::StreamedCompletion);
1903 cx.notify();
1904
1905 thread.auto_capture_telemetry(cx);
1906 Ok(())
1907 })??;
1908
1909 smol::future::yield_now().await;
1910 }
1911
1912 thread.update(cx, |thread, cx| {
1913 thread.last_received_chunk_at = None;
1914 thread
1915 .pending_completions
1916 .retain(|completion| completion.id != pending_completion_id);
1917
                    // If there is a response without tool use, summarize the thread. Otherwise,
                    // allow two tool uses before summarizing.
1920 if matches!(thread.summary, ThreadSummary::Pending)
1921 && thread.messages.len() >= 2
1922 && (!thread.has_pending_tool_uses() || thread.messages.len() >= 6)
1923 {
1924 thread.summarize(cx);
1925 }
1926 })?;
1927
1928 anyhow::Ok(stop_reason)
1929 };
1930
1931 let result = stream_completion.await;
1932 let mut retry_scheduled = false;
1933
1934 thread
1935 .update(cx, |thread, cx| {
1936 thread.finalize_pending_checkpoint(cx);
1937 match result.as_ref() {
1938 Ok(stop_reason) => {
1939 match stop_reason {
1940 StopReason::ToolUse => {
1941 let tool_uses =
1942 thread.use_pending_tools(window, model.clone(), cx);
1943 cx.emit(ThreadEvent::UsePendingTools { tool_uses });
1944 }
1945 StopReason::EndTurn | StopReason::MaxTokens => {
1946 thread.project.update(cx, |project, cx| {
1947 project.set_agent_location(None, cx);
1948 });
1949 }
1950 StopReason::Refusal => {
1951 thread.project.update(cx, |project, cx| {
1952 project.set_agent_location(None, cx);
1953 });
1954
1955 // Remove the turn that was refused.
1956 //
1957 // https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/handle-streaming-refusals#reset-context-after-refusal
1958 {
1959 let mut messages_to_remove = Vec::new();
1960
1961 for (ix, message) in
1962 thread.messages.iter().enumerate().rev()
1963 {
1964 messages_to_remove.push(message.id);
1965
1966 if message.role == Role::User {
1967 if ix == 0 {
1968 break;
1969 }
1970
1971 if let Some(prev_message) =
1972 thread.messages.get(ix - 1)
1973 {
1974 if prev_message.role == Role::Assistant {
1975 break;
1976 }
1977 }
1978 }
1979 }
1980
1981 for message_id in messages_to_remove {
1982 thread.delete_message(message_id, cx);
1983 }
1984 }
1985
1986 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1987 header: "Language model refusal".into(),
1988 message:
1989 "Model refused to generate content for safety reasons."
1990 .into(),
1991 }));
1992 }
1993 }
1994
1995 // We successfully completed, so cancel any remaining retries.
1996 thread.retry_state = None;
1997 }
1998 Err(error) => {
1999 thread.project.update(cx, |project, cx| {
2000 project.set_agent_location(None, cx);
2001 });
2002
2003 if error.is::<PaymentRequiredError>() {
2004 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
2005 } else if let Some(error) =
2006 error.downcast_ref::<ModelRequestLimitReachedError>()
2007 {
2008 cx.emit(ThreadEvent::ShowError(
2009 ThreadError::ModelRequestLimitReached { plan: error.plan },
2010 ));
2011 } else if let Some(completion_error) =
2012 error.downcast_ref::<LanguageModelCompletionError>()
2013 {
2014 match &completion_error {
2015 LanguageModelCompletionError::PromptTooLarge {
2016 tokens, ..
2017 } => {
2018 let tokens = tokens.unwrap_or_else(|| {
2019 // We didn't get an exact token count from the API, so fall back on our estimate.
2020 thread
2021 .total_token_usage()
2022 .map(|usage| usage.total)
2023 .unwrap_or(0)
2024 // We know the context window was exceeded in practice, so if our estimate was
2025 // lower than max tokens, the estimate was wrong; return that we exceeded by 1.
2026 .max(
2027 model
2028 .max_token_count_for_mode(completion_mode)
2029 .saturating_add(1),
2030 )
2031 });
2032 thread.exceeded_window_error = Some(ExceededWindowError {
2033 model_id: model.id(),
2034 token_count: tokens,
2035 });
2036 cx.notify();
2037 }
2038 _ => {
2039 if let Some(retry_strategy) =
2040 Thread::get_retry_strategy(completion_error)
2041 {
2042 log::info!(
2043 "Retrying with {:?} for language model completion error {:?}",
2044 retry_strategy,
2045 completion_error
2046 );
2047
2048 retry_scheduled = thread
2049 .handle_retryable_error_with_delay(
2050 &completion_error,
2051 Some(retry_strategy),
2052 model.clone(),
2053 intent,
2054 window,
2055 cx,
2056 );
2057 }
2058 }
2059 }
2060 }
2061
2062 if !retry_scheduled {
2063 thread.cancel_last_completion(window, cx);
2064 }
2065 }
2066 }
2067
2068 if !retry_scheduled {
2069 cx.emit(ThreadEvent::Stopped(result.map_err(Arc::new)));
2070 }
2071
2072 if let Some((request_callback, (request, response_events))) = thread
2073 .request_callback
2074 .as_mut()
2075 .zip(request_callback_parameters.as_ref())
2076 {
2077 request_callback(request, response_events);
2078 }
2079
2080 thread.auto_capture_telemetry(cx);
2081
2082 if let Ok(initial_usage) = initial_token_usage {
2083 let usage = thread.cumulative_token_usage - initial_usage;
2084
2085 telemetry::event!(
2086 "Assistant Thread Completion",
2087 thread_id = thread.id().to_string(),
2088 prompt_id = prompt_id,
2089 model = model.telemetry_id(),
2090 model_provider = model.provider_id().to_string(),
2091 input_tokens = usage.input_tokens,
2092 output_tokens = usage.output_tokens,
2093 cache_creation_input_tokens = usage.cache_creation_input_tokens,
2094 cache_read_input_tokens = usage.cache_read_input_tokens,
2095 );
2096 }
2097 })
2098 .ok();
2099 });
2100
2101 self.pending_completions.push(PendingCompletion {
2102 id: pending_completion_id,
2103 queue_state: QueueState::Sending,
2104 _task: task,
2105 });
2106 }
2107
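    /// Generates a short, single-line summary of the thread using the configured
    /// thread-summary model. The result is stored in `self.summary`, and
    /// `ThreadEvent::SummaryGenerated` is emitted once generation finishes
    /// (successfully or not).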
2108 pub fn summarize(&mut self, cx: &mut Context<Self>) {
2109 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
            log::debug!("No thread summary model configured; skipping summarization");
2111 return;
2112 };
2113
2114 if !model.provider.is_authenticated(cx) {
2115 return;
2116 }
2117
2118 let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
2119
2120 let request = self.to_summarize_request(
2121 &model.model,
2122 CompletionIntent::ThreadSummarization,
2123 added_user_message.into(),
2124 cx,
2125 );
2126
2127 self.summary = ThreadSummary::Generating;
2128
2129 self.pending_summary = cx.spawn(async move |this, cx| {
2130 let result = async {
2131 let mut messages = model.model.stream_completion(request, &cx).await?;
2132
2133 let mut new_summary = String::new();
2134 while let Some(event) = messages.next().await {
2135 let Ok(event) = event else {
2136 continue;
2137 };
2138 let text = match event {
2139 LanguageModelCompletionEvent::Text(text) => text,
2140 LanguageModelCompletionEvent::StatusUpdate(
2141 CompletionRequestStatus::UsageUpdated { amount, limit },
2142 ) => {
2143 this.update(cx, |thread, cx| {
2144 thread.update_model_request_usage(amount as u32, limit, cx);
2145 })?;
2146 continue;
2147 }
2148 _ => continue,
2149 };
2150
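                    // The summary is displayed as a single line, so only the text up to the
                    // first line break of the streamed response is kept.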
2151 let mut lines = text.lines();
2152 new_summary.extend(lines.next());
2153
2154 // Stop if the LLM generated multiple lines.
2155 if lines.next().is_some() {
2156 break;
2157 }
2158 }
2159
2160 anyhow::Ok(new_summary)
2161 }
2162 .await;
2163
2164 this.update(cx, |this, cx| {
2165 match result {
2166 Ok(new_summary) => {
2167 if new_summary.is_empty() {
2168 this.summary = ThreadSummary::Error;
2169 } else {
2170 this.summary = ThreadSummary::Ready(new_summary.into());
2171 }
2172 }
2173 Err(err) => {
2174 this.summary = ThreadSummary::Error;
2175 log::error!("Failed to generate thread summary: {}", err);
2176 }
2177 }
2178 cx.emit(ThreadEvent::SummaryGenerated);
2179 })
2180 .log_err()?;
2181
2182 Some(())
2183 });
2184 }
2185
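    /// Maps a completion error to the retry strategy we should use for it, or `None`
    /// when retrying cannot help (e.g. bad credentials or an oversized prompt).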
2186 fn get_retry_strategy(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2187 use LanguageModelCompletionError::*;
2188
2189 // General strategy here:
2190 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   waiting out any server-provided retry-after delay (plain HTTP 429s use exponential backoff).
2192 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
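        //
        // As a concrete illustration: with BASE_RETRY_DELAY = 5s and MAX_RETRY_ATTEMPTS = 4, the
        // exponential-backoff path waits roughly 5s, 10s, 20s, and 40s between attempts (see
        // `handle_retryable_error_with_delay`), while the fixed path waits the server-provided
        // `retry_after` (or 5s) before every attempt.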
2193 match error {
2194 HttpResponseError {
2195 status_code: StatusCode::TOO_MANY_REQUESTS,
2196 ..
2197 } => Some(RetryStrategy::ExponentialBackoff {
2198 initial_delay: BASE_RETRY_DELAY,
2199 max_attempts: MAX_RETRY_ATTEMPTS,
2200 }),
2201 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2202 Some(RetryStrategy::Fixed {
2203 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2204 max_attempts: MAX_RETRY_ATTEMPTS,
2205 })
2206 }
2207 UpstreamProviderError {
2208 status,
2209 retry_after,
2210 ..
2211 } => match *status {
2212 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2213 Some(RetryStrategy::Fixed {
2214 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2215 max_attempts: MAX_RETRY_ATTEMPTS,
2216 })
2217 }
2218 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2219 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2220 // Internal Server Error could be anything, retry up to 3 times.
2221 max_attempts: 3,
2222 }),
2223 status => {
2224 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2225 // but we frequently get them in practice. See https://http.dev/529
2226 if status.as_u16() == 529 {
2227 Some(RetryStrategy::Fixed {
2228 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2229 max_attempts: MAX_RETRY_ATTEMPTS,
2230 })
2231 } else {
2232 Some(RetryStrategy::Fixed {
2233 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2234 max_attempts: 2,
2235 })
2236 }
2237 }
2238 },
2239 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2240 delay: BASE_RETRY_DELAY,
2241 max_attempts: 3,
2242 }),
2243 ApiReadResponseError { .. }
2244 | HttpSend { .. }
2245 | DeserializeResponse { .. }
2246 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2247 delay: BASE_RETRY_DELAY,
2248 max_attempts: 3,
2249 }),
2250 // Retrying these errors definitely shouldn't help.
2251 HttpResponseError {
2252 status_code:
2253 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2254 ..
2255 }
2256 | AuthenticationError { .. }
2257 | PermissionError { .. }
2258 | NoApiKey { .. }
2259 | ApiEndpointNotFound { .. }
2260 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2262 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2263 delay: BASE_RETRY_DELAY,
2264 max_attempts: 1,
2265 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2267 HttpResponseError { status_code, .. }
2268 if status_code.is_client_error() || status_code.is_server_error() =>
2269 {
2270 Some(RetryStrategy::Fixed {
2271 delay: BASE_RETRY_DELAY,
2272 max_attempts: 3,
2273 })
2274 }
            // For any remaining errors, conservatively assume they might be transient and retry a couple of times.
2276 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2277 delay: BASE_RETRY_DELAY,
2278 max_attempts: 2,
2279 }),
2280 }
2281 }
2282
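    /// Handles an error that may warrant an automatic retry. Returns `true` if a retry was
    /// scheduled. Automatic retries only happen in Burn Mode; otherwise the error is surfaced
    /// to the user with the option to retry manually (and to enable Burn Mode).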
2283 fn handle_retryable_error_with_delay(
2284 &mut self,
2285 error: &LanguageModelCompletionError,
2286 strategy: Option<RetryStrategy>,
2287 model: Arc<dyn LanguageModel>,
2288 intent: CompletionIntent,
2289 window: Option<AnyWindowHandle>,
2290 cx: &mut Context<Self>,
2291 ) -> bool {
2292 // Store context for the Retry button
2293 self.last_error_context = Some((model.clone(), intent));
2294
2295 // Only auto-retry if Burn Mode is enabled
2296 if self.completion_mode != CompletionMode::Burn {
2297 // Show error with retry options
2298 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2299 message: format!(
2300 "{}\n\nTo automatically retry when similar errors happen, enable Burn Mode.",
2301 error
2302 )
2303 .into(),
2304 can_enable_burn_mode: true,
2305 }));
2306 return false;
2307 }
2308
2309 let Some(strategy) = strategy.or_else(|| Self::get_retry_strategy(error)) else {
2310 return false;
2311 };
2312
2313 let max_attempts = match &strategy {
2314 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
2315 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
2316 };
2317
2318 let retry_state = self.retry_state.get_or_insert(RetryState {
2319 attempt: 0,
2320 max_attempts,
2321 intent,
2322 });
2323
2324 retry_state.attempt += 1;
2325 let attempt = retry_state.attempt;
2326 let max_attempts = retry_state.max_attempts;
2327 let intent = retry_state.intent;
2328
2329 if attempt <= max_attempts {
2330 let delay = match &strategy {
2331 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
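                    // Double the delay on every attempt: initial_delay * 2^(attempt - 1).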
2332 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
2333 Duration::from_secs(delay_secs)
2334 }
2335 RetryStrategy::Fixed { delay, .. } => *delay,
2336 };
2337
2338 // Add a transient message to inform the user
2339 let delay_secs = delay.as_secs();
2340 let retry_message = if max_attempts == 1 {
2341 format!("{error}. Retrying in {delay_secs} seconds...")
2342 } else {
2343 format!(
2344 "{error}. Retrying (attempt {attempt} of {max_attempts}) \
2345 in {delay_secs} seconds..."
2346 )
2347 };
2348 log::warn!(
2349 "Retrying completion request (attempt {attempt} of {max_attempts}) \
2350 in {delay_secs} seconds: {error:?}",
2351 );
2352
2353 // Add a UI-only message instead of a regular message
2354 let id = self.next_message_id.post_inc();
2355 self.messages.push(Message {
2356 id,
2357 role: Role::System,
2358 segments: vec![MessageSegment::Text(retry_message)],
2359 loaded_context: LoadedContext::default(),
2360 creases: Vec::new(),
2361 is_hidden: false,
2362 ui_only: true,
2363 });
2364 cx.emit(ThreadEvent::MessageAdded(id));
2365
2366 // Schedule the retry
2367 let thread_handle = cx.entity().downgrade();
2368
2369 cx.spawn(async move |_thread, cx| {
2370 cx.background_executor().timer(delay).await;
2371
2372 thread_handle
2373 .update(cx, |thread, cx| {
2374 // Retry the completion
2375 thread.send_to_model(model, intent, window, cx);
2376 })
2377 .log_err();
2378 })
2379 .detach();
2380
2381 true
2382 } else {
2383 // Max retries exceeded
2384 self.retry_state = None;
2385
2386 // Stop generating since we're giving up on retrying.
2387 self.pending_completions.clear();
2388
2389 // Show error alongside a Retry button, but no
2390 // Enable Burn Mode button (since it's already enabled)
2391 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2392 message: format!("Failed after retrying: {}", error).into(),
2393 can_enable_burn_mode: false,
2394 }));
2395
2396 false
2397 }
2398 }
2399
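    /// Starts generating a detailed summary of the thread unless one is already generating or
    /// was generated for the current last message. The result is published through
    /// `detailed_summary_tx`, and the thread is saved afterwards so the summary can be reused.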
2400 pub fn start_generating_detailed_summary_if_needed(
2401 &mut self,
2402 thread_store: WeakEntity<ThreadStore>,
2403 cx: &mut Context<Self>,
2404 ) {
2405 let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
2406 return;
2407 };
2408
2409 match &*self.detailed_summary_rx.borrow() {
2410 DetailedSummaryState::Generating { message_id, .. }
2411 | DetailedSummaryState::Generated { message_id, .. }
2412 if *message_id == last_message_id =>
2413 {
2414 // Already up-to-date
2415 return;
2416 }
2417 _ => {}
2418 }
2419
2420 let Some(ConfiguredModel { model, provider }) =
2421 LanguageModelRegistry::read_global(cx).thread_summary_model()
2422 else {
2423 return;
2424 };
2425
2426 if !provider.is_authenticated(cx) {
2427 return;
2428 }
2429
2430 let added_user_message = include_str!("./prompts/summarize_thread_detailed_prompt.txt");
2431
2432 let request = self.to_summarize_request(
2433 &model,
2434 CompletionIntent::ThreadContextSummarization,
2435 added_user_message.into(),
2436 cx,
2437 );
2438
2439 *self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
2440 message_id: last_message_id,
2441 };
2442
2443 // Replace the detailed summarization task if there is one, cancelling it. It would probably
2444 // be better to allow the old task to complete, but this would require logic for choosing
2445 // which result to prefer (the old task could complete after the new one, resulting in a
2446 // stale summary).
2447 self.detailed_summary_task = cx.spawn(async move |thread, cx| {
2448 let stream = model.stream_completion_text(request, &cx);
2449 let Some(mut messages) = stream.await.log_err() else {
2450 thread
2451 .update(cx, |thread, _cx| {
2452 *thread.detailed_summary_tx.borrow_mut() =
2453 DetailedSummaryState::NotGenerated;
2454 })
2455 .ok()?;
2456 return None;
2457 };
2458
2459 let mut new_detailed_summary = String::new();
2460
2461 while let Some(chunk) = messages.stream.next().await {
2462 if let Some(chunk) = chunk.log_err() {
2463 new_detailed_summary.push_str(&chunk);
2464 }
2465 }
2466
2467 thread
2468 .update(cx, |thread, _cx| {
2469 *thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
2470 text: new_detailed_summary.into(),
2471 message_id: last_message_id,
2472 };
2473 })
2474 .ok()?;
2475
2476 // Save thread so its summary can be reused later
2477 if let Some(thread) = thread.upgrade() {
2478 if let Ok(Ok(save_task)) = cx.update(|cx| {
2479 thread_store
2480 .update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
2481 }) {
2482 save_task.await.log_err();
2483 }
2484 }
2485
2486 Some(())
2487 });
2488 }
2489
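    /// Waits until any in-progress detailed-summary generation settles and returns the generated
    /// text, falling back to the raw thread text when no detailed summary has been generated.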
2490 pub async fn wait_for_detailed_summary_or_text(
2491 this: &Entity<Self>,
2492 cx: &mut AsyncApp,
2493 ) -> Option<SharedString> {
2494 let mut detailed_summary_rx = this
2495 .read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
2496 .ok()?;
2497 loop {
2498 match detailed_summary_rx.recv().await? {
2499 DetailedSummaryState::Generating { .. } => {}
2500 DetailedSummaryState::NotGenerated => {
2501 return this.read_with(cx, |this, _cx| this.text().into()).ok();
2502 }
2503 DetailedSummaryState::Generated { text, .. } => return Some(text),
2504 }
2505 }
2506 }
2507
2508 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
2509 self.detailed_summary_rx
2510 .borrow()
2511 .text()
2512 .unwrap_or_else(|| self.text().into())
2513 }
2514
2515 pub fn is_generating_detailed_summary(&self) -> bool {
2516 matches!(
2517 &*self.detailed_summary_rx.borrow(),
2518 DetailedSummaryState::Generating { .. }
2519 )
2520 }
2521
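    /// Starts every idle pending tool use against the given model and returns them. Tools that
    /// require confirmation emit `ThreadEvent::ToolConfirmationNeeded` instead of running
    /// immediately.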
2522 pub fn use_pending_tools(
2523 &mut self,
2524 window: Option<AnyWindowHandle>,
2525 model: Arc<dyn LanguageModel>,
2526 cx: &mut Context<Self>,
2527 ) -> Vec<PendingToolUse> {
2528 self.auto_capture_telemetry(cx);
2529 let request =
2530 Arc::new(self.to_completion_request(model.clone(), CompletionIntent::ToolResults, cx));
2531 let pending_tool_uses = self
2532 .tool_use
2533 .pending_tool_uses()
2534 .into_iter()
2535 .filter(|tool_use| tool_use.status.is_idle())
2536 .cloned()
2537 .collect::<Vec<_>>();
2538
2539 for tool_use in pending_tool_uses.iter() {
2540 self.use_pending_tool(tool_use.clone(), request.clone(), model.clone(), window, cx);
2541 }
2542
2543 pending_tool_uses
2544 }
2545
2546 fn use_pending_tool(
2547 &mut self,
2548 tool_use: PendingToolUse,
2549 request: Arc<LanguageModelRequest>,
2550 model: Arc<dyn LanguageModel>,
2551 window: Option<AnyWindowHandle>,
2552 cx: &mut Context<Self>,
2553 ) {
2554 let Some(tool) = self.tools.read(cx).tool(&tool_use.name, cx) else {
2555 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2556 };
2557
2558 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
2559 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2560 }
2561
2562 if tool.needs_confirmation(&tool_use.input, cx)
2563 && !AgentSettings::get_global(cx).always_allow_tool_actions
2564 {
2565 self.tool_use.confirm_tool_use(
2566 tool_use.id,
2567 tool_use.ui_text,
2568 tool_use.input,
2569 request,
2570 tool,
2571 );
2572 cx.emit(ThreadEvent::ToolConfirmationNeeded);
2573 } else {
2574 self.run_tool(
2575 tool_use.id,
2576 tool_use.ui_text,
2577 tool_use.input,
2578 request,
2579 tool,
2580 model,
2581 window,
2582 cx,
2583 );
2584 }
2585 }
2586
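    /// Handles a tool call for a tool that doesn't exist or isn't enabled in the current profile
    /// by recording an error tool result that lists the tools which are actually available.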
2587 pub fn handle_hallucinated_tool_use(
2588 &mut self,
2589 tool_use_id: LanguageModelToolUseId,
2590 hallucinated_tool_name: Arc<str>,
2591 window: Option<AnyWindowHandle>,
2592 cx: &mut Context<Thread>,
2593 ) {
2594 let available_tools = self.profile.enabled_tools(cx);
2595
2596 let tool_list = available_tools
2597 .iter()
2598 .map(|(name, tool)| format!("- {}: {}", name, tool.description()))
2599 .collect::<Vec<_>>()
2600 .join("\n");
2601
2602 let error_message = format!(
2603 "The tool '{}' doesn't exist or is not enabled. Available tools:\n{}",
2604 hallucinated_tool_name, tool_list
2605 );
2606
2607 let pending_tool_use = self.tool_use.insert_tool_output(
2608 tool_use_id.clone(),
2609 hallucinated_tool_name,
2610 Err(anyhow!("Missing tool call: {error_message}")),
2611 self.configured_model.as_ref(),
2612 self.completion_mode,
2613 );
2614
2615 cx.emit(ThreadEvent::MissingToolUse {
2616 tool_use_id: tool_use_id.clone(),
2617 ui_text: error_message.into(),
2618 });
2619
2620 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2621 }
2622
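    /// Records a tool call whose input JSON could not be parsed, reporting the parse error back
    /// to the model as a failed tool result and to the UI via `ThreadEvent::InvalidToolInput`.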
2623 pub fn receive_invalid_tool_json(
2624 &mut self,
2625 tool_use_id: LanguageModelToolUseId,
2626 tool_name: Arc<str>,
2627 invalid_json: Arc<str>,
2628 error: String,
2629 window: Option<AnyWindowHandle>,
2630 cx: &mut Context<Thread>,
2631 ) {
2632 log::error!("The model returned invalid input JSON: {invalid_json}");
2633
2634 let pending_tool_use = self.tool_use.insert_tool_output(
2635 tool_use_id.clone(),
2636 tool_name,
2637 Err(anyhow!("Error parsing input JSON: {error}")),
2638 self.configured_model.as_ref(),
2639 self.completion_mode,
2640 );
2641 let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
2642 pending_tool_use.ui_text.clone()
2643 } else {
2644 log::error!(
2645 "There was no pending tool use for tool use {tool_use_id}, even though it finished (with invalid input JSON)."
2646 );
2647 format!("Unknown tool {}", tool_use_id).into()
2648 };
2649
2650 cx.emit(ThreadEvent::InvalidToolInput {
2651 tool_use_id: tool_use_id.clone(),
2652 ui_text,
2653 invalid_input_json: invalid_json,
2654 });
2655
2656 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2657 }
2658
2659 pub fn run_tool(
2660 &mut self,
2661 tool_use_id: LanguageModelToolUseId,
2662 ui_text: impl Into<SharedString>,
2663 input: serde_json::Value,
2664 request: Arc<LanguageModelRequest>,
2665 tool: Arc<dyn Tool>,
2666 model: Arc<dyn LanguageModel>,
2667 window: Option<AnyWindowHandle>,
2668 cx: &mut Context<Thread>,
2669 ) {
2670 let task =
2671 self.spawn_tool_use(tool_use_id.clone(), request, input, tool, model, window, cx);
2672 self.tool_use
2673 .run_pending_tool(tool_use_id, ui_text.into(), task);
2674 }
2675
2676 fn spawn_tool_use(
2677 &mut self,
2678 tool_use_id: LanguageModelToolUseId,
2679 request: Arc<LanguageModelRequest>,
2680 input: serde_json::Value,
2681 tool: Arc<dyn Tool>,
2682 model: Arc<dyn LanguageModel>,
2683 window: Option<AnyWindowHandle>,
2684 cx: &mut Context<Thread>,
2685 ) -> Task<()> {
2686 let tool_name: Arc<str> = tool.name().into();
2687
2688 let tool_result = tool.run(
2689 input,
2690 request,
2691 self.project.clone(),
2692 self.action_log.clone(),
2693 model,
2694 window,
2695 cx,
2696 );
2697
2698 // Store the card separately if it exists
2699 if let Some(card) = tool_result.card.clone() {
2700 self.tool_use
2701 .insert_tool_result_card(tool_use_id.clone(), card);
2702 }
2703
2704 cx.spawn({
2705 async move |thread: WeakEntity<Thread>, cx| {
2706 let output = tool_result.output.await;
2707
2708 thread
2709 .update(cx, |thread, cx| {
2710 let pending_tool_use = thread.tool_use.insert_tool_output(
2711 tool_use_id.clone(),
2712 tool_name,
2713 output,
2714 thread.configured_model.as_ref(),
2715 thread.completion_mode,
2716 );
2717 thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2718 })
2719 .ok();
2720 }
2721 })
2722 }
2723
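    /// Called whenever a single tool use completes (or is canceled). Once every pending tool has
    /// finished and the run wasn't canceled, the collected tool results are sent back to the
    /// model to continue the turn.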
2724 fn tool_finished(
2725 &mut self,
2726 tool_use_id: LanguageModelToolUseId,
2727 pending_tool_use: Option<PendingToolUse>,
2728 canceled: bool,
2729 window: Option<AnyWindowHandle>,
2730 cx: &mut Context<Self>,
2731 ) {
2732 if self.all_tools_finished() {
2733 if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
2734 if !canceled {
2735 self.send_to_model(model.clone(), CompletionIntent::ToolResults, window, cx);
2736 }
2737 self.auto_capture_telemetry(cx);
2738 }
2739 }
2740
2741 cx.emit(ThreadEvent::ToolFinished {
2742 tool_use_id,
2743 pending_tool_use,
2744 });
2745 }
2746
2747 /// Cancels the last pending completion, if there are any pending.
2748 ///
2749 /// Returns whether a completion was canceled.
2750 pub fn cancel_last_completion(
2751 &mut self,
2752 window: Option<AnyWindowHandle>,
2753 cx: &mut Context<Self>,
2754 ) -> bool {
2755 let mut canceled = self.pending_completions.pop().is_some() || self.retry_state.is_some();
2756
2757 self.retry_state = None;
2758
2759 for pending_tool_use in self.tool_use.cancel_pending() {
2760 canceled = true;
2761 self.tool_finished(
2762 pending_tool_use.id.clone(),
2763 Some(pending_tool_use),
2764 true,
2765 window,
2766 cx,
2767 );
2768 }
2769
2770 if canceled {
2771 cx.emit(ThreadEvent::CompletionCanceled);
2772
2773 // When canceled, we always want to insert the checkpoint.
2774 // (We skip over finalize_pending_checkpoint, because it
2775 // would conclude we didn't have anything to insert here.)
2776 if let Some(checkpoint) = self.pending_checkpoint.take() {
2777 self.insert_checkpoint(checkpoint, cx);
2778 }
2779 } else {
2780 self.finalize_pending_checkpoint(cx);
2781 }
2782
2783 canceled
2784 }
2785
2786 /// Signals that any in-progress editing should be canceled.
2787 ///
2788 /// This method is used to notify listeners (like ActiveThread) that
2789 /// they should cancel any editing operations.
2790 pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
2791 cx.emit(ThreadEvent::CancelEditing);
2792 }
2793
2794 pub fn feedback(&self) -> Option<ThreadFeedback> {
2795 self.feedback
2796 }
2797
2798 pub fn message_feedback(&self, message_id: MessageId) -> Option<ThreadFeedback> {
2799 self.message_feedback.get(&message_id).copied()
2800 }
2801
2802 pub fn report_message_feedback(
2803 &mut self,
2804 message_id: MessageId,
2805 feedback: ThreadFeedback,
2806 cx: &mut Context<Self>,
2807 ) -> Task<Result<()>> {
2808 if self.message_feedback.get(&message_id) == Some(&feedback) {
2809 return Task::ready(Ok(()));
2810 }
2811
2812 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2813 let serialized_thread = self.serialize(cx);
2814 let thread_id = self.id().clone();
2815 let client = self.project.read(cx).client();
2816
2817 let enabled_tool_names: Vec<String> = self
2818 .profile
2819 .enabled_tools(cx)
2820 .iter()
2821 .map(|(name, _)| name.clone().into())
2822 .collect();
2823
2824 self.message_feedback.insert(message_id, feedback);
2825
2826 cx.notify();
2827
2828 let message_content = self
2829 .message(message_id)
2830 .map(|msg| msg.to_string())
2831 .unwrap_or_default();
2832
2833 cx.background_spawn(async move {
2834 let final_project_snapshot = final_project_snapshot.await;
2835 let serialized_thread = serialized_thread.await?;
2836 let thread_data =
2837 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
2838
2839 let rating = match feedback {
2840 ThreadFeedback::Positive => "positive",
2841 ThreadFeedback::Negative => "negative",
2842 };
2843 telemetry::event!(
2844 "Assistant Thread Rated",
2845 rating,
2846 thread_id,
2847 enabled_tool_names,
2848 message_id = message_id.0,
2849 message_content,
2850 thread_data,
2851 final_project_snapshot
2852 );
2853 client.telemetry().flush_events().await;
2854
2855 Ok(())
2856 })
2857 }
2858
2859 pub fn report_feedback(
2860 &mut self,
2861 feedback: ThreadFeedback,
2862 cx: &mut Context<Self>,
2863 ) -> Task<Result<()>> {
2864 let last_assistant_message_id = self
2865 .messages
2866 .iter()
2867 .rev()
2868 .find(|msg| msg.role == Role::Assistant)
2869 .map(|msg| msg.id);
2870
2871 if let Some(message_id) = last_assistant_message_id {
2872 self.report_message_feedback(message_id, feedback, cx)
2873 } else {
2874 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2875 let serialized_thread = self.serialize(cx);
2876 let thread_id = self.id().clone();
2877 let client = self.project.read(cx).client();
2878 self.feedback = Some(feedback);
2879 cx.notify();
2880
2881 cx.background_spawn(async move {
2882 let final_project_snapshot = final_project_snapshot.await;
2883 let serialized_thread = serialized_thread.await?;
2884 let thread_data = serde_json::to_value(serialized_thread)
2885 .unwrap_or_else(|_| serde_json::Value::Null);
2886
2887 let rating = match feedback {
2888 ThreadFeedback::Positive => "positive",
2889 ThreadFeedback::Negative => "negative",
2890 };
2891 telemetry::event!(
2892 "Assistant Thread Rated",
2893 rating,
2894 thread_id,
2895 thread_data,
2896 final_project_snapshot
2897 );
2898 client.telemetry().flush_events().await;
2899
2900 Ok(())
2901 })
2902 }
2903 }
2904
2905 /// Create a snapshot of the current project state including git information and unsaved buffers.
2906 fn project_snapshot(
2907 project: Entity<Project>,
2908 cx: &mut Context<Self>,
2909 ) -> Task<Arc<ProjectSnapshot>> {
2910 let git_store = project.read(cx).git_store().clone();
2911 let worktree_snapshots: Vec<_> = project
2912 .read(cx)
2913 .visible_worktrees(cx)
2914 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
2915 .collect();
2916
2917 cx.spawn(async move |_, cx| {
2918 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
2919
2920 let mut unsaved_buffers = Vec::new();
2921 cx.update(|app_cx| {
2922 let buffer_store = project.read(app_cx).buffer_store();
2923 for buffer_handle in buffer_store.read(app_cx).buffers() {
2924 let buffer = buffer_handle.read(app_cx);
2925 if buffer.is_dirty() {
2926 if let Some(file) = buffer.file() {
2927 let path = file.path().to_string_lossy().to_string();
2928 unsaved_buffers.push(path);
2929 }
2930 }
2931 }
2932 })
2933 .ok();
2934
2935 Arc::new(ProjectSnapshot {
2936 worktree_snapshots,
2937 unsaved_buffer_paths: unsaved_buffers,
2938 timestamp: Utc::now(),
2939 })
2940 })
2941 }
2942
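    /// Captures a snapshot of a single worktree: its absolute path plus git state (remote URL,
    /// HEAD SHA, current branch, and a diff against HEAD) when a local repository containing it
    /// is found.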
2943 fn worktree_snapshot(
2944 worktree: Entity<project::Worktree>,
2945 git_store: Entity<GitStore>,
2946 cx: &App,
2947 ) -> Task<WorktreeSnapshot> {
2948 cx.spawn(async move |cx| {
2949 // Get worktree path and snapshot
2950 let worktree_info = cx.update(|app_cx| {
2951 let worktree = worktree.read(app_cx);
2952 let path = worktree.abs_path().to_string_lossy().to_string();
2953 let snapshot = worktree.snapshot();
2954 (path, snapshot)
2955 });
2956
2957 let Ok((worktree_path, _snapshot)) = worktree_info else {
2958 return WorktreeSnapshot {
2959 worktree_path: String::new(),
2960 git_state: None,
2961 };
2962 };
2963
2964 let git_state = git_store
2965 .update(cx, |git_store, cx| {
2966 git_store
2967 .repositories()
2968 .values()
2969 .find(|repo| {
2970 repo.read(cx)
2971 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
2972 .is_some()
2973 })
2974 .cloned()
2975 })
2976 .ok()
2977 .flatten()
2978 .map(|repo| {
2979 repo.update(cx, |repo, _| {
2980 let current_branch =
2981 repo.branch.as_ref().map(|branch| branch.name().to_owned());
2982 repo.send_job(None, |state, _| async move {
2983 let RepositoryState::Local { backend, .. } = state else {
2984 return GitState {
2985 remote_url: None,
2986 head_sha: None,
2987 current_branch,
2988 diff: None,
2989 };
2990 };
2991
2992 let remote_url = backend.remote_url("origin");
2993 let head_sha = backend.head_sha().await;
2994 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
2995
2996 GitState {
2997 remote_url,
2998 head_sha,
2999 current_branch,
3000 diff,
3001 }
3002 })
3003 })
3004 });
3005
3006 let git_state = match git_state {
3007 Some(git_state) => match git_state.ok() {
3008 Some(git_state) => git_state.await.ok(),
3009 None => None,
3010 },
3011 None => None,
3012 };
3013
3014 WorktreeSnapshot {
3015 worktree_path,
3016 git_state,
3017 }
3018 })
3019 }
3020
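    /// Renders the entire thread as Markdown: the summary as a top-level heading, one section per
    /// message (including any attached context), and each message's tool uses and tool results.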
3021 pub fn to_markdown(&self, cx: &App) -> Result<String> {
3022 let mut markdown = Vec::new();
3023
3024 let summary = self.summary().or_default();
3025 writeln!(markdown, "# {summary}\n")?;
3026
3027 for message in self.messages() {
3028 writeln!(
3029 markdown,
3030 "## {role}\n",
3031 role = match message.role {
3032 Role::User => "User",
3033 Role::Assistant => "Agent",
3034 Role::System => "System",
3035 }
3036 )?;
3037
3038 if !message.loaded_context.text.is_empty() {
3039 writeln!(markdown, "{}", message.loaded_context.text)?;
3040 }
3041
3042 if !message.loaded_context.images.is_empty() {
3043 writeln!(
3044 markdown,
3045 "\n{} images attached as context.\n",
3046 message.loaded_context.images.len()
3047 )?;
3048 }
3049
3050 for segment in &message.segments {
3051 match segment {
3052 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
3053 MessageSegment::Thinking { text, .. } => {
3054 writeln!(markdown, "<think>\n{}\n</think>\n", text)?
3055 }
3056 MessageSegment::RedactedThinking(_) => {}
3057 }
3058 }
3059
3060 for tool_use in self.tool_uses_for_message(message.id, cx) {
3061 writeln!(
3062 markdown,
3063 "**Use Tool: {} ({})**",
3064 tool_use.name, tool_use.id
3065 )?;
3066 writeln!(markdown, "```json")?;
3067 writeln!(
3068 markdown,
3069 "{}",
3070 serde_json::to_string_pretty(&tool_use.input)?
3071 )?;
3072 writeln!(markdown, "```")?;
3073 }
3074
3075 for tool_result in self.tool_results_for_message(message.id) {
3076 write!(markdown, "\n**Tool Results: {}", tool_result.tool_use_id)?;
3077 if tool_result.is_error {
3078 write!(markdown, " (Error)")?;
3079 }
3080
3081 writeln!(markdown, "**\n")?;
3082 match &tool_result.content {
3083 LanguageModelToolResultContent::Text(text) => {
3084 writeln!(markdown, "{text}")?;
3085 }
3086 LanguageModelToolResultContent::Image(image) => {
                        writeln!(markdown, "![Image]({})", image.source)?;
3088 }
3089 }
3090
3091 if let Some(output) = tool_result.output.as_ref() {
3092 writeln!(
3093 markdown,
3094 "\n\nDebug Output:\n\n```json\n{}\n```\n",
3095 serde_json::to_string_pretty(output)?
3096 )?;
3097 }
3098 }
3099 }
3100
3101 Ok(String::from_utf8_lossy(&markdown).to_string())
3102 }
3103
3104 pub fn keep_edits_in_range(
3105 &mut self,
3106 buffer: Entity<language::Buffer>,
3107 buffer_range: Range<language::Anchor>,
3108 cx: &mut Context<Self>,
3109 ) {
3110 self.action_log.update(cx, |action_log, cx| {
3111 action_log.keep_edits_in_range(buffer, buffer_range, cx)
3112 });
3113 }
3114
3115 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
3116 self.action_log
3117 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
3118 }
3119
3120 pub fn reject_edits_in_ranges(
3121 &mut self,
3122 buffer: Entity<language::Buffer>,
3123 buffer_ranges: Vec<Range<language::Anchor>>,
3124 cx: &mut Context<Self>,
3125 ) -> Task<Result<()>> {
3126 self.action_log.update(cx, |action_log, cx| {
3127 action_log.reject_edits_in_ranges(buffer, buffer_ranges, cx)
3128 })
3129 }
3130
3131 pub fn action_log(&self) -> &Entity<ActionLog> {
3132 &self.action_log
3133 }
3134
3135 pub fn project(&self) -> &Entity<Project> {
3136 &self.project
3137 }
3138
3139 pub fn auto_capture_telemetry(&mut self, cx: &mut Context<Self>) {
3140 if !cx.has_flag::<feature_flags::ThreadAutoCaptureFeatureFlag>() {
3141 return;
3142 }
3143
3144 let now = Instant::now();
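        // Throttle auto-capture to at most once every 10 seconds per thread.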
3145 if let Some(last) = self.last_auto_capture_at {
3146 if now.duration_since(last).as_secs() < 10 {
3147 return;
3148 }
3149 }
3150
3151 self.last_auto_capture_at = Some(now);
3152
3153 let thread_id = self.id().clone();
3154 let github_login = self
3155 .project
3156 .read(cx)
3157 .user_store()
3158 .read(cx)
3159 .current_user()
3160 .map(|user| user.github_login.clone());
3161 let client = self.project.read(cx).client();
3162 let serialize_task = self.serialize(cx);
3163
3164 cx.background_executor()
3165 .spawn(async move {
3166 if let Ok(serialized_thread) = serialize_task.await {
3167 if let Ok(thread_data) = serde_json::to_value(serialized_thread) {
3168 telemetry::event!(
3169 "Agent Thread Auto-Captured",
3170 thread_id = thread_id.to_string(),
3171 thread_data = thread_data,
3172 auto_capture_reason = "tracked_user",
3173 github_login = github_login
3174 );
3175
3176 client.telemetry().flush_events().await;
3177 }
3178 }
3179 })
3180 .detach();
3181 }
3182
3183 pub fn cumulative_token_usage(&self) -> TokenUsage {
3184 self.cumulative_token_usage
3185 }
3186
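    /// Returns the token usage recorded as of the message preceding `message_id` (zero if it is
    /// the first message), along with the model's context-window size for the current completion
    /// mode.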
3187 pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
3188 let Some(model) = self.configured_model.as_ref() else {
3189 return TotalTokenUsage::default();
3190 };
3191
3192 let max = model
3193 .model
3194 .max_token_count_for_mode(self.completion_mode().into());
3195
3196 let index = self
3197 .messages
3198 .iter()
3199 .position(|msg| msg.id == message_id)
3200 .unwrap_or(0);
3201
3202 if index == 0 {
3203 return TotalTokenUsage { total: 0, max };
3204 }
3205
3206 let token_usage = &self
3207 .request_token_usage
3208 .get(index - 1)
3209 .cloned()
3210 .unwrap_or_default();
3211
3212 TotalTokenUsage {
3213 total: token_usage.total_tokens(),
3214 max,
3215 }
3216 }
3217
3218 pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
3219 let model = self.configured_model.as_ref()?;
3220
3221 let max = model
3222 .model
3223 .max_token_count_for_mode(self.completion_mode().into());
3224
3225 if let Some(exceeded_error) = &self.exceeded_window_error {
3226 if model.model.id() == exceeded_error.model_id {
3227 return Some(TotalTokenUsage {
3228 total: exceeded_error.token_count,
3229 max,
3230 });
3231 }
3232 }
3233
3234 let total = self
3235 .token_usage_at_last_message()
3236 .unwrap_or_default()
3237 .total_tokens();
3238
3239 Some(TotalTokenUsage { total, max })
3240 }
3241
3242 fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
3243 self.request_token_usage
3244 .get(self.messages.len().saturating_sub(1))
3245 .or_else(|| self.request_token_usage.last())
3246 .cloned()
3247 }
3248
3249 fn update_token_usage_at_last_message(&mut self, token_usage: TokenUsage) {
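        // Keep `request_token_usage` aligned one-entry-per-message, backfilling any new slots with
        // the most recent known usage before overwriting the entry for the last message.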
3250 let placeholder = self.token_usage_at_last_message().unwrap_or_default();
3251 self.request_token_usage
3252 .resize(self.messages.len(), placeholder);
3253
3254 if let Some(last) = self.request_token_usage.last_mut() {
3255 *last = token_usage;
3256 }
3257 }
3258
3259 fn update_model_request_usage(&self, amount: u32, limit: UsageLimit, cx: &mut Context<Self>) {
3260 self.project.update(cx, |project, cx| {
3261 project.user_store().update(cx, |user_store, cx| {
3262 user_store.update_model_request_usage(
3263 ModelRequestUsage(RequestUsage {
3264 amount: amount as i32,
3265 limit,
3266 }),
3267 cx,
3268 )
3269 })
3270 });
3271 }
3272
3273 pub fn deny_tool_use(
3274 &mut self,
3275 tool_use_id: LanguageModelToolUseId,
3276 tool_name: Arc<str>,
3277 window: Option<AnyWindowHandle>,
3278 cx: &mut Context<Self>,
3279 ) {
3280 let err = Err(anyhow::anyhow!(
3281 "Permission to run tool action denied by user"
3282 ));
3283
3284 self.tool_use.insert_tool_output(
3285 tool_use_id.clone(),
3286 tool_name,
3287 err,
3288 self.configured_model.as_ref(),
3289 self.completion_mode,
3290 );
3291 self.tool_finished(tool_use_id.clone(), None, true, window, cx);
3292 }
3293}
3294
3295#[derive(Debug, Clone, Error)]
3296pub enum ThreadError {
3297 #[error("Payment required")]
3298 PaymentRequired,
3299 #[error("Model request limit reached")]
3300 ModelRequestLimitReached { plan: Plan },
3301 #[error("Message {header}: {message}")]
3302 Message {
3303 header: SharedString,
3304 message: SharedString,
3305 },
3306 #[error("Retryable error: {message}")]
3307 RetryableError {
3308 message: SharedString,
3309 can_enable_burn_mode: bool,
3310 },
3311}
3312
3313#[derive(Debug, Clone)]
3314pub enum ThreadEvent {
3315 ShowError(ThreadError),
3316 StreamedCompletion,
3317 ReceivedTextChunk,
3318 NewRequest,
3319 StreamedAssistantText(MessageId, String),
3320 StreamedAssistantThinking(MessageId, String),
3321 StreamedToolUse {
3322 tool_use_id: LanguageModelToolUseId,
3323 ui_text: Arc<str>,
3324 input: serde_json::Value,
3325 },
3326 MissingToolUse {
3327 tool_use_id: LanguageModelToolUseId,
3328 ui_text: Arc<str>,
3329 },
3330 InvalidToolInput {
3331 tool_use_id: LanguageModelToolUseId,
3332 ui_text: Arc<str>,
3333 invalid_input_json: Arc<str>,
3334 },
3335 Stopped(Result<StopReason, Arc<anyhow::Error>>),
3336 MessageAdded(MessageId),
3337 MessageEdited(MessageId),
3338 MessageDeleted(MessageId),
3339 SummaryGenerated,
3340 SummaryChanged,
3341 UsePendingTools {
3342 tool_uses: Vec<PendingToolUse>,
3343 },
3344 ToolFinished {
3345 #[allow(unused)]
3346 tool_use_id: LanguageModelToolUseId,
3347 /// The pending tool use that corresponds to this tool.
3348 pending_tool_use: Option<PendingToolUse>,
3349 },
3350 CheckpointChanged,
3351 ToolConfirmationNeeded,
3352 ToolUseLimitReached,
3353 CancelEditing,
3354 CompletionCanceled,
3355 ProfileChanged,
3356}
3357
3358impl EventEmitter<ThreadEvent> for Thread {}
3359
3360struct PendingCompletion {
3361 id: usize,
3362 queue_state: QueueState,
3363 _task: Task<()>,
3364}
3365
3366#[cfg(test)]
3367mod tests {
3368 use super::*;
3369 use crate::{
3370 context::load_context, context_store::ContextStore, thread_store, thread_store::ThreadStore,
3371 };
3372
3373 // Test-specific constants
3374 const TEST_RATE_LIMIT_RETRY_SECS: u64 = 30;
3375 use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
3376 use assistant_tool::ToolRegistry;
3377 use assistant_tools;
3378 use futures::StreamExt;
3379 use futures::future::BoxFuture;
3380 use futures::stream::BoxStream;
3381 use gpui::TestAppContext;
3382 use http_client;
3383 use language_model::fake_provider::{FakeLanguageModel, FakeLanguageModelProvider};
3384 use language_model::{
3385 LanguageModelCompletionError, LanguageModelName, LanguageModelProviderId,
3386 LanguageModelProviderName, LanguageModelToolChoice,
3387 };
3388 use parking_lot::Mutex;
3389 use project::{FakeFs, Project};
3390 use prompt_store::PromptBuilder;
3391 use serde_json::json;
3392 use settings::{Settings, SettingsStore};
3393 use std::sync::Arc;
3394 use std::time::Duration;
3395 use theme::ThemeSettings;
3396 use util::path;
3397 use workspace::Workspace;
3398
3399 #[gpui::test]
3400 async fn test_message_with_context(cx: &mut TestAppContext) {
3401 init_test_settings(cx);
3402
3403 let project = create_test_project(
3404 cx,
3405 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3406 )
3407 .await;
3408
3409 let (_workspace, _thread_store, thread, context_store, model) =
3410 setup_test_environment(cx, project.clone()).await;
3411
3412 add_file_to_context(&project, &context_store, "test/code.rs", cx)
3413 .await
3414 .unwrap();
3415
3416 let context =
3417 context_store.read_with(cx, |store, _| store.context().next().cloned().unwrap());
3418 let loaded_context = cx
3419 .update(|cx| load_context(vec![context], &project, &None, cx))
3420 .await;
3421
3422 // Insert user message with context
3423 let message_id = thread.update(cx, |thread, cx| {
3424 thread.insert_user_message(
3425 "Please explain this code",
3426 loaded_context,
3427 None,
3428 Vec::new(),
3429 cx,
3430 )
3431 });
3432
3433 // Check content and context in message object
3434 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3435
3436 // Use different path format strings based on platform for the test
3437 #[cfg(windows)]
3438 let path_part = r"test\code.rs";
3439 #[cfg(not(windows))]
3440 let path_part = "test/code.rs";
3441
3442 let expected_context = format!(
3443 r#"
3444<context>
3445The following items were attached by the user. They are up-to-date and don't need to be re-read.
3446
3447<files>
3448```rs {path_part}
3449fn main() {{
3450 println!("Hello, world!");
3451}}
3452```
3453</files>
3454</context>
3455"#
3456 );
3457
3458 assert_eq!(message.role, Role::User);
3459 assert_eq!(message.segments.len(), 1);
3460 assert_eq!(
3461 message.segments[0],
3462 MessageSegment::Text("Please explain this code".to_string())
3463 );
3464 assert_eq!(message.loaded_context.text, expected_context);
3465
3466 // Check message in request
3467 let request = thread.update(cx, |thread, cx| {
3468 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3469 });
3470
3471 assert_eq!(request.messages.len(), 2);
3472 let expected_full_message = format!("{}Please explain this code", expected_context);
3473 assert_eq!(request.messages[1].string_contents(), expected_full_message);
3474 }
3475
3476 #[gpui::test]
3477 async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
3478 init_test_settings(cx);
3479
3480 let project = create_test_project(
3481 cx,
3482 json!({
3483 "file1.rs": "fn function1() {}\n",
3484 "file2.rs": "fn function2() {}\n",
3485 "file3.rs": "fn function3() {}\n",
3486 "file4.rs": "fn function4() {}\n",
3487 }),
3488 )
3489 .await;
3490
3491 let (_, _thread_store, thread, context_store, model) =
3492 setup_test_environment(cx, project.clone()).await;
3493
3494 // First message with context 1
3495 add_file_to_context(&project, &context_store, "test/file1.rs", cx)
3496 .await
3497 .unwrap();
3498 let new_contexts = context_store.update(cx, |store, cx| {
3499 store.new_context_for_thread(thread.read(cx), None)
3500 });
3501 assert_eq!(new_contexts.len(), 1);
3502 let loaded_context = cx
3503 .update(|cx| load_context(new_contexts, &project, &None, cx))
3504 .await;
3505 let message1_id = thread.update(cx, |thread, cx| {
3506 thread.insert_user_message("Message 1", loaded_context, None, Vec::new(), cx)
3507 });
3508
3509 // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
3510 add_file_to_context(&project, &context_store, "test/file2.rs", cx)
3511 .await
3512 .unwrap();
3513 let new_contexts = context_store.update(cx, |store, cx| {
3514 store.new_context_for_thread(thread.read(cx), None)
3515 });
3516 assert_eq!(new_contexts.len(), 1);
3517 let loaded_context = cx
3518 .update(|cx| load_context(new_contexts, &project, &None, cx))
3519 .await;
3520 let message2_id = thread.update(cx, |thread, cx| {
3521 thread.insert_user_message("Message 2", loaded_context, None, Vec::new(), cx)
3522 });
3523
3524 // Third message with all three contexts (contexts 1 and 2 should be skipped)
3526 add_file_to_context(&project, &context_store, "test/file3.rs", cx)
3527 .await
3528 .unwrap();
3529 let new_contexts = context_store.update(cx, |store, cx| {
3530 store.new_context_for_thread(thread.read(cx), None)
3531 });
3532 assert_eq!(new_contexts.len(), 1);
3533 let loaded_context = cx
3534 .update(|cx| load_context(new_contexts, &project, &None, cx))
3535 .await;
3536 let message3_id = thread.update(cx, |thread, cx| {
3537 thread.insert_user_message("Message 3", loaded_context, None, Vec::new(), cx)
3538 });
3539
3540 // Check what contexts are included in each message
3541 let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
3542 (
3543 thread.message(message1_id).unwrap().clone(),
3544 thread.message(message2_id).unwrap().clone(),
3545 thread.message(message3_id).unwrap().clone(),
3546 )
3547 });
3548
3549 // First message should include context 1
3550 assert!(message1.loaded_context.text.contains("file1.rs"));
3551
3552 // Second message should include only context 2 (not 1)
3553 assert!(!message2.loaded_context.text.contains("file1.rs"));
3554 assert!(message2.loaded_context.text.contains("file2.rs"));
3555
3556 // Third message should include only context 3 (not 1 or 2)
3557 assert!(!message3.loaded_context.text.contains("file1.rs"));
3558 assert!(!message3.loaded_context.text.contains("file2.rs"));
3559 assert!(message3.loaded_context.text.contains("file3.rs"));
3560
3561 // Check entire request to make sure all contexts are properly included
3562 let request = thread.update(cx, |thread, cx| {
3563 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3564 });
3565
3566 // The request should contain all 3 messages
3567 assert_eq!(request.messages.len(), 4);
3568
3569 // Check that the contexts are properly formatted in each message
3570 assert!(request.messages[1].string_contents().contains("file1.rs"));
3571 assert!(!request.messages[1].string_contents().contains("file2.rs"));
3572 assert!(!request.messages[1].string_contents().contains("file3.rs"));
3573
3574 assert!(!request.messages[2].string_contents().contains("file1.rs"));
3575 assert!(request.messages[2].string_contents().contains("file2.rs"));
3576 assert!(!request.messages[2].string_contents().contains("file3.rs"));
3577
3578 assert!(!request.messages[3].string_contents().contains("file1.rs"));
3579 assert!(!request.messages[3].string_contents().contains("file2.rs"));
3580 assert!(request.messages[3].string_contents().contains("file3.rs"));
3581
3582 add_file_to_context(&project, &context_store, "test/file4.rs", cx)
3583 .await
3584 .unwrap();
3585 let new_contexts = context_store.update(cx, |store, cx| {
3586 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3587 });
3588 assert_eq!(new_contexts.len(), 3);
3589 let loaded_context = cx
3590 .update(|cx| load_context(new_contexts, &project, &None, cx))
3591 .await
3592 .loaded_context;
3593
3594 assert!(!loaded_context.text.contains("file1.rs"));
3595 assert!(loaded_context.text.contains("file2.rs"));
3596 assert!(loaded_context.text.contains("file3.rs"));
3597 assert!(loaded_context.text.contains("file4.rs"));
3598
3599 let new_contexts = context_store.update(cx, |store, cx| {
3600 // Remove file4.rs
3601 store.remove_context(&loaded_context.contexts[2].handle(), cx);
3602 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3603 });
3604 assert_eq!(new_contexts.len(), 2);
3605 let loaded_context = cx
3606 .update(|cx| load_context(new_contexts, &project, &None, cx))
3607 .await
3608 .loaded_context;
3609
3610 assert!(!loaded_context.text.contains("file1.rs"));
3611 assert!(loaded_context.text.contains("file2.rs"));
3612 assert!(loaded_context.text.contains("file3.rs"));
3613 assert!(!loaded_context.text.contains("file4.rs"));
3614
3615 let new_contexts = context_store.update(cx, |store, cx| {
3616 // Remove file3.rs
3617 store.remove_context(&loaded_context.contexts[1].handle(), cx);
3618 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3619 });
3620 assert_eq!(new_contexts.len(), 1);
3621 let loaded_context = cx
3622 .update(|cx| load_context(new_contexts, &project, &None, cx))
3623 .await
3624 .loaded_context;
3625
3626 assert!(!loaded_context.text.contains("file1.rs"));
3627 assert!(loaded_context.text.contains("file2.rs"));
3628 assert!(!loaded_context.text.contains("file3.rs"));
3629 assert!(!loaded_context.text.contains("file4.rs"));
3630 }
3631
3632 #[gpui::test]
3633 async fn test_message_without_files(cx: &mut TestAppContext) {
3634 init_test_settings(cx);
3635
3636 let project = create_test_project(
3637 cx,
3638 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3639 )
3640 .await;
3641
3642 let (_, _thread_store, thread, _context_store, model) =
3643 setup_test_environment(cx, project.clone()).await;
3644
3645 // Insert user message without any context (empty context vector)
3646 let message_id = thread.update(cx, |thread, cx| {
3647 thread.insert_user_message(
3648 "What is the best way to learn Rust?",
3649 ContextLoadResult::default(),
3650 None,
3651 Vec::new(),
3652 cx,
3653 )
3654 });
3655
3656 // Check content and context in message object
3657 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3658
3659 // Context should be empty when no files are included
3660 assert_eq!(message.role, Role::User);
3661 assert_eq!(message.segments.len(), 1);
3662 assert_eq!(
3663 message.segments[0],
3664 MessageSegment::Text("What is the best way to learn Rust?".to_string())
3665 );
3666 assert_eq!(message.loaded_context.text, "");
3667
3668 // Check message in request
3669 let request = thread.update(cx, |thread, cx| {
3670 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3671 });
3672
3673 assert_eq!(request.messages.len(), 2);
3674 assert_eq!(
3675 request.messages[1].string_contents(),
3676 "What is the best way to learn Rust?"
3677 );
3678
3679 // Add second message, also without context
3680 let message2_id = thread.update(cx, |thread, cx| {
3681 thread.insert_user_message(
3682 "Are there any good books?",
3683 ContextLoadResult::default(),
3684 None,
3685 Vec::new(),
3686 cx,
3687 )
3688 });
3689
3690 let message2 =
3691 thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
3692 assert_eq!(message2.loaded_context.text, "");
3693
3694 // Check that both messages appear in the request
3695 let request = thread.update(cx, |thread, cx| {
3696 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3697 });
3698
3699 assert_eq!(request.messages.len(), 3);
3700 assert_eq!(
3701 request.messages[1].string_contents(),
3702 "What is the best way to learn Rust?"
3703 );
3704 assert_eq!(
3705 request.messages[2].string_contents(),
3706 "Are there any good books?"
3707 );
3708 }
3709
3710 #[gpui::test]
3711 #[ignore] // turn this test on when project_notifications tool is re-enabled
3712 async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
3713 init_test_settings(cx);
3714
3715 let project = create_test_project(
3716 cx,
3717 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3718 )
3719 .await;
3720
3721 let (_workspace, _thread_store, thread, context_store, model) =
3722 setup_test_environment(cx, project.clone()).await;
3723
3724 // Add a buffer to the context. This will be a tracked buffer
3725 let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
3726 .await
3727 .unwrap();
3728
3729 let context = context_store
3730 .read_with(cx, |store, _| store.context().next().cloned())
3731 .unwrap();
3732 let loaded_context = cx
3733 .update(|cx| load_context(vec![context], &project, &None, cx))
3734 .await;
3735
3736 // Insert user message and assistant response
3737 thread.update(cx, |thread, cx| {
3738 thread.insert_user_message("Explain this code", loaded_context, None, Vec::new(), cx);
3739 thread.insert_assistant_message(
3740 vec![MessageSegment::Text("This code prints 42.".into())],
3741 cx,
3742 );
3743 });
3744 cx.run_until_parked();
3745
3746 // We shouldn't have a stale buffer notification yet
3747 let notifications = thread.read_with(cx, |thread, _| {
3748 find_tool_uses(thread, "project_notifications")
3749 });
3750 assert!(
3751 notifications.is_empty(),
3752 "Should not have stale buffer notification before buffer is modified"
3753 );
3754
3755 // Modify the buffer
3756 buffer.update(cx, |buffer, cx| {
3757 buffer.edit(
3758 [(1..1, "\n println!(\"Added a new line\");\n")],
3759 None,
3760 cx,
3761 );
3762 });
3763
3764 // Insert another user message
3765 thread.update(cx, |thread, cx| {
3766 thread.insert_user_message(
3767 "What does the code do now?",
3768 ContextLoadResult::default(),
3769 None,
3770 Vec::new(),
3771 cx,
3772 )
3773 });
3774 cx.run_until_parked();
3775
3776 // Check for the stale buffer warning
3777 thread.update(cx, |thread, cx| {
3778 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3779 });
3780 cx.run_until_parked();
3781
3782 let notifications = thread.read_with(cx, |thread, _cx| {
3783 find_tool_uses(thread, "project_notifications")
3784 });
3785
3786 let [notification] = notifications.as_slice() else {
3787 panic!("Should have a `project_notifications` tool use");
3788 };
3789
3790 let Some(notification_content) = notification.content.to_str() else {
3791 panic!("`project_notifications` should return text");
3792 };
3793
3794 assert!(notification_content.contains("These files have changed since the last read:"));
3795 assert!(notification_content.contains("code.rs"));
3796
3797 // Insert another user message and flush notifications again
3798 thread.update(cx, |thread, cx| {
3799 thread.insert_user_message(
3800 "Can you tell me more?",
3801 ContextLoadResult::default(),
3802 None,
3803 Vec::new(),
3804 cx,
3805 )
3806 });
3807
3808 thread.update(cx, |thread, cx| {
3809 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3810 });
3811 cx.run_until_parked();
3812
3813 // There should be no new notifications (we already flushed one)
3814 let notifications = thread.read_with(cx, |thread, _cx| {
3815 find_tool_uses(thread, "project_notifications")
3816 });
3817
3818 assert_eq!(
3819 notifications.len(),
3820 1,
3821 "Should still have only one notification after second flush - no duplicates"
3822 );
3823 }
3824
3825 fn find_tool_uses(thread: &Thread, tool_name: &str) -> Vec<LanguageModelToolResult> {
3826 thread
3827 .messages()
3828 .flat_map(|message| {
3829 thread
3830 .tool_results_for_message(message.id)
3831 .into_iter()
3832 .filter(|result| result.tool_name == tool_name.into())
3833 .cloned()
3834 .collect::<Vec<_>>()
3835 })
3836 .collect()
3837 }
3838
3839 #[gpui::test]
3840 async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
3841 init_test_settings(cx);
3842
3843 let project = create_test_project(
3844 cx,
3845 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3846 )
3847 .await;
3848
3849 let (_workspace, thread_store, thread, _context_store, _model) =
3850 setup_test_environment(cx, project.clone()).await;
3851
3852 // Check that we are starting with the default profile
3853 let profile = cx.read(|cx| thread.read(cx).profile.clone());
3854 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3855 assert_eq!(
3856 profile,
3857 AgentProfile::new(AgentProfileId::default(), tool_set)
3858 );
3859 }
3860
3861 #[gpui::test]
3862 async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
3863 init_test_settings(cx);
3864
3865 let project = create_test_project(
3866 cx,
3867 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3868 )
3869 .await;
3870
3871 let (_workspace, thread_store, thread, _context_store, _model) =
3872 setup_test_environment(cx, project.clone()).await;
3873
3874 // Profile gets serialized with default values
3875 let serialized = thread
3876 .update(cx, |thread, cx| thread.serialize(cx))
3877 .await
3878 .unwrap();
3879
3880 assert_eq!(serialized.profile, Some(AgentProfileId::default()));
3881
3882 let deserialized = cx.update(|cx| {
3883 thread.update(cx, |thread, cx| {
3884 Thread::deserialize(
3885 thread.id.clone(),
3886 serialized,
3887 thread.project.clone(),
3888 thread.tools.clone(),
3889 thread.prompt_builder.clone(),
3890 thread.project_context.clone(),
3891 None,
3892 cx,
3893 )
3894 })
3895 });
3896 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3897
3898 assert_eq!(
3899 deserialized.profile,
3900 AgentProfile::new(AgentProfileId::default(), tool_set)
3901 );
3902 }
3903
3904 #[gpui::test]
3905 async fn test_temperature_setting(cx: &mut TestAppContext) {
3906 init_test_settings(cx);
3907
3908 let project = create_test_project(
3909 cx,
3910 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3911 )
3912 .await;
3913
3914 let (_workspace, _thread_store, thread, _context_store, model) =
3915 setup_test_environment(cx, project.clone()).await;
3916
3917 // Both model and provider
3918 cx.update(|cx| {
3919 AgentSettings::override_global(
3920 AgentSettings {
3921 model_parameters: vec![LanguageModelParameters {
3922 provider: Some(model.provider_id().0.to_string().into()),
3923 model: Some(model.id().0.clone()),
3924 temperature: Some(0.66),
3925 }],
3926 ..AgentSettings::get_global(cx).clone()
3927 },
3928 cx,
3929 );
3930 });
3931
3932 let request = thread.update(cx, |thread, cx| {
3933 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3934 });
3935 assert_eq!(request.temperature, Some(0.66));
3936
3937 // Only model
3938 cx.update(|cx| {
3939 AgentSettings::override_global(
3940 AgentSettings {
3941 model_parameters: vec![LanguageModelParameters {
3942 provider: None,
3943 model: Some(model.id().0.clone()),
3944 temperature: Some(0.66),
3945 }],
3946 ..AgentSettings::get_global(cx).clone()
3947 },
3948 cx,
3949 );
3950 });
3951
3952 let request = thread.update(cx, |thread, cx| {
3953 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3954 });
3955 assert_eq!(request.temperature, Some(0.66));
3956
3957 // Only provider
3958 cx.update(|cx| {
3959 AgentSettings::override_global(
3960 AgentSettings {
3961 model_parameters: vec![LanguageModelParameters {
3962 provider: Some(model.provider_id().0.to_string().into()),
3963 model: None,
3964 temperature: Some(0.66),
3965 }],
3966 ..AgentSettings::get_global(cx).clone()
3967 },
3968 cx,
3969 );
3970 });
3971
3972 let request = thread.update(cx, |thread, cx| {
3973 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3974 });
3975 assert_eq!(request.temperature, Some(0.66));
3976
3977 // Same model name, different provider
3978 cx.update(|cx| {
3979 AgentSettings::override_global(
3980 AgentSettings {
3981 model_parameters: vec![LanguageModelParameters {
3982 provider: Some("anthropic".into()),
3983 model: Some(model.id().0.clone()),
3984 temperature: Some(0.66),
3985 }],
3986 ..AgentSettings::get_global(cx).clone()
3987 },
3988 cx,
3989 );
3990 });
3991
3992 let request = thread.update(cx, |thread, cx| {
3993 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3994 });
3995 assert_eq!(request.temperature, None);
3996 }
3997
3998 #[gpui::test]
3999 async fn test_thread_summary(cx: &mut TestAppContext) {
4000 init_test_settings(cx);
4001
4002 let project = create_test_project(cx, json!({})).await;
4003
4004 let (_, _thread_store, thread, _context_store, model) =
4005 setup_test_environment(cx, project.clone()).await;
4006
4007 // Initial state should be pending
4008 thread.read_with(cx, |thread, _| {
4009 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4010 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4011 });
4012
4013 // Manually setting the summary should not be allowed in this state
4014 thread.update(cx, |thread, cx| {
4015 thread.set_summary("This should not work", cx);
4016 });
4017
4018 thread.read_with(cx, |thread, _| {
4019 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4020 });
4021
4022 // Send a message
4023 thread.update(cx, |thread, cx| {
4024 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
4025 thread.send_to_model(
4026 model.clone(),
4027 CompletionIntent::ThreadSummarization,
4028 None,
4029 cx,
4030 );
4031 });
4032
4033 let fake_model = model.as_fake();
4034 simulate_successful_response(&fake_model, cx);
4035
4036 // Should start generating summary when there are >= 2 messages
4037 thread.read_with(cx, |thread, _| {
4038 assert_eq!(*thread.summary(), ThreadSummary::Generating);
4039 });
4040
4041 // Should not be able to set the summary while generating
4042 thread.update(cx, |thread, cx| {
4043 thread.set_summary("This should not work either", cx);
4044 });
4045
4046 thread.read_with(cx, |thread, _| {
4047 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4048 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4049 });
4050
4051 cx.run_until_parked();
4052 fake_model.stream_last_completion_response("Brief");
4053 fake_model.stream_last_completion_response(" Introduction");
4054 fake_model.end_last_completion_stream();
4055 cx.run_until_parked();
4056
4057 // Summary should be set
4058 thread.read_with(cx, |thread, _| {
4059 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4060 assert_eq!(thread.summary().or_default(), "Brief Introduction");
4061 });
4062
4063 // Now we should be able to set a summary
4064 thread.update(cx, |thread, cx| {
4065 thread.set_summary("Brief Intro", cx);
4066 });
4067
4068 thread.read_with(cx, |thread, _| {
4069 assert_eq!(thread.summary().or_default(), "Brief Intro");
4070 });
4071
4072 // Test setting an empty summary (should default to DEFAULT)
4073 thread.update(cx, |thread, cx| {
4074 thread.set_summary("", cx);
4075 });
4076
4077 thread.read_with(cx, |thread, _| {
4078 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4079 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4080 });
4081 }
4082
4083 #[gpui::test]
4084 async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
4085 init_test_settings(cx);
4086
4087 let project = create_test_project(cx, json!({})).await;
4088
4089 let (_, _thread_store, thread, _context_store, model) =
4090 setup_test_environment(cx, project.clone()).await;
4091
4092 test_summarize_error(&model, &thread, cx);
4093
4094 // Now we should be able to set a summary
4095 thread.update(cx, |thread, cx| {
4096 thread.set_summary("Brief Intro", cx);
4097 });
4098
4099 thread.read_with(cx, |thread, _| {
4100 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4101 assert_eq!(thread.summary().or_default(), "Brief Intro");
4102 });
4103 }
4104
4105 #[gpui::test]
4106 async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
4107 init_test_settings(cx);
4108
4109 let project = create_test_project(cx, json!({})).await;
4110
4111 let (_, _thread_store, thread, _context_store, model) =
4112 setup_test_environment(cx, project.clone()).await;
4113
4114 test_summarize_error(&model, &thread, cx);
4115
4116 // Sending another message should not trigger another summarize request
4117 thread.update(cx, |thread, cx| {
4118 thread.insert_user_message(
4119 "How are you?",
4120 ContextLoadResult::default(),
4121 None,
4122 vec![],
4123 cx,
4124 );
4125 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4126 });
4127
4128 let fake_model = model.as_fake();
4129 simulate_successful_response(&fake_model, cx);
4130
4131 thread.read_with(cx, |thread, _| {
4132 // State is still Error, not Generating
4133 assert!(matches!(thread.summary(), ThreadSummary::Error));
4134 });
4135
4136 // But the summarize request can be invoked manually
4137 thread.update(cx, |thread, cx| {
4138 thread.summarize(cx);
4139 });
4140
4141 thread.read_with(cx, |thread, _| {
4142 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4143 });
4144
4145 cx.run_until_parked();
4146 fake_model.stream_last_completion_response("A successful summary");
4147 fake_model.end_last_completion_stream();
4148 cx.run_until_parked();
4149
4150 thread.read_with(cx, |thread, _| {
4151 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4152 assert_eq!(thread.summary().or_default(), "A successful summary");
4153 });
4154 }
4155
    // Error kinds used by `ErrorInjector` below to simulate provider failures
4157 enum TestError {
4158 Overloaded,
4159 InternalServerError,
4160 }
4161
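    // Wraps a `FakeLanguageModel` for its metadata but fails every `stream_completion`
    // call with the configured error.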
4162 struct ErrorInjector {
4163 inner: Arc<FakeLanguageModel>,
4164 error_type: TestError,
4165 }
4166
4167 impl ErrorInjector {
4168 fn new(error_type: TestError) -> Self {
4169 Self {
4170 inner: Arc::new(FakeLanguageModel::default()),
4171 error_type,
4172 }
4173 }
4174 }
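    // Everything delegates to the inner fake model except `stream_completion`, which
    // returns the injected error.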
4175
4176 impl LanguageModel for ErrorInjector {
4177 fn id(&self) -> LanguageModelId {
4178 self.inner.id()
4179 }
4180
4181 fn name(&self) -> LanguageModelName {
4182 self.inner.name()
4183 }
4184
4185 fn provider_id(&self) -> LanguageModelProviderId {
4186 self.inner.provider_id()
4187 }
4188
4189 fn provider_name(&self) -> LanguageModelProviderName {
4190 self.inner.provider_name()
4191 }
4192
4193 fn supports_tools(&self) -> bool {
4194 self.inner.supports_tools()
4195 }
4196
4197 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4198 self.inner.supports_tool_choice(choice)
4199 }
4200
4201 fn supports_images(&self) -> bool {
4202 self.inner.supports_images()
4203 }
4204
4205 fn telemetry_id(&self) -> String {
4206 self.inner.telemetry_id()
4207 }
4208
4209 fn max_token_count(&self) -> u64 {
4210 self.inner.max_token_count()
4211 }
4212
4213 fn count_tokens(
4214 &self,
4215 request: LanguageModelRequest,
4216 cx: &App,
4217 ) -> BoxFuture<'static, Result<u64>> {
4218 self.inner.count_tokens(request, cx)
4219 }
4220
4221 fn stream_completion(
4222 &self,
4223 _request: LanguageModelRequest,
4224 _cx: &AsyncApp,
4225 ) -> BoxFuture<
4226 'static,
4227 Result<
4228 BoxStream<
4229 'static,
4230 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4231 >,
4232 LanguageModelCompletionError,
4233 >,
4234 > {
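            // Build the error eagerly so the returned 'static future doesn't need to borrow `self`.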
4235 let error = match self.error_type {
4236 TestError::Overloaded => LanguageModelCompletionError::ServerOverloaded {
4237 provider: self.provider_name(),
4238 retry_after: None,
4239 },
4240 TestError::InternalServerError => {
4241 LanguageModelCompletionError::ApiInternalServerError {
4242 provider: self.provider_name(),
4243 message: "I'm a teapot orbiting the sun".to_string(),
4244 }
4245 }
4246 };
4247 async move {
4248 let stream = futures::stream::once(async move { Err(error) });
4249 Ok(stream.boxed())
4250 }
4251 .boxed()
4252 }
4253
4254 fn as_fake(&self) -> &FakeLanguageModel {
4255 &self.inner
4256 }
4257 }
4258
4259 #[gpui::test]
4260 async fn test_retry_on_overloaded_error(cx: &mut TestAppContext) {
4261 init_test_settings(cx);
4262
4263 let project = create_test_project(cx, json!({})).await;
4264 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4265
4266 // Enable Burn Mode to allow retries
4267 thread.update(cx, |thread, _| {
4268 thread.set_completion_mode(CompletionMode::Burn);
4269 });
4270
4271 // Create model that returns overloaded error
4272 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4273
4274 // Insert a user message
4275 thread.update(cx, |thread, cx| {
4276 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4277 });
4278
4279 // Start completion
4280 thread.update(cx, |thread, cx| {
4281 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4282 });
4283
4284 cx.run_until_parked();
4285
4286 thread.read_with(cx, |thread, _| {
4287 assert!(thread.retry_state.is_some(), "Should have retry state");
4288 let retry_state = thread.retry_state.as_ref().unwrap();
4289 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4290 assert_eq!(
4291 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
4292 "Should retry MAX_RETRY_ATTEMPTS times for overloaded errors"
4293 );
4294 });
4295
4296 // Check that a retry message was added
4297 thread.read_with(cx, |thread, _| {
4298 let mut messages = thread.messages();
4299 assert!(
4300 messages.any(|msg| {
4301 msg.role == Role::System
4302 && msg.ui_only
4303 && msg.segments.iter().any(|seg| {
4304 if let MessageSegment::Text(text) = seg {
4305 text.contains("overloaded")
4306 && text
4307 .contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS))
4308 } else {
4309 false
4310 }
4311 })
4312 }),
4313 "Should have added a system retry message"
4314 );
4315 });
4316
4317 let retry_count = thread.update(cx, |thread, _| {
4318 thread
4319 .messages
4320 .iter()
4321 .filter(|m| {
4322 m.ui_only
4323 && m.segments.iter().any(|s| {
4324 if let MessageSegment::Text(text) = s {
4325 text.contains("Retrying") && text.contains("seconds")
4326 } else {
4327 false
4328 }
4329 })
4330 })
4331 .count()
4332 });
4333
4334 assert_eq!(retry_count, 1, "Should have one retry message");
4335 }
4336
4337 #[gpui::test]
4338 async fn test_retry_on_internal_server_error(cx: &mut TestAppContext) {
4339 init_test_settings(cx);
4340
4341 let project = create_test_project(cx, json!({})).await;
4342 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4343
4344 // Enable Burn Mode to allow retries
4345 thread.update(cx, |thread, _| {
4346 thread.set_completion_mode(CompletionMode::Burn);
4347 });
4348
4349 // Create model that returns internal server error
4350 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4351
4352 // Insert a user message
4353 thread.update(cx, |thread, cx| {
4354 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4355 });
4356
4357 // Start completion
4358 thread.update(cx, |thread, cx| {
4359 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4360 });
4361
4362 cx.run_until_parked();
4363
4364 // Check retry state on thread
4365 thread.read_with(cx, |thread, _| {
4366 assert!(thread.retry_state.is_some(), "Should have retry state");
4367 let retry_state = thread.retry_state.as_ref().unwrap();
4368 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4369 assert_eq!(
                retry_state.max_attempts, 3,
                "Internal server errors should retry up to 3 times"
4372 );
4373 });
4374
4375 // Check that a retry message was added with provider name
4376 thread.read_with(cx, |thread, _| {
4377 let mut messages = thread.messages();
4378 assert!(
4379 messages.any(|msg| {
4380 msg.role == Role::System
4381 && msg.ui_only
4382 && msg.segments.iter().any(|seg| {
4383 if let MessageSegment::Text(text) = seg {
4384 text.contains("internal")
4385 && text.contains("Fake")
4386 && text.contains("Retrying")
4387 && text.contains("attempt 1 of 3")
4388 && text.contains("seconds")
4389 } else {
4390 false
4391 }
4392 })
4393 }),
4394 "Should have added a system retry message with provider name"
4395 );
4396 });
4397
4398 // Count retry messages
4399 let retry_count = thread.update(cx, |thread, _| {
4400 thread
4401 .messages
4402 .iter()
4403 .filter(|m| {
4404 m.ui_only
4405 && m.segments.iter().any(|s| {
4406 if let MessageSegment::Text(text) = s {
4407 text.contains("Retrying") && text.contains("seconds")
4408 } else {
4409 false
4410 }
4411 })
4412 })
4413 .count()
4414 });
4415
4416 assert_eq!(retry_count, 1, "Should have one retry message");
4417 }
4418
4419 #[gpui::test]
4420 async fn test_exponential_backoff_on_retries(cx: &mut TestAppContext) {
4421 init_test_settings(cx);
4422
4423 let project = create_test_project(cx, json!({})).await;
4424 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4425
4426 // Enable Burn Mode to allow retries
4427 thread.update(cx, |thread, _| {
4428 thread.set_completion_mode(CompletionMode::Burn);
4429 });
4430
4431 // Create model that returns internal server error
4432 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4433
4434 // Insert a user message
4435 thread.update(cx, |thread, cx| {
4436 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4437 });
4438
        // Track completion requests so we can count the initial attempt plus the retries
4441 let completion_count = Arc::new(Mutex::new(0));
4442 let completion_count_clone = completion_count.clone();
4443
4444 let _subscription = thread.update(cx, |_, cx| {
4445 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4446 if let ThreadEvent::NewRequest = event {
4447 *completion_count_clone.lock() += 1;
4448 }
4449 })
4450 });
4451
4452 // First attempt
4453 thread.update(cx, |thread, cx| {
4454 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4455 });
4456 cx.run_until_parked();
4457
4458 // Should have scheduled first retry - count retry messages
4459 let retry_count = thread.update(cx, |thread, _| {
4460 thread
4461 .messages
4462 .iter()
4463 .filter(|m| {
4464 m.ui_only
4465 && m.segments.iter().any(|s| {
4466 if let MessageSegment::Text(text) = s {
4467 text.contains("Retrying") && text.contains("seconds")
4468 } else {
4469 false
4470 }
4471 })
4472 })
4473 .count()
4474 });
4475 assert_eq!(retry_count, 1, "Should have scheduled first retry");
4476
4477 // Check retry state
4478 thread.read_with(cx, |thread, _| {
4479 assert!(thread.retry_state.is_some(), "Should have retry state");
4480 let retry_state = thread.retry_state.as_ref().unwrap();
4481 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4482 assert_eq!(
4483 retry_state.max_attempts, 3,
4484 "Internal server errors should retry up to 3 times"
4485 );
4486 });
4487
4488 // Advance clock for first retry
4489 cx.executor().advance_clock(BASE_RETRY_DELAY);
4490 cx.run_until_parked();
4491
4492 // Advance clock for second retry
4493 cx.executor().advance_clock(BASE_RETRY_DELAY);
4494 cx.run_until_parked();
4495
4496 // Advance clock for third retry
4497 cx.executor().advance_clock(BASE_RETRY_DELAY);
4498 cx.run_until_parked();
4499
4500 // Should have completed all retries - count retry messages
4501 let retry_count = thread.update(cx, |thread, _| {
4502 thread
4503 .messages
4504 .iter()
4505 .filter(|m| {
4506 m.ui_only
4507 && m.segments.iter().any(|s| {
4508 if let MessageSegment::Text(text) = s {
4509 text.contains("Retrying") && text.contains("seconds")
4510 } else {
4511 false
4512 }
4513 })
4514 })
4515 .count()
4516 });
4517 assert_eq!(
4518 retry_count, 3,
4519 "Should have 3 retries for internal server errors"
4520 );
4521
4522 // For internal server errors, we retry 3 times and then give up
4523 // Check that retry_state is cleared after all retries
4524 thread.read_with(cx, |thread, _| {
4525 assert!(
4526 thread.retry_state.is_none(),
4527 "Retry state should be cleared after all retries"
4528 );
4529 });
4530
4531 // Verify total attempts (1 initial + 3 retries)
4532 assert_eq!(
4533 *completion_count.lock(),
4534 4,
4535 "Should have attempted once plus 3 retries"
4536 );
4537 }
4538
4539 #[gpui::test]
4540 async fn test_max_retries_exceeded(cx: &mut TestAppContext) {
4541 init_test_settings(cx);
4542
4543 let project = create_test_project(cx, json!({})).await;
4544 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4545
4546 // Enable Burn Mode to allow retries
4547 thread.update(cx, |thread, _| {
4548 thread.set_completion_mode(CompletionMode::Burn);
4549 });
4550
4551 // Create model that returns overloaded error
4552 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4553
4554 // Insert a user message
4555 thread.update(cx, |thread, cx| {
4556 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4557 });
4558
4559 // Track events
4560 let stopped_with_error = Arc::new(Mutex::new(false));
4561 let stopped_with_error_clone = stopped_with_error.clone();
4562
4563 let _subscription = thread.update(cx, |_, cx| {
4564 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4565 if let ThreadEvent::Stopped(Err(_)) = event {
4566 *stopped_with_error_clone.lock() = true;
4567 }
4568 })
4569 });
4570
4571 // Start initial completion
4572 thread.update(cx, |thread, cx| {
4573 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4574 });
4575 cx.run_until_parked();
4576
4577 // Advance through all retries
4578 for _ in 0..MAX_RETRY_ATTEMPTS {
4579 cx.executor().advance_clock(BASE_RETRY_DELAY);
4580 cx.run_until_parked();
4581 }
4582
4583 let retry_count = thread.update(cx, |thread, _| {
4584 thread
4585 .messages
4586 .iter()
4587 .filter(|m| {
4588 m.ui_only
4589 && m.segments.iter().any(|s| {
4590 if let MessageSegment::Text(text) = s {
4591 text.contains("Retrying") && text.contains("seconds")
4592 } else {
4593 false
4594 }
4595 })
4596 })
4597 .count()
4598 });
4599
        // After max retries, every retry attempt should be recorded and a Stopped(Err(...)) event emitted
4601 assert_eq!(
4602 retry_count, MAX_RETRY_ATTEMPTS as usize,
4603 "Should have attempted MAX_RETRY_ATTEMPTS retries for overloaded errors"
4604 );
4605 assert!(
4606 *stopped_with_error.lock(),
4607 "Should emit Stopped(Err(...)) event after max retries exceeded"
4608 );
4609
4610 // Retry state should be cleared
4611 thread.read_with(cx, |thread, _| {
4612 assert!(
4613 thread.retry_state.is_none(),
4614 "Retry state should be cleared after max retries"
4615 );
4616
4617 // Verify we have the expected number of retry messages
4618 let retry_messages = thread
4619 .messages
4620 .iter()
4621 .filter(|msg| msg.ui_only && msg.role == Role::System)
4622 .count();
4623 assert_eq!(
4624 retry_messages, MAX_RETRY_ATTEMPTS as usize,
4625 "Should have MAX_RETRY_ATTEMPTS retry messages for overloaded errors"
4626 );
4627 });
4628 }
4629
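    // Verifies that the ui_only retry notification is kept in the thread as a System
    // message after the retry succeeds.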
4630 #[gpui::test]
4631 async fn test_retry_message_removed_on_retry(cx: &mut TestAppContext) {
4632 init_test_settings(cx);
4633
4634 let project = create_test_project(cx, json!({})).await;
4635 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4636
4637 // Enable Burn Mode to allow retries
4638 thread.update(cx, |thread, _| {
4639 thread.set_completion_mode(CompletionMode::Burn);
4640 });
4641
        // Wrapper model that fails the first request with `ServerOverloaded`, then
        // delegates to the inner fake model on the retry
4643 struct RetryTestModel {
4644 inner: Arc<FakeLanguageModel>,
4645 failed_once: Arc<Mutex<bool>>,
4646 }
4647
4648 impl LanguageModel for RetryTestModel {
4649 fn id(&self) -> LanguageModelId {
4650 self.inner.id()
4651 }
4652
4653 fn name(&self) -> LanguageModelName {
4654 self.inner.name()
4655 }
4656
4657 fn provider_id(&self) -> LanguageModelProviderId {
4658 self.inner.provider_id()
4659 }
4660
4661 fn provider_name(&self) -> LanguageModelProviderName {
4662 self.inner.provider_name()
4663 }
4664
4665 fn supports_tools(&self) -> bool {
4666 self.inner.supports_tools()
4667 }
4668
4669 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4670 self.inner.supports_tool_choice(choice)
4671 }
4672
4673 fn supports_images(&self) -> bool {
4674 self.inner.supports_images()
4675 }
4676
4677 fn telemetry_id(&self) -> String {
4678 self.inner.telemetry_id()
4679 }
4680
4681 fn max_token_count(&self) -> u64 {
4682 self.inner.max_token_count()
4683 }
4684
4685 fn count_tokens(
4686 &self,
4687 request: LanguageModelRequest,
4688 cx: &App,
4689 ) -> BoxFuture<'static, Result<u64>> {
4690 self.inner.count_tokens(request, cx)
4691 }
4692
4693 fn stream_completion(
4694 &self,
4695 request: LanguageModelRequest,
4696 cx: &AsyncApp,
4697 ) -> BoxFuture<
4698 'static,
4699 Result<
4700 BoxStream<
4701 'static,
4702 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4703 >,
4704 LanguageModelCompletionError,
4705 >,
4706 > {
4707 if !*self.failed_once.lock() {
4708 *self.failed_once.lock() = true;
4709 let provider = self.provider_name();
4710 // Return error on first attempt
4711 let stream = futures::stream::once(async move {
4712 Err(LanguageModelCompletionError::ServerOverloaded {
4713 provider,
4714 retry_after: None,
4715 })
4716 });
4717 async move { Ok(stream.boxed()) }.boxed()
4718 } else {
4719 // Succeed on retry
4720 self.inner.stream_completion(request, cx)
4721 }
4722 }
4723
4724 fn as_fake(&self) -> &FakeLanguageModel {
4725 &self.inner
4726 }
4727 }
4728
4729 let model = Arc::new(RetryTestModel {
4730 inner: Arc::new(FakeLanguageModel::default()),
4731 failed_once: Arc::new(Mutex::new(false)),
4732 });
4733
4734 // Insert a user message
4735 thread.update(cx, |thread, cx| {
4736 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4737 });
4738
        // Track when the retried completion streams successfully
4741 let retry_completed = Arc::new(Mutex::new(false));
4742 let retry_completed_clone = retry_completed.clone();
4743
4744 let _subscription = thread.update(cx, |_, cx| {
4745 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4746 if let ThreadEvent::StreamedCompletion = event {
4747 *retry_completed_clone.lock() = true;
4748 }
4749 })
4750 });
4751
4752 // Start completion
4753 thread.update(cx, |thread, cx| {
4754 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4755 });
4756 cx.run_until_parked();
4757
4758 // Get the retry message ID
4759 let retry_message_id = thread.read_with(cx, |thread, _| {
4760 thread
4761 .messages()
4762 .find(|msg| msg.role == Role::System && msg.ui_only)
4763 .map(|msg| msg.id)
4764 .expect("Should have a retry message")
4765 });
4766
4767 // Wait for retry
4768 cx.executor().advance_clock(BASE_RETRY_DELAY);
4769 cx.run_until_parked();
4770
4771 // Stream some successful content
4772 let fake_model = model.as_fake();
4773 // After the retry, there should be a new pending completion
4774 let pending = fake_model.pending_completions();
4775 assert!(
4776 !pending.is_empty(),
4777 "Should have a pending completion after retry"
4778 );
4779 fake_model.stream_completion_response(&pending[0], "Success!");
4780 fake_model.end_completion_stream(&pending[0]);
4781 cx.run_until_parked();
4782
4783 // Check that the retry completed successfully
4784 assert!(
4785 *retry_completed.lock(),
4786 "Retry should have completed successfully"
4787 );
4788
4789 // Retry message should still exist but be marked as ui_only
4790 thread.read_with(cx, |thread, _| {
4791 let retry_msg = thread
4792 .message(retry_message_id)
4793 .expect("Retry message should still exist");
4794 assert!(retry_msg.ui_only, "Retry message should be ui_only");
4795 assert_eq!(
4796 retry_msg.role,
4797 Role::System,
4798 "Retry message should have System role"
4799 );
4800 });
4801 }
4802
4803 #[gpui::test]
4804 async fn test_successful_completion_clears_retry_state(cx: &mut TestAppContext) {
4805 init_test_settings(cx);
4806
4807 let project = create_test_project(cx, json!({})).await;
4808 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4809
4810 // Enable Burn Mode to allow retries
4811 thread.update(cx, |thread, _| {
4812 thread.set_completion_mode(CompletionMode::Burn);
4813 });
4814
4815 // Create a model that fails once then succeeds
4816 struct FailOnceModel {
4817 inner: Arc<FakeLanguageModel>,
4818 failed_once: Arc<Mutex<bool>>,
4819 }
4820
4821 impl LanguageModel for FailOnceModel {
4822 fn id(&self) -> LanguageModelId {
4823 self.inner.id()
4824 }
4825
4826 fn name(&self) -> LanguageModelName {
4827 self.inner.name()
4828 }
4829
4830 fn provider_id(&self) -> LanguageModelProviderId {
4831 self.inner.provider_id()
4832 }
4833
4834 fn provider_name(&self) -> LanguageModelProviderName {
4835 self.inner.provider_name()
4836 }
4837
4838 fn supports_tools(&self) -> bool {
4839 self.inner.supports_tools()
4840 }
4841
4842 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4843 self.inner.supports_tool_choice(choice)
4844 }
4845
4846 fn supports_images(&self) -> bool {
4847 self.inner.supports_images()
4848 }
4849
4850 fn telemetry_id(&self) -> String {
4851 self.inner.telemetry_id()
4852 }
4853
4854 fn max_token_count(&self) -> u64 {
4855 self.inner.max_token_count()
4856 }
4857
4858 fn count_tokens(
4859 &self,
4860 request: LanguageModelRequest,
4861 cx: &App,
4862 ) -> BoxFuture<'static, Result<u64>> {
4863 self.inner.count_tokens(request, cx)
4864 }
4865
4866 fn stream_completion(
4867 &self,
4868 request: LanguageModelRequest,
4869 cx: &AsyncApp,
4870 ) -> BoxFuture<
4871 'static,
4872 Result<
4873 BoxStream<
4874 'static,
4875 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4876 >,
4877 LanguageModelCompletionError,
4878 >,
4879 > {
4880 if !*self.failed_once.lock() {
4881 *self.failed_once.lock() = true;
4882 let provider = self.provider_name();
4883 // Return error on first attempt
4884 let stream = futures::stream::once(async move {
4885 Err(LanguageModelCompletionError::ServerOverloaded {
4886 provider,
4887 retry_after: None,
4888 })
4889 });
4890 async move { Ok(stream.boxed()) }.boxed()
4891 } else {
4892 // Succeed on retry
4893 self.inner.stream_completion(request, cx)
4894 }
4895 }
4896 }
4897
4898 let fail_once_model = Arc::new(FailOnceModel {
4899 inner: Arc::new(FakeLanguageModel::default()),
4900 failed_once: Arc::new(Mutex::new(false)),
4901 });
4902
4903 // Insert a user message
4904 thread.update(cx, |thread, cx| {
4905 thread.insert_user_message(
4906 "Test message",
4907 ContextLoadResult::default(),
4908 None,
4909 vec![],
4910 cx,
4911 );
4912 });
4913
4914 // Start completion with fail-once model
4915 thread.update(cx, |thread, cx| {
4916 thread.send_to_model(
4917 fail_once_model.clone(),
4918 CompletionIntent::UserPrompt,
4919 None,
4920 cx,
4921 );
4922 });
4923
4924 cx.run_until_parked();
4925
4926 // Verify retry state exists after first failure
4927 thread.read_with(cx, |thread, _| {
4928 assert!(
4929 thread.retry_state.is_some(),
4930 "Should have retry state after failure"
4931 );
4932 });
4933
4934 // Wait for retry delay
4935 cx.executor().advance_clock(BASE_RETRY_DELAY);
4936 cx.run_until_parked();
4937
        // The retry goes through FailOnceModel again, which now delegates to the
        // inner FakeLanguageModel, so drive that stream to completion manually
4940 let inner_fake = fail_once_model.inner.clone();
4941
4942 // Wait a bit for the retry to start
4943 cx.run_until_parked();
4944
4945 // Check for pending completions and complete them
4946 if let Some(pending) = inner_fake.pending_completions().first() {
4947 inner_fake.stream_completion_response(pending, "Success!");
4948 inner_fake.end_completion_stream(pending);
4949 }
4950 cx.run_until_parked();
4951
4952 thread.read_with(cx, |thread, _| {
4953 assert!(
4954 thread.retry_state.is_none(),
4955 "Retry state should be cleared after successful completion"
4956 );
4957
4958 let has_assistant_message = thread
4959 .messages
4960 .iter()
4961 .any(|msg| msg.role == Role::Assistant && !msg.ui_only);
4962 assert!(
4963 has_assistant_message,
4964 "Should have an assistant message after successful retry"
4965 );
4966 });
4967 }
4968
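    // A rate limit error with `retry_after` should schedule a retry and record retry_state.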
4969 #[gpui::test]
4970 async fn test_rate_limit_retry_single_attempt(cx: &mut TestAppContext) {
4971 init_test_settings(cx);
4972
4973 let project = create_test_project(cx, json!({})).await;
4974 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4975
4976 // Enable Burn Mode to allow retries
4977 thread.update(cx, |thread, _| {
4978 thread.set_completion_mode(CompletionMode::Burn);
4979 });
4980
4981 // Create a model that returns rate limit error with retry_after
4982 struct RateLimitModel {
4983 inner: Arc<FakeLanguageModel>,
4984 }
4985
4986 impl LanguageModel for RateLimitModel {
4987 fn id(&self) -> LanguageModelId {
4988 self.inner.id()
4989 }
4990
4991 fn name(&self) -> LanguageModelName {
4992 self.inner.name()
4993 }
4994
4995 fn provider_id(&self) -> LanguageModelProviderId {
4996 self.inner.provider_id()
4997 }
4998
4999 fn provider_name(&self) -> LanguageModelProviderName {
5000 self.inner.provider_name()
5001 }
5002
5003 fn supports_tools(&self) -> bool {
5004 self.inner.supports_tools()
5005 }
5006
5007 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
5008 self.inner.supports_tool_choice(choice)
5009 }
5010
5011 fn supports_images(&self) -> bool {
5012 self.inner.supports_images()
5013 }
5014
5015 fn telemetry_id(&self) -> String {
5016 self.inner.telemetry_id()
5017 }
5018
5019 fn max_token_count(&self) -> u64 {
5020 self.inner.max_token_count()
5021 }
5022
5023 fn count_tokens(
5024 &self,
5025 request: LanguageModelRequest,
5026 cx: &App,
5027 ) -> BoxFuture<'static, Result<u64>> {
5028 self.inner.count_tokens(request, cx)
5029 }
5030
5031 fn stream_completion(
5032 &self,
5033 _request: LanguageModelRequest,
5034 _cx: &AsyncApp,
5035 ) -> BoxFuture<
5036 'static,
5037 Result<
5038 BoxStream<
5039 'static,
5040 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
5041 >,
5042 LanguageModelCompletionError,
5043 >,
5044 > {
5045 let provider = self.provider_name();
5046 async move {
5047 let stream = futures::stream::once(async move {
5048 Err(LanguageModelCompletionError::RateLimitExceeded {
5049 provider,
5050 retry_after: Some(Duration::from_secs(TEST_RATE_LIMIT_RETRY_SECS)),
5051 })
5052 });
5053 Ok(stream.boxed())
5054 }
5055 .boxed()
5056 }
5057
5058 fn as_fake(&self) -> &FakeLanguageModel {
5059 &self.inner
5060 }
5061 }
5062
5063 let model = Arc::new(RateLimitModel {
5064 inner: Arc::new(FakeLanguageModel::default()),
5065 });
5066
5067 // Insert a user message
5068 thread.update(cx, |thread, cx| {
5069 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5070 });
5071
5072 // Start completion
5073 thread.update(cx, |thread, cx| {
5074 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5075 });
5076
5077 cx.run_until_parked();
5078
5079 let retry_count = thread.update(cx, |thread, _| {
5080 thread
5081 .messages
5082 .iter()
5083 .filter(|m| {
5084 m.ui_only
5085 && m.segments.iter().any(|s| {
5086 if let MessageSegment::Text(text) = s {
5087 text.contains("rate limit exceeded")
5088 } else {
5089 false
5090 }
5091 })
5092 })
5093 .count()
5094 });
5095 assert_eq!(retry_count, 1, "Should have scheduled one retry");
5096
5097 thread.read_with(cx, |thread, _| {
5098 assert!(
5099 thread.retry_state.is_some(),
5100 "Rate limit errors should set retry_state"
5101 );
5102 if let Some(retry_state) = &thread.retry_state {
5103 assert_eq!(
5104 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
5105 "Rate limit errors should use MAX_RETRY_ATTEMPTS"
5106 );
5107 }
5108 });
5109
5110 // Verify we have one retry message
5111 thread.read_with(cx, |thread, _| {
5112 let retry_messages = thread
5113 .messages
5114 .iter()
5115 .filter(|msg| {
5116 msg.ui_only
5117 && msg.segments.iter().any(|seg| {
5118 if let MessageSegment::Text(text) = seg {
5119 text.contains("rate limit exceeded")
5120 } else {
5121 false
5122 }
5123 })
5124 })
5125 .count();
5126 assert_eq!(
5127 retry_messages, 1,
5128 "Should have one rate limit retry message"
5129 );
5130 });
5131
        // Check that the retry message includes the attempt count
5133 thread.read_with(cx, |thread, _| {
5134 let retry_message = thread
5135 .messages
5136 .iter()
5137 .find(|msg| msg.role == Role::System && msg.ui_only)
5138 .expect("Should have a retry message");
5139
            // Rate-limited retries go through retry_state, so the message includes the attempt count
5141 if let Some(MessageSegment::Text(text)) = retry_message.segments.first() {
5142 assert!(
5143 text.contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS)),
5144 "Rate limit retry message should contain attempt count with MAX_RETRY_ATTEMPTS"
5145 );
5146 assert!(
5147 text.contains("Retrying"),
5148 "Rate limit retry message should contain retry text"
5149 );
5150 }
5151 });
5152 }
5153
5154 #[gpui::test]
5155 async fn test_ui_only_messages_not_sent_to_model(cx: &mut TestAppContext) {
5156 init_test_settings(cx);
5157
5158 let project = create_test_project(cx, json!({})).await;
5159 let (_, _, thread, _, model) = setup_test_environment(cx, project.clone()).await;
5160
5161 // Insert a regular user message
5162 thread.update(cx, |thread, cx| {
5163 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5164 });
5165
5166 // Insert a UI-only message (like our retry notifications)
5167 thread.update(cx, |thread, cx| {
5168 let id = thread.next_message_id.post_inc();
5169 thread.messages.push(Message {
5170 id,
5171 role: Role::System,
5172 segments: vec![MessageSegment::Text(
5173 "This is a UI-only message that should not be sent to the model".to_string(),
5174 )],
5175 loaded_context: LoadedContext::default(),
5176 creases: Vec::new(),
5177 is_hidden: true,
5178 ui_only: true,
5179 });
5180 cx.emit(ThreadEvent::MessageAdded(id));
5181 });
5182
5183 // Insert another regular message
5184 thread.update(cx, |thread, cx| {
5185 thread.insert_user_message(
5186 "How are you?",
5187 ContextLoadResult::default(),
5188 None,
5189 vec![],
5190 cx,
5191 );
5192 });
5193
5194 // Generate the completion request
5195 let request = thread.update(cx, |thread, cx| {
5196 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
5197 });
5198
5199 // Verify that the request only contains non-UI-only messages
5200 // Should have system prompt + 2 user messages, but not the UI-only message
5201 let user_messages: Vec<_> = request
5202 .messages
5203 .iter()
5204 .filter(|msg| msg.role == Role::User)
5205 .collect();
5206 assert_eq!(
5207 user_messages.len(),
5208 2,
5209 "Should have exactly 2 user messages"
5210 );
5211
5212 // Verify the UI-only content is not present anywhere in the request
5213 let request_text = request
5214 .messages
5215 .iter()
5216 .flat_map(|msg| &msg.content)
5217 .filter_map(|content| match content {
5218 MessageContent::Text(text) => Some(text.as_str()),
5219 _ => None,
5220 })
5221 .collect::<String>();
5222
5223 assert!(
5224 !request_text.contains("UI-only message"),
5225 "UI-only message content should not be in the request"
5226 );
5227
5228 // Verify the thread still has all 3 messages (including UI-only)
5229 thread.read_with(cx, |thread, _| {
5230 assert_eq!(
5231 thread.messages().count(),
5232 3,
5233 "Thread should have 3 messages"
5234 );
5235 assert_eq!(
5236 thread.messages().filter(|m| m.ui_only).count(),
5237 1,
5238 "Thread should have 1 UI-only message"
5239 );
5240 });
5241
5242 // Verify that UI-only messages are not serialized
5243 let serialized = thread
5244 .update(cx, |thread, cx| thread.serialize(cx))
5245 .await
5246 .unwrap();
5247 assert_eq!(
5248 serialized.messages.len(),
5249 2,
5250 "Serialized thread should only have 2 messages (no UI-only)"
5251 );
5252 }
5253
5254 #[gpui::test]
5255 async fn test_no_retry_without_burn_mode(cx: &mut TestAppContext) {
5256 init_test_settings(cx);
5257
5258 let project = create_test_project(cx, json!({})).await;
5259 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5260
5261 // Ensure we're in Normal mode (not Burn mode)
5262 thread.update(cx, |thread, _| {
5263 thread.set_completion_mode(CompletionMode::Normal);
5264 });
5265
5266 // Track error events
5267 let error_events = Arc::new(Mutex::new(Vec::new()));
5268 let error_events_clone = error_events.clone();
5269
5270 let _subscription = thread.update(cx, |_, cx| {
5271 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
5272 if let ThreadEvent::ShowError(error) = event {
5273 error_events_clone.lock().push(error.clone());
5274 }
5275 })
5276 });
5277
5278 // Create model that returns overloaded error
5279 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5280
5281 // Insert a user message
5282 thread.update(cx, |thread, cx| {
5283 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5284 });
5285
5286 // Start completion
5287 thread.update(cx, |thread, cx| {
5288 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5289 });
5290
5291 cx.run_until_parked();
5292
5293 // Verify no retry state was created
5294 thread.read_with(cx, |thread, _| {
5295 assert!(
5296 thread.retry_state.is_none(),
5297 "Should not have retry state in Normal mode"
5298 );
5299 });
5300
5301 // Check that a retryable error was reported
5302 let errors = error_events.lock();
5303 assert!(!errors.is_empty(), "Should have received an error event");
5304
5305 if let ThreadError::RetryableError {
5306 message: _,
5307 can_enable_burn_mode,
5308 } = &errors[0]
5309 {
5310 assert!(
5311 *can_enable_burn_mode,
5312 "Error should indicate burn mode can be enabled"
5313 );
5314 } else {
5315 panic!("Expected RetryableError, got {:?}", errors[0]);
5316 }
5317
5318 // Verify the thread is no longer generating
5319 thread.read_with(cx, |thread, _| {
5320 assert!(
5321 !thread.is_generating(),
5322 "Should not be generating after error without retry"
5323 );
5324 });
5325 }
5326
5327 #[gpui::test]
5328 async fn test_retry_cancelled_on_stop(cx: &mut TestAppContext) {
5329 init_test_settings(cx);
5330
5331 let project = create_test_project(cx, json!({})).await;
5332 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5333
5334 // Enable Burn Mode to allow retries
5335 thread.update(cx, |thread, _| {
5336 thread.set_completion_mode(CompletionMode::Burn);
5337 });
5338
5339 // Create model that returns overloaded error
5340 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5341
5342 // Insert a user message
5343 thread.update(cx, |thread, cx| {
5344 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5345 });
5346
5347 // Start completion
5348 thread.update(cx, |thread, cx| {
5349 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5350 });
5351
5352 cx.run_until_parked();
5353
5354 // Verify retry was scheduled by checking for retry message
5355 let has_retry_message = thread.read_with(cx, |thread, _| {
5356 thread.messages.iter().any(|m| {
5357 m.ui_only
5358 && m.segments.iter().any(|s| {
5359 if let MessageSegment::Text(text) = s {
5360 text.contains("Retrying") && text.contains("seconds")
5361 } else {
5362 false
5363 }
5364 })
5365 })
5366 });
5367 assert!(has_retry_message, "Should have scheduled a retry");
5368
5369 // Cancel the completion before the retry happens
5370 thread.update(cx, |thread, cx| {
5371 thread.cancel_last_completion(None, cx);
5372 });
5373
5374 cx.run_until_parked();
5375
5376 // The retry should not have happened - no pending completions
5377 let fake_model = model.as_fake();
5378 assert_eq!(
5379 fake_model.pending_completions().len(),
5380 0,
5381 "Should have no pending completions after cancellation"
5382 );
5383
5384 // Verify the retry was cancelled by checking retry state
5385 thread.read_with(cx, |thread, _| {
5386 if let Some(retry_state) = &thread.retry_state {
5387 panic!(
5388 "retry_state should be cleared after cancellation, but found: attempt={}, max_attempts={}, intent={:?}",
5389 retry_state.attempt, retry_state.max_attempts, retry_state.intent
5390 );
5391 }
5392 });
5393 }
5394
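    // Drives a summarization request whose stream ends without producing any text,
    // leaving the thread summary in the `Error` state.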
5395 fn test_summarize_error(
5396 model: &Arc<dyn LanguageModel>,
5397 thread: &Entity<Thread>,
5398 cx: &mut TestAppContext,
5399 ) {
5400 thread.update(cx, |thread, cx| {
5401 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
5402 thread.send_to_model(
5403 model.clone(),
5404 CompletionIntent::ThreadSummarization,
5405 None,
5406 cx,
5407 );
5408 });
5409
5410 let fake_model = model.as_fake();
5411 simulate_successful_response(&fake_model, cx);
5412
5413 thread.read_with(cx, |thread, _| {
5414 assert!(matches!(thread.summary(), ThreadSummary::Generating));
5415 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5416 });
5417
        // End the summary stream without any content, which should result in an error
5419 cx.run_until_parked();
5420 fake_model.end_last_completion_stream();
5421 cx.run_until_parked();
5422
        // State is set to Error and the summary falls back to the default
5424 thread.read_with(cx, |thread, _| {
5425 assert!(matches!(thread.summary(), ThreadSummary::Error));
5426 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5427 });
5428 }
5429
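    // Streams a canned assistant response on the fake model and closes the stream.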
5430 fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
5431 cx.run_until_parked();
5432 fake_model.stream_last_completion_response("Assistant response");
5433 fake_model.end_last_completion_stream();
5434 cx.run_until_parked();
5435 }
5436
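    // Registers the settings, tools, and globals these tests depend on.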
5437 fn init_test_settings(cx: &mut TestAppContext) {
5438 cx.update(|cx| {
5439 let settings_store = SettingsStore::test(cx);
5440 cx.set_global(settings_store);
5441 language::init(cx);
5442 Project::init_settings(cx);
5443 AgentSettings::register(cx);
5444 prompt_store::init(cx);
5445 thread_store::init(cx);
5446 workspace::init_settings(cx);
5447 language_model::init_settings(cx);
5448 ThemeSettings::register(cx);
5449 ToolRegistry::default_global(cx);
5450 assistant_tool::init(cx);
5451
5452 let http_client = Arc::new(http_client::HttpClientWithUrl::new(
5453 http_client::FakeHttpClient::with_200_response(),
5454 "http://localhost".to_string(),
5455 None,
5456 ));
5457 assistant_tools::init(http_client, cx);
5458 });
5459 }
5460
    // Helper to create a test project from a JSON tree of files
5462 async fn create_test_project(
5463 cx: &mut TestAppContext,
5464 files: serde_json::Value,
5465 ) -> Entity<Project> {
5466 let fs = FakeFs::new(cx.executor());
5467 fs.insert_tree(path!("/test"), files).await;
5468 Project::test(fs, [path!("/test").as_ref()], cx).await
5469 }
5470
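    // Builds a workspace, thread store, thread, and context store for the given project,
    // and registers a fake model as both the default and the thread-summary model.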
5471 async fn setup_test_environment(
5472 cx: &mut TestAppContext,
5473 project: Entity<Project>,
5474 ) -> (
5475 Entity<Workspace>,
5476 Entity<ThreadStore>,
5477 Entity<Thread>,
5478 Entity<ContextStore>,
5479 Arc<dyn LanguageModel>,
5480 ) {
5481 let (workspace, cx) =
5482 cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
5483
5484 let thread_store = cx
5485 .update(|_, cx| {
5486 ThreadStore::load(
5487 project.clone(),
5488 cx.new(|_| ToolWorkingSet::default()),
5489 None,
5490 Arc::new(PromptBuilder::new(None).unwrap()),
5491 cx,
5492 )
5493 })
5494 .await
5495 .unwrap();
5496
5497 let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
5498 let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));
5499
5500 let provider = Arc::new(FakeLanguageModelProvider);
5501 let model = provider.test_model();
5502 let model: Arc<dyn LanguageModel> = Arc::new(model);
5503
5504 cx.update(|_, cx| {
5505 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
5506 registry.set_default_model(
5507 Some(ConfiguredModel {
5508 provider: provider.clone(),
5509 model: model.clone(),
5510 }),
5511 cx,
5512 );
5513 registry.set_thread_summary_model(
5514 Some(ConfiguredModel {
5515 provider,
5516 model: model.clone(),
5517 }),
5518 cx,
5519 );
5520 })
5521 });
5522
5523 (workspace, thread_store, thread, context_store, model)
5524 }
5525
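    // Opens `path` in the project and adds the resulting buffer to the context store.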
5526 async fn add_file_to_context(
5527 project: &Entity<Project>,
5528 context_store: &Entity<ContextStore>,
5529 path: &str,
5530 cx: &mut TestAppContext,
5531 ) -> Result<Entity<language::Buffer>> {
5532 let buffer_path = project
5533 .read_with(cx, |project, cx| project.find_project_path(path, cx))
5534 .unwrap();
5535
5536 let buffer = project
5537 .update(cx, |project, cx| {
5538 project.open_buffer(buffer_path.clone(), cx)
5539 })
5540 .await
5541 .unwrap();
5542
5543 context_store.update(cx, |context_store, cx| {
5544 context_store.add_file_from_buffer(&buffer_path, buffer.clone(), false, cx);
5545 });
5546
5547 Ok(buffer)
5548 }
5549}