1use crate::{
2 agent_profile::AgentProfile,
3 context::{AgentContext, AgentContextHandle, ContextLoadResult, LoadedContext},
4 thread_store::{
5 SerializedCrease, SerializedLanguageModel, SerializedMessage, SerializedMessageSegment,
6 SerializedThread, SerializedToolResult, SerializedToolUse, SharedProjectContext,
7 ThreadStore,
8 },
9 tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
10};
11use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
12use anyhow::{Result, anyhow};
13use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
14use chrono::{DateTime, Utc};
15use client::{ModelRequestUsage, RequestUsage};
16use collections::HashMap;
17use feature_flags::{self, FeatureFlagAppExt};
18use futures::{FutureExt, StreamExt as _, future::Shared};
19use git::repository::DiffType;
20use gpui::{
21 AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
22 WeakEntity, Window,
23};
24use http_client::StatusCode;
25use language_model::{
26 ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
27 LanguageModelExt as _, LanguageModelId, LanguageModelRegistry, LanguageModelRequest,
28 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
29 LanguageModelToolResultContent, LanguageModelToolUse, LanguageModelToolUseId, MessageContent,
30 ModelRequestLimitReachedError, PaymentRequiredError, Role, SelectedModel, StopReason,
31 TokenUsage,
32};
33use postage::stream::Stream as _;
34use project::{
35 Project,
36 git_store::{GitStore, GitStoreCheckpoint, RepositoryState},
37};
38use prompt_store::{ModelContext, PromptBuilder};
39use proto::Plan;
40use schemars::JsonSchema;
41use serde::{Deserialize, Serialize};
42use settings::Settings;
43use std::{
44 io::Write,
45 ops::Range,
46 sync::Arc,
47 time::{Duration, Instant},
48};
49use thiserror::Error;
50use util::{ResultExt as _, debug_panic, post_inc};
51use uuid::Uuid;
52use zed_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
53
54const MAX_RETRY_ATTEMPTS: u8 = 3;
55const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
56
57#[derive(Debug, Clone)]
58enum RetryStrategy {
59 ExponentialBackoff {
60 initial_delay: Duration,
61 max_attempts: u8,
62 },
63 Fixed {
64 delay: Duration,
65 max_attempts: u8,
66 },
67}
68
69#[derive(
70 Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
71)]
72pub struct ThreadId(Arc<str>);
73
74impl ThreadId {
75 pub fn new() -> Self {
76 Self(Uuid::new_v4().to_string().into())
77 }
78}
79
80impl std::fmt::Display for ThreadId {
81 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
82 write!(f, "{}", self.0)
83 }
84}
85
86impl From<&str> for ThreadId {
87 fn from(value: &str) -> Self {
88 Self(value.into())
89 }
90}
91
92/// The ID of the user prompt that initiated a request.
93///
94/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
95#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
96pub struct PromptId(Arc<str>);
97
98impl PromptId {
99 pub fn new() -> Self {
100 Self(Uuid::new_v4().to_string().into())
101 }
102}
103
104impl std::fmt::Display for PromptId {
105 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
106 write!(f, "{}", self.0)
107 }
108}
109
110#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
111pub struct MessageId(pub(crate) usize);
112
113impl MessageId {
114 fn post_inc(&mut self) -> Self {
115 Self(post_inc(&mut self.0))
116 }
117
118 pub fn as_usize(&self) -> usize {
119 self.0
120 }
121}
122
123/// Stored information that can be used to resurrect a context crease when creating an editor for a past message.
124#[derive(Clone, Debug)]
125pub struct MessageCrease {
126 pub range: Range<usize>,
127 pub icon_path: SharedString,
128 pub label: SharedString,
129 /// None for a deserialized message, Some otherwise.
130 pub context: Option<AgentContextHandle>,
131}
132
133/// A message in a [`Thread`].
134#[derive(Debug, Clone)]
135pub struct Message {
136 pub id: MessageId,
137 pub role: Role,
138 pub segments: Vec<MessageSegment>,
139 pub loaded_context: LoadedContext,
140 pub creases: Vec<MessageCrease>,
141 pub is_hidden: bool,
142 pub ui_only: bool,
143}
144
145impl Message {
146 /// Returns whether the message contains any meaningful text that should be displayed
147 /// The model sometimes runs tool without producing any text or just a marker ([`USING_TOOL_MARKER`])
148 pub fn should_display_content(&self) -> bool {
149 self.segments.iter().all(|segment| segment.should_display())
150 }
151
152 pub fn push_thinking(&mut self, text: &str, signature: Option<String>) {
153 if let Some(MessageSegment::Thinking {
154 text: segment,
155 signature: current_signature,
156 }) = self.segments.last_mut()
157 {
158 if let Some(signature) = signature {
159 *current_signature = Some(signature);
160 }
161 segment.push_str(text);
162 } else {
163 self.segments.push(MessageSegment::Thinking {
164 text: text.to_string(),
165 signature,
166 });
167 }
168 }
169
170 pub fn push_redacted_thinking(&mut self, data: String) {
171 self.segments.push(MessageSegment::RedactedThinking(data));
172 }
173
174 pub fn push_text(&mut self, text: &str) {
175 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
176 segment.push_str(text);
177 } else {
178 self.segments.push(MessageSegment::Text(text.to_string()));
179 }
180 }
181
182 pub fn to_string(&self) -> String {
183 let mut result = String::new();
184
185 if !self.loaded_context.text.is_empty() {
186 result.push_str(&self.loaded_context.text);
187 }
188
189 for segment in &self.segments {
190 match segment {
191 MessageSegment::Text(text) => result.push_str(text),
192 MessageSegment::Thinking { text, .. } => {
193 result.push_str("<think>\n");
194 result.push_str(text);
195 result.push_str("\n</think>");
196 }
197 MessageSegment::RedactedThinking(_) => {}
198 }
199 }
200
201 result
202 }
203}
204
205#[derive(Debug, Clone, PartialEq, Eq)]
206pub enum MessageSegment {
207 Text(String),
208 Thinking {
209 text: String,
210 signature: Option<String>,
211 },
212 RedactedThinking(String),
213}
214
215impl MessageSegment {
216 pub fn should_display(&self) -> bool {
217 match self {
218 Self::Text(text) => text.is_empty(),
219 Self::Thinking { text, .. } => text.is_empty(),
220 Self::RedactedThinking(_) => false,
221 }
222 }
223
224 pub fn text(&self) -> Option<&str> {
225 match self {
226 MessageSegment::Text(text) => Some(text),
227 _ => None,
228 }
229 }
230}
231
232#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
233pub struct ProjectSnapshot {
234 pub worktree_snapshots: Vec<WorktreeSnapshot>,
235 pub unsaved_buffer_paths: Vec<String>,
236 pub timestamp: DateTime<Utc>,
237}
238
239#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
240pub struct WorktreeSnapshot {
241 pub worktree_path: String,
242 pub git_state: Option<GitState>,
243}
244
245#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
246pub struct GitState {
247 pub remote_url: Option<String>,
248 pub head_sha: Option<String>,
249 pub current_branch: Option<String>,
250 pub diff: Option<String>,
251}
252
253#[derive(Clone, Debug)]
254pub struct ThreadCheckpoint {
255 message_id: MessageId,
256 git_checkpoint: GitStoreCheckpoint,
257}
258
259#[derive(Copy, Clone, Debug, PartialEq, Eq)]
260pub enum ThreadFeedback {
261 Positive,
262 Negative,
263}
264
265pub enum LastRestoreCheckpoint {
266 Pending {
267 message_id: MessageId,
268 },
269 Error {
270 message_id: MessageId,
271 error: String,
272 },
273}
274
275impl LastRestoreCheckpoint {
276 pub fn message_id(&self) -> MessageId {
277 match self {
278 LastRestoreCheckpoint::Pending { message_id } => *message_id,
279 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
280 }
281 }
282}
283
284#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
285pub enum DetailedSummaryState {
286 #[default]
287 NotGenerated,
288 Generating {
289 message_id: MessageId,
290 },
291 Generated {
292 text: SharedString,
293 message_id: MessageId,
294 },
295}
296
297impl DetailedSummaryState {
298 fn text(&self) -> Option<SharedString> {
299 if let Self::Generated { text, .. } = self {
300 Some(text.clone())
301 } else {
302 None
303 }
304 }
305}
306
307#[derive(Default, Debug)]
308pub struct TotalTokenUsage {
309 pub total: u64,
310 pub max: u64,
311}
312
313impl TotalTokenUsage {
314 pub fn ratio(&self) -> TokenUsageRatio {
315 #[cfg(debug_assertions)]
316 let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
317 .unwrap_or("0.8".to_string())
318 .parse()
319 .unwrap();
320 #[cfg(not(debug_assertions))]
321 let warning_threshold: f32 = 0.8;
322
323 // When the maximum is unknown because there is no selected model,
324 // avoid showing the token limit warning.
325 if self.max == 0 {
326 TokenUsageRatio::Normal
327 } else if self.total >= self.max {
328 TokenUsageRatio::Exceeded
329 } else if self.total as f32 / self.max as f32 >= warning_threshold {
330 TokenUsageRatio::Warning
331 } else {
332 TokenUsageRatio::Normal
333 }
334 }
335
336 pub fn add(&self, tokens: u64) -> TotalTokenUsage {
337 TotalTokenUsage {
338 total: self.total + tokens,
339 max: self.max,
340 }
341 }
342}
343
344#[derive(Debug, Default, PartialEq, Eq)]
345pub enum TokenUsageRatio {
346 #[default]
347 Normal,
348 Warning,
349 Exceeded,
350}
351
352#[derive(Debug, Clone, Copy)]
353pub enum QueueState {
354 Sending,
355 Queued { position: usize },
356 Started,
357}
358
359/// A thread of conversation with the LLM.
360pub struct Thread {
361 id: ThreadId,
362 updated_at: DateTime<Utc>,
363 summary: ThreadSummary,
364 pending_summary: Task<Option<()>>,
365 detailed_summary_task: Task<Option<()>>,
366 detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
367 detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
368 completion_mode: agent_settings::CompletionMode,
369 messages: Vec<Message>,
370 next_message_id: MessageId,
371 last_prompt_id: PromptId,
372 project_context: SharedProjectContext,
373 checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
374 completion_count: usize,
375 pending_completions: Vec<PendingCompletion>,
376 project: Entity<Project>,
377 prompt_builder: Arc<PromptBuilder>,
378 tools: Entity<ToolWorkingSet>,
379 tool_use: ToolUseState,
380 action_log: Entity<ActionLog>,
381 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
382 pending_checkpoint: Option<ThreadCheckpoint>,
383 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
384 request_token_usage: Vec<TokenUsage>,
385 cumulative_token_usage: TokenUsage,
386 exceeded_window_error: Option<ExceededWindowError>,
387 tool_use_limit_reached: bool,
388 feedback: Option<ThreadFeedback>,
389 retry_state: Option<RetryState>,
390 message_feedback: HashMap<MessageId, ThreadFeedback>,
391 last_auto_capture_at: Option<Instant>,
392 last_received_chunk_at: Option<Instant>,
393 request_callback: Option<
394 Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
395 >,
396 remaining_turns: u32,
397 configured_model: Option<ConfiguredModel>,
398 profile: AgentProfile,
399 last_error_context: Option<(Arc<dyn LanguageModel>, CompletionIntent)>,
400}
401
402#[derive(Clone, Debug)]
403struct RetryState {
404 attempt: u8,
405 max_attempts: u8,
406 intent: CompletionIntent,
407}
408
409#[derive(Clone, Debug, PartialEq, Eq)]
410pub enum ThreadSummary {
411 Pending,
412 Generating,
413 Ready(SharedString),
414 Error,
415}
416
417impl ThreadSummary {
418 pub const DEFAULT: SharedString = SharedString::new_static("New Thread");
419
420 pub fn or_default(&self) -> SharedString {
421 self.unwrap_or(Self::DEFAULT)
422 }
423
424 pub fn unwrap_or(&self, message: impl Into<SharedString>) -> SharedString {
425 self.ready().unwrap_or_else(|| message.into())
426 }
427
428 pub fn ready(&self) -> Option<SharedString> {
429 match self {
430 ThreadSummary::Ready(summary) => Some(summary.clone()),
431 ThreadSummary::Pending | ThreadSummary::Generating | ThreadSummary::Error => None,
432 }
433 }
434}
435
436#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
437pub struct ExceededWindowError {
438 /// Model used when last message exceeded context window
439 model_id: LanguageModelId,
440 /// Token count including last message
441 token_count: u64,
442}
443
444impl Thread {
445 pub fn new(
446 project: Entity<Project>,
447 tools: Entity<ToolWorkingSet>,
448 prompt_builder: Arc<PromptBuilder>,
449 system_prompt: SharedProjectContext,
450 cx: &mut Context<Self>,
451 ) -> Self {
452 let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
453 let configured_model = LanguageModelRegistry::read_global(cx).default_model();
454 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
455
456 Self {
457 id: ThreadId::new(),
458 updated_at: Utc::now(),
459 summary: ThreadSummary::Pending,
460 pending_summary: Task::ready(None),
461 detailed_summary_task: Task::ready(None),
462 detailed_summary_tx,
463 detailed_summary_rx,
464 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
465 messages: Vec::new(),
466 next_message_id: MessageId(0),
467 last_prompt_id: PromptId::new(),
468 project_context: system_prompt,
469 checkpoints_by_message: HashMap::default(),
470 completion_count: 0,
471 pending_completions: Vec::new(),
472 project: project.clone(),
473 prompt_builder,
474 tools: tools.clone(),
475 last_restore_checkpoint: None,
476 pending_checkpoint: None,
477 tool_use: ToolUseState::new(tools.clone()),
478 action_log: cx.new(|_| ActionLog::new(project.clone())),
479 initial_project_snapshot: {
480 let project_snapshot = Self::project_snapshot(project, cx);
481 cx.foreground_executor()
482 .spawn(async move { Some(project_snapshot.await) })
483 .shared()
484 },
485 request_token_usage: Vec::new(),
486 cumulative_token_usage: TokenUsage::default(),
487 exceeded_window_error: None,
488 tool_use_limit_reached: false,
489 feedback: None,
490 retry_state: None,
491 message_feedback: HashMap::default(),
492 last_auto_capture_at: None,
493 last_error_context: None,
494 last_received_chunk_at: None,
495 request_callback: None,
496 remaining_turns: u32::MAX,
497 configured_model: configured_model.clone(),
498 profile: AgentProfile::new(profile_id, tools),
499 }
500 }
501
502 pub fn deserialize(
503 id: ThreadId,
504 serialized: SerializedThread,
505 project: Entity<Project>,
506 tools: Entity<ToolWorkingSet>,
507 prompt_builder: Arc<PromptBuilder>,
508 project_context: SharedProjectContext,
509 window: Option<&mut Window>, // None in headless mode
510 cx: &mut Context<Self>,
511 ) -> Self {
512 let next_message_id = MessageId(
513 serialized
514 .messages
515 .last()
516 .map(|message| message.id.0 + 1)
517 .unwrap_or(0),
518 );
519 let tool_use = ToolUseState::from_serialized_messages(
520 tools.clone(),
521 &serialized.messages,
522 project.clone(),
523 window,
524 cx,
525 );
526 let (detailed_summary_tx, detailed_summary_rx) =
527 postage::watch::channel_with(serialized.detailed_summary_state);
528
529 let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
530 serialized
531 .model
532 .and_then(|model| {
533 let model = SelectedModel {
534 provider: model.provider.clone().into(),
535 model: model.model.clone().into(),
536 };
537 registry.select_model(&model, cx)
538 })
539 .or_else(|| registry.default_model())
540 });
541
542 let completion_mode = serialized
543 .completion_mode
544 .unwrap_or_else(|| AgentSettings::get_global(cx).preferred_completion_mode);
545 let profile_id = serialized
546 .profile
547 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
548
549 Self {
550 id,
551 updated_at: serialized.updated_at,
552 summary: ThreadSummary::Ready(serialized.summary),
553 pending_summary: Task::ready(None),
554 detailed_summary_task: Task::ready(None),
555 detailed_summary_tx,
556 detailed_summary_rx,
557 completion_mode,
558 retry_state: None,
559 messages: serialized
560 .messages
561 .into_iter()
562 .map(|message| Message {
563 id: message.id,
564 role: message.role,
565 segments: message
566 .segments
567 .into_iter()
568 .map(|segment| match segment {
569 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
570 SerializedMessageSegment::Thinking { text, signature } => {
571 MessageSegment::Thinking { text, signature }
572 }
573 SerializedMessageSegment::RedactedThinking { data } => {
574 MessageSegment::RedactedThinking(data)
575 }
576 })
577 .collect(),
578 loaded_context: LoadedContext {
579 contexts: Vec::new(),
580 text: message.context,
581 images: Vec::new(),
582 },
583 creases: message
584 .creases
585 .into_iter()
586 .map(|crease| MessageCrease {
587 range: crease.start..crease.end,
588 icon_path: crease.icon_path,
589 label: crease.label,
590 context: None,
591 })
592 .collect(),
593 is_hidden: message.is_hidden,
594 ui_only: false, // UI-only messages are not persisted
595 })
596 .collect(),
597 next_message_id,
598 last_prompt_id: PromptId::new(),
599 project_context,
600 checkpoints_by_message: HashMap::default(),
601 completion_count: 0,
602 pending_completions: Vec::new(),
603 last_restore_checkpoint: None,
604 pending_checkpoint: None,
605 project: project.clone(),
606 prompt_builder,
607 tools: tools.clone(),
608 tool_use,
609 action_log: cx.new(|_| ActionLog::new(project)),
610 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
611 request_token_usage: serialized.request_token_usage,
612 cumulative_token_usage: serialized.cumulative_token_usage,
613 exceeded_window_error: None,
614 tool_use_limit_reached: serialized.tool_use_limit_reached,
615 feedback: None,
616 message_feedback: HashMap::default(),
617 last_auto_capture_at: None,
618 last_error_context: None,
619 last_received_chunk_at: None,
620 request_callback: None,
621 remaining_turns: u32::MAX,
622 configured_model,
623 profile: AgentProfile::new(profile_id, tools),
624 }
625 }
626
627 pub fn set_request_callback(
628 &mut self,
629 callback: impl 'static
630 + FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>]),
631 ) {
632 self.request_callback = Some(Box::new(callback));
633 }
634
635 pub fn id(&self) -> &ThreadId {
636 &self.id
637 }
638
639 pub fn profile(&self) -> &AgentProfile {
640 &self.profile
641 }
642
643 pub fn set_profile(&mut self, id: AgentProfileId, cx: &mut Context<Self>) {
644 if &id != self.profile.id() {
645 self.profile = AgentProfile::new(id, self.tools.clone());
646 cx.emit(ThreadEvent::ProfileChanged);
647 }
648 }
649
650 pub fn is_empty(&self) -> bool {
651 self.messages.is_empty()
652 }
653
654 pub fn updated_at(&self) -> DateTime<Utc> {
655 self.updated_at
656 }
657
658 pub fn touch_updated_at(&mut self) {
659 self.updated_at = Utc::now();
660 }
661
662 pub fn advance_prompt_id(&mut self) {
663 self.last_prompt_id = PromptId::new();
664 }
665
666 pub fn project_context(&self) -> SharedProjectContext {
667 self.project_context.clone()
668 }
669
670 pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
671 if self.configured_model.is_none() {
672 self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
673 }
674 self.configured_model.clone()
675 }
676
677 pub fn configured_model(&self) -> Option<ConfiguredModel> {
678 self.configured_model.clone()
679 }
680
681 pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
682 self.configured_model = model;
683 cx.notify();
684 }
685
686 pub fn summary(&self) -> &ThreadSummary {
687 &self.summary
688 }
689
690 pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
691 let current_summary = match &self.summary {
692 ThreadSummary::Pending | ThreadSummary::Generating => return,
693 ThreadSummary::Ready(summary) => summary,
694 ThreadSummary::Error => &ThreadSummary::DEFAULT,
695 };
696
697 let mut new_summary = new_summary.into();
698
699 if new_summary.is_empty() {
700 new_summary = ThreadSummary::DEFAULT;
701 }
702
703 if current_summary != &new_summary {
704 self.summary = ThreadSummary::Ready(new_summary);
705 cx.emit(ThreadEvent::SummaryChanged);
706 }
707 }
708
709 pub fn completion_mode(&self) -> CompletionMode {
710 self.completion_mode
711 }
712
713 pub fn set_completion_mode(&mut self, mode: CompletionMode) {
714 self.completion_mode = mode;
715 }
716
717 pub fn message(&self, id: MessageId) -> Option<&Message> {
718 let index = self
719 .messages
720 .binary_search_by(|message| message.id.cmp(&id))
721 .ok()?;
722
723 self.messages.get(index)
724 }
725
726 pub fn messages(&self) -> impl ExactSizeIterator<Item = &Message> {
727 self.messages.iter()
728 }
729
730 pub fn is_generating(&self) -> bool {
731 !self.pending_completions.is_empty() || !self.all_tools_finished()
732 }
733
734 /// Indicates whether streaming of language model events is stale.
735 /// When `is_generating()` is false, this method returns `None`.
736 pub fn is_generation_stale(&self) -> Option<bool> {
737 const STALE_THRESHOLD: u128 = 250;
738
739 self.last_received_chunk_at
740 .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
741 }
742
743 fn received_chunk(&mut self) {
744 self.last_received_chunk_at = Some(Instant::now());
745 }
746
747 pub fn queue_state(&self) -> Option<QueueState> {
748 self.pending_completions
749 .first()
750 .map(|pending_completion| pending_completion.queue_state)
751 }
752
753 pub fn tools(&self) -> &Entity<ToolWorkingSet> {
754 &self.tools
755 }
756
757 pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
758 self.tool_use
759 .pending_tool_uses()
760 .into_iter()
761 .find(|tool_use| &tool_use.id == id)
762 }
763
764 pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
765 self.tool_use
766 .pending_tool_uses()
767 .into_iter()
768 .filter(|tool_use| tool_use.status.needs_confirmation())
769 }
770
771 pub fn has_pending_tool_uses(&self) -> bool {
772 !self.tool_use.pending_tool_uses().is_empty()
773 }
774
775 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
776 self.checkpoints_by_message.get(&id).cloned()
777 }
778
779 pub fn restore_checkpoint(
780 &mut self,
781 checkpoint: ThreadCheckpoint,
782 cx: &mut Context<Self>,
783 ) -> Task<Result<()>> {
784 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
785 message_id: checkpoint.message_id,
786 });
787 cx.emit(ThreadEvent::CheckpointChanged);
788 cx.notify();
789
790 let git_store = self.project().read(cx).git_store().clone();
791 let restore = git_store.update(cx, |git_store, cx| {
792 git_store.restore_checkpoint(checkpoint.git_checkpoint.clone(), cx)
793 });
794
795 cx.spawn(async move |this, cx| {
796 let result = restore.await;
797 this.update(cx, |this, cx| {
798 if let Err(err) = result.as_ref() {
799 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
800 message_id: checkpoint.message_id,
801 error: err.to_string(),
802 });
803 } else {
804 this.truncate(checkpoint.message_id, cx);
805 this.last_restore_checkpoint = None;
806 }
807 this.pending_checkpoint = None;
808 cx.emit(ThreadEvent::CheckpointChanged);
809 cx.notify();
810 })?;
811 result
812 })
813 }
814
815 fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
816 let pending_checkpoint = if self.is_generating() {
817 return;
818 } else if let Some(checkpoint) = self.pending_checkpoint.take() {
819 checkpoint
820 } else {
821 return;
822 };
823
824 self.finalize_checkpoint(pending_checkpoint, cx);
825 }
826
827 fn finalize_checkpoint(
828 &mut self,
829 pending_checkpoint: ThreadCheckpoint,
830 cx: &mut Context<Self>,
831 ) {
832 let git_store = self.project.read(cx).git_store().clone();
833 let final_checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
834 cx.spawn(async move |this, cx| match final_checkpoint.await {
835 Ok(final_checkpoint) => {
836 let equal = git_store
837 .update(cx, |store, cx| {
838 store.compare_checkpoints(
839 pending_checkpoint.git_checkpoint.clone(),
840 final_checkpoint.clone(),
841 cx,
842 )
843 })?
844 .await
845 .unwrap_or(false);
846
847 if !equal {
848 this.update(cx, |this, cx| {
849 this.insert_checkpoint(pending_checkpoint, cx)
850 })?;
851 }
852
853 Ok(())
854 }
855 Err(_) => this.update(cx, |this, cx| {
856 this.insert_checkpoint(pending_checkpoint, cx)
857 }),
858 })
859 .detach();
860 }
861
862 fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
863 self.checkpoints_by_message
864 .insert(checkpoint.message_id, checkpoint);
865 cx.emit(ThreadEvent::CheckpointChanged);
866 cx.notify();
867 }
868
869 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
870 self.last_restore_checkpoint.as_ref()
871 }
872
873 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
874 let Some(message_ix) = self
875 .messages
876 .iter()
877 .rposition(|message| message.id == message_id)
878 else {
879 return;
880 };
881 for deleted_message in self.messages.drain(message_ix..) {
882 self.checkpoints_by_message.remove(&deleted_message.id);
883 }
884 cx.notify();
885 }
886
887 pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AgentContext> {
888 self.messages
889 .iter()
890 .find(|message| message.id == id)
891 .into_iter()
892 .flat_map(|message| message.loaded_context.contexts.iter())
893 }
894
895 pub fn is_turn_end(&self, ix: usize) -> bool {
896 if self.messages.is_empty() {
897 return false;
898 }
899
900 if !self.is_generating() && ix == self.messages.len() - 1 {
901 return true;
902 }
903
904 let Some(message) = self.messages.get(ix) else {
905 return false;
906 };
907
908 if message.role != Role::Assistant {
909 return false;
910 }
911
912 self.messages
913 .get(ix + 1)
914 .and_then(|message| {
915 self.message(message.id)
916 .map(|next_message| next_message.role == Role::User && !next_message.is_hidden)
917 })
918 .unwrap_or(false)
919 }
920
921 pub fn tool_use_limit_reached(&self) -> bool {
922 self.tool_use_limit_reached
923 }
924
925 /// Returns whether all of the tool uses have finished running.
926 pub fn all_tools_finished(&self) -> bool {
927 // If the only pending tool uses left are the ones with errors, then
928 // that means that we've finished running all of the pending tools.
929 self.tool_use
930 .pending_tool_uses()
931 .iter()
932 .all(|pending_tool_use| pending_tool_use.status.is_error())
933 }
934
935 /// Returns whether any pending tool uses may perform edits
936 pub fn has_pending_edit_tool_uses(&self) -> bool {
937 self.tool_use
938 .pending_tool_uses()
939 .iter()
940 .filter(|pending_tool_use| !pending_tool_use.status.is_error())
941 .any(|pending_tool_use| pending_tool_use.may_perform_edits)
942 }
943
944 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
945 self.tool_use.tool_uses_for_message(id, cx)
946 }
947
948 pub fn tool_results_for_message(
949 &self,
950 assistant_message_id: MessageId,
951 ) -> Vec<&LanguageModelToolResult> {
952 self.tool_use.tool_results_for_message(assistant_message_id)
953 }
954
955 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
956 self.tool_use.tool_result(id)
957 }
958
959 pub fn output_for_tool(&self, id: &LanguageModelToolUseId) -> Option<&Arc<str>> {
960 match &self.tool_use.tool_result(id)?.content {
961 LanguageModelToolResultContent::Text(text) => Some(text),
962 LanguageModelToolResultContent::Image(_) => {
963 // TODO: We should display image
964 None
965 }
966 }
967 }
968
969 pub fn card_for_tool(&self, id: &LanguageModelToolUseId) -> Option<AnyToolCard> {
970 self.tool_use.tool_result_card(id).cloned()
971 }
972
973 /// Return tools that are both enabled and supported by the model
974 pub fn available_tools(
975 &self,
976 cx: &App,
977 model: Arc<dyn LanguageModel>,
978 ) -> Vec<LanguageModelRequestTool> {
979 if model.supports_tools() {
980 self.profile
981 .enabled_tools(cx)
982 .into_iter()
983 .filter_map(|(name, tool)| {
984 // Skip tools that cannot be supported
985 let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
986 Some(LanguageModelRequestTool {
987 name: name.into(),
988 description: tool.description(),
989 input_schema,
990 })
991 })
992 .collect()
993 } else {
994 Vec::default()
995 }
996 }
997
998 pub fn insert_user_message(
999 &mut self,
1000 text: impl Into<String>,
1001 loaded_context: ContextLoadResult,
1002 git_checkpoint: Option<GitStoreCheckpoint>,
1003 creases: Vec<MessageCrease>,
1004 cx: &mut Context<Self>,
1005 ) -> MessageId {
1006 if !loaded_context.referenced_buffers.is_empty() {
1007 self.action_log.update(cx, |log, cx| {
1008 for buffer in loaded_context.referenced_buffers {
1009 log.buffer_read(buffer, cx);
1010 }
1011 });
1012 }
1013
1014 let message_id = self.insert_message(
1015 Role::User,
1016 vec![MessageSegment::Text(text.into())],
1017 loaded_context.loaded_context,
1018 creases,
1019 false,
1020 cx,
1021 );
1022
1023 if let Some(git_checkpoint) = git_checkpoint {
1024 self.pending_checkpoint = Some(ThreadCheckpoint {
1025 message_id,
1026 git_checkpoint,
1027 });
1028 }
1029
1030 self.auto_capture_telemetry(cx);
1031
1032 message_id
1033 }
1034
1035 pub fn insert_invisible_continue_message(&mut self, cx: &mut Context<Self>) -> MessageId {
1036 let id = self.insert_message(
1037 Role::User,
1038 vec![MessageSegment::Text("Continue where you left off".into())],
1039 LoadedContext::default(),
1040 vec![],
1041 true,
1042 cx,
1043 );
1044 self.pending_checkpoint = None;
1045
1046 id
1047 }
1048
1049 pub fn insert_assistant_message(
1050 &mut self,
1051 segments: Vec<MessageSegment>,
1052 cx: &mut Context<Self>,
1053 ) -> MessageId {
1054 self.insert_message(
1055 Role::Assistant,
1056 segments,
1057 LoadedContext::default(),
1058 Vec::new(),
1059 false,
1060 cx,
1061 )
1062 }
1063
1064 pub fn insert_message(
1065 &mut self,
1066 role: Role,
1067 segments: Vec<MessageSegment>,
1068 loaded_context: LoadedContext,
1069 creases: Vec<MessageCrease>,
1070 is_hidden: bool,
1071 cx: &mut Context<Self>,
1072 ) -> MessageId {
1073 let id = self.next_message_id.post_inc();
1074 self.messages.push(Message {
1075 id,
1076 role,
1077 segments,
1078 loaded_context,
1079 creases,
1080 is_hidden,
1081 ui_only: false,
1082 });
1083 self.touch_updated_at();
1084 cx.emit(ThreadEvent::MessageAdded(id));
1085 id
1086 }
1087
1088 pub fn edit_message(
1089 &mut self,
1090 id: MessageId,
1091 new_role: Role,
1092 new_segments: Vec<MessageSegment>,
1093 creases: Vec<MessageCrease>,
1094 loaded_context: Option<LoadedContext>,
1095 checkpoint: Option<GitStoreCheckpoint>,
1096 cx: &mut Context<Self>,
1097 ) -> bool {
1098 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
1099 return false;
1100 };
1101 message.role = new_role;
1102 message.segments = new_segments;
1103 message.creases = creases;
1104 if let Some(context) = loaded_context {
1105 message.loaded_context = context;
1106 }
1107 if let Some(git_checkpoint) = checkpoint {
1108 self.checkpoints_by_message.insert(
1109 id,
1110 ThreadCheckpoint {
1111 message_id: id,
1112 git_checkpoint,
1113 },
1114 );
1115 }
1116 self.touch_updated_at();
1117 cx.emit(ThreadEvent::MessageEdited(id));
1118 true
1119 }
1120
1121 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
1122 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
1123 return false;
1124 };
1125 self.messages.remove(index);
1126 self.touch_updated_at();
1127 cx.emit(ThreadEvent::MessageDeleted(id));
1128 true
1129 }
1130
1131 /// Returns the representation of this [`Thread`] in a textual form.
1132 ///
1133 /// This is the representation we use when attaching a thread as context to another thread.
1134 pub fn text(&self) -> String {
1135 let mut text = String::new();
1136
1137 for message in &self.messages {
1138 text.push_str(match message.role {
1139 language_model::Role::User => "User:",
1140 language_model::Role::Assistant => "Agent:",
1141 language_model::Role::System => "System:",
1142 });
1143 text.push('\n');
1144
1145 for segment in &message.segments {
1146 match segment {
1147 MessageSegment::Text(content) => text.push_str(content),
1148 MessageSegment::Thinking { text: content, .. } => {
1149 text.push_str(&format!("<think>{}</think>", content))
1150 }
1151 MessageSegment::RedactedThinking(_) => {}
1152 }
1153 }
1154 text.push('\n');
1155 }
1156
1157 text
1158 }
1159
1160 /// Serializes this thread into a format for storage or telemetry.
1161 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
1162 let initial_project_snapshot = self.initial_project_snapshot.clone();
1163 cx.spawn(async move |this, cx| {
1164 let initial_project_snapshot = initial_project_snapshot.await;
1165 this.read_with(cx, |this, cx| SerializedThread {
1166 version: SerializedThread::VERSION.to_string(),
1167 summary: this.summary().or_default(),
1168 updated_at: this.updated_at(),
1169 messages: this
1170 .messages()
1171 .filter(|message| !message.ui_only)
1172 .map(|message| SerializedMessage {
1173 id: message.id,
1174 role: message.role,
1175 segments: message
1176 .segments
1177 .iter()
1178 .map(|segment| match segment {
1179 MessageSegment::Text(text) => {
1180 SerializedMessageSegment::Text { text: text.clone() }
1181 }
1182 MessageSegment::Thinking { text, signature } => {
1183 SerializedMessageSegment::Thinking {
1184 text: text.clone(),
1185 signature: signature.clone(),
1186 }
1187 }
1188 MessageSegment::RedactedThinking(data) => {
1189 SerializedMessageSegment::RedactedThinking {
1190 data: data.clone(),
1191 }
1192 }
1193 })
1194 .collect(),
1195 tool_uses: this
1196 .tool_uses_for_message(message.id, cx)
1197 .into_iter()
1198 .map(|tool_use| SerializedToolUse {
1199 id: tool_use.id,
1200 name: tool_use.name,
1201 input: tool_use.input,
1202 })
1203 .collect(),
1204 tool_results: this
1205 .tool_results_for_message(message.id)
1206 .into_iter()
1207 .map(|tool_result| SerializedToolResult {
1208 tool_use_id: tool_result.tool_use_id.clone(),
1209 is_error: tool_result.is_error,
1210 content: tool_result.content.clone(),
1211 output: tool_result.output.clone(),
1212 })
1213 .collect(),
1214 context: message.loaded_context.text.clone(),
1215 creases: message
1216 .creases
1217 .iter()
1218 .map(|crease| SerializedCrease {
1219 start: crease.range.start,
1220 end: crease.range.end,
1221 icon_path: crease.icon_path.clone(),
1222 label: crease.label.clone(),
1223 })
1224 .collect(),
1225 is_hidden: message.is_hidden,
1226 })
1227 .collect(),
1228 initial_project_snapshot,
1229 cumulative_token_usage: this.cumulative_token_usage,
1230 request_token_usage: this.request_token_usage.clone(),
1231 detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
1232 exceeded_window_error: this.exceeded_window_error.clone(),
1233 model: this
1234 .configured_model
1235 .as_ref()
1236 .map(|model| SerializedLanguageModel {
1237 provider: model.provider.id().0.to_string(),
1238 model: model.model.id().0.to_string(),
1239 }),
1240 completion_mode: Some(this.completion_mode),
1241 tool_use_limit_reached: this.tool_use_limit_reached,
1242 profile: Some(this.profile.id().clone()),
1243 })
1244 })
1245 }
1246
1247 pub fn remaining_turns(&self) -> u32 {
1248 self.remaining_turns
1249 }
1250
1251 pub fn set_remaining_turns(&mut self, remaining_turns: u32) {
1252 self.remaining_turns = remaining_turns;
1253 }
1254
1255 pub fn send_to_model(
1256 &mut self,
1257 model: Arc<dyn LanguageModel>,
1258 intent: CompletionIntent,
1259 window: Option<AnyWindowHandle>,
1260 cx: &mut Context<Self>,
1261 ) {
1262 if self.remaining_turns == 0 {
1263 return;
1264 }
1265
1266 self.remaining_turns -= 1;
1267
1268 self.flush_notifications(model.clone(), intent, cx);
1269
1270 let _checkpoint = self.finalize_pending_checkpoint(cx);
1271 self.stream_completion(
1272 self.to_completion_request(model.clone(), intent, cx),
1273 model,
1274 intent,
1275 window,
1276 cx,
1277 );
1278 }
1279
1280 pub fn retry_last_completion(
1281 &mut self,
1282 window: Option<AnyWindowHandle>,
1283 cx: &mut Context<Self>,
1284 ) {
1285 // Clear any existing error state
1286 self.retry_state = None;
1287
1288 // Use the last error context if available, otherwise fall back to configured model
1289 let (model, intent) = if let Some((model, intent)) = self.last_error_context.take() {
1290 (model, intent)
1291 } else if let Some(configured_model) = self.configured_model.as_ref() {
1292 let model = configured_model.model.clone();
1293 let intent = if self.has_pending_tool_uses() {
1294 CompletionIntent::ToolResults
1295 } else {
1296 CompletionIntent::UserPrompt
1297 };
1298 (model, intent)
1299 } else if let Some(configured_model) = self.get_or_init_configured_model(cx) {
1300 let model = configured_model.model.clone();
1301 let intent = if self.has_pending_tool_uses() {
1302 CompletionIntent::ToolResults
1303 } else {
1304 CompletionIntent::UserPrompt
1305 };
1306 (model, intent)
1307 } else {
1308 return;
1309 };
1310
1311 self.send_to_model(model, intent, window, cx);
1312 }
1313
1314 pub fn enable_burn_mode_and_retry(
1315 &mut self,
1316 window: Option<AnyWindowHandle>,
1317 cx: &mut Context<Self>,
1318 ) {
1319 self.completion_mode = CompletionMode::Burn;
1320 cx.emit(ThreadEvent::ProfileChanged);
1321 self.retry_last_completion(window, cx);
1322 }
1323
1324 pub fn used_tools_since_last_user_message(&self) -> bool {
1325 for message in self.messages.iter().rev() {
1326 if self.tool_use.message_has_tool_results(message.id) {
1327 return true;
1328 } else if message.role == Role::User {
1329 return false;
1330 }
1331 }
1332
1333 false
1334 }
1335
1336 pub fn to_completion_request(
1337 &self,
1338 model: Arc<dyn LanguageModel>,
1339 intent: CompletionIntent,
1340 cx: &mut Context<Self>,
1341 ) -> LanguageModelRequest {
1342 let mut request = LanguageModelRequest {
1343 thread_id: Some(self.id.to_string()),
1344 prompt_id: Some(self.last_prompt_id.to_string()),
1345 intent: Some(intent),
1346 mode: None,
1347 messages: vec![],
1348 tools: Vec::new(),
1349 tool_choice: None,
1350 stop: Vec::new(),
1351 temperature: AgentSettings::temperature_for_model(&model, cx),
1352 };
1353
1354 let available_tools = self.available_tools(cx, model.clone());
1355 let available_tool_names = available_tools
1356 .iter()
1357 .map(|tool| tool.name.clone())
1358 .collect();
1359
1360 let model_context = &ModelContext {
1361 available_tools: available_tool_names,
1362 };
1363
1364 if let Some(project_context) = self.project_context.borrow().as_ref() {
1365 match self
1366 .prompt_builder
1367 .generate_assistant_system_prompt(project_context, model_context)
1368 {
1369 Err(err) => {
1370 let message = format!("{err:?}").into();
1371 log::error!("{message}");
1372 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1373 header: "Error generating system prompt".into(),
1374 message,
1375 }));
1376 }
1377 Ok(system_prompt) => {
1378 request.messages.push(LanguageModelRequestMessage {
1379 role: Role::System,
1380 content: vec![MessageContent::Text(system_prompt)],
1381 cache: true,
1382 });
1383 }
1384 }
1385 } else {
1386 let message = "Context for system prompt unexpectedly not ready.".into();
1387 log::error!("{message}");
1388 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1389 header: "Error generating system prompt".into(),
1390 message,
1391 }));
1392 }
1393
1394 let mut message_ix_to_cache = None;
1395 for message in &self.messages {
1396 // ui_only messages are for the UI only, not for the model
1397 if message.ui_only {
1398 continue;
1399 }
1400
1401 let mut request_message = LanguageModelRequestMessage {
1402 role: message.role,
1403 content: Vec::new(),
1404 cache: false,
1405 };
1406
1407 message
1408 .loaded_context
1409 .add_to_request_message(&mut request_message);
1410
1411 for segment in &message.segments {
1412 match segment {
1413 MessageSegment::Text(text) => {
1414 let text = text.trim_end();
1415 if !text.is_empty() {
1416 request_message
1417 .content
1418 .push(MessageContent::Text(text.into()));
1419 }
1420 }
1421 MessageSegment::Thinking { text, signature } => {
1422 if !text.is_empty() {
1423 request_message.content.push(MessageContent::Thinking {
1424 text: text.into(),
1425 signature: signature.clone(),
1426 });
1427 }
1428 }
1429 MessageSegment::RedactedThinking(data) => {
1430 request_message
1431 .content
1432 .push(MessageContent::RedactedThinking(data.clone()));
1433 }
1434 };
1435 }
1436
1437 let mut cache_message = true;
1438 let mut tool_results_message = LanguageModelRequestMessage {
1439 role: Role::User,
1440 content: Vec::new(),
1441 cache: false,
1442 };
1443 for (tool_use, tool_result) in self.tool_use.tool_results(message.id) {
1444 if let Some(tool_result) = tool_result {
1445 request_message
1446 .content
1447 .push(MessageContent::ToolUse(tool_use.clone()));
1448 tool_results_message
1449 .content
1450 .push(MessageContent::ToolResult(LanguageModelToolResult {
1451 tool_use_id: tool_use.id.clone(),
1452 tool_name: tool_result.tool_name.clone(),
1453 is_error: tool_result.is_error,
1454 content: if tool_result.content.is_empty() {
1455 // Surprisingly, the API fails if we return an empty string here.
1456 // It thinks we are sending a tool use without a tool result.
1457 "<Tool returned an empty string>".into()
1458 } else {
1459 tool_result.content.clone()
1460 },
1461 output: None,
1462 }));
1463 } else {
1464 cache_message = false;
1465 log::debug!(
1466 "skipped tool use {:?} because it is still pending",
1467 tool_use
1468 );
1469 }
1470 }
1471
1472 if cache_message {
1473 message_ix_to_cache = Some(request.messages.len());
1474 }
1475 request.messages.push(request_message);
1476
1477 if !tool_results_message.content.is_empty() {
1478 if cache_message {
1479 message_ix_to_cache = Some(request.messages.len());
1480 }
1481 request.messages.push(tool_results_message);
1482 }
1483 }
1484
1485 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
1486 if let Some(message_ix_to_cache) = message_ix_to_cache {
1487 request.messages[message_ix_to_cache].cache = true;
1488 }
1489
1490 request.tools = available_tools;
1491 request.mode = if model.supports_burn_mode() {
1492 Some(self.completion_mode.into())
1493 } else {
1494 Some(CompletionMode::Normal.into())
1495 };
1496
1497 request
1498 }
1499
1500 fn to_summarize_request(
1501 &self,
1502 model: &Arc<dyn LanguageModel>,
1503 intent: CompletionIntent,
1504 added_user_message: String,
1505 cx: &App,
1506 ) -> LanguageModelRequest {
1507 let mut request = LanguageModelRequest {
1508 thread_id: None,
1509 prompt_id: None,
1510 intent: Some(intent),
1511 mode: None,
1512 messages: vec![],
1513 tools: Vec::new(),
1514 tool_choice: None,
1515 stop: Vec::new(),
1516 temperature: AgentSettings::temperature_for_model(model, cx),
1517 };
1518
1519 for message in &self.messages {
1520 let mut request_message = LanguageModelRequestMessage {
1521 role: message.role,
1522 content: Vec::new(),
1523 cache: false,
1524 };
1525
1526 for segment in &message.segments {
1527 match segment {
1528 MessageSegment::Text(text) => request_message
1529 .content
1530 .push(MessageContent::Text(text.clone())),
1531 MessageSegment::Thinking { .. } => {}
1532 MessageSegment::RedactedThinking(_) => {}
1533 }
1534 }
1535
1536 if request_message.content.is_empty() {
1537 continue;
1538 }
1539
1540 request.messages.push(request_message);
1541 }
1542
1543 request.messages.push(LanguageModelRequestMessage {
1544 role: Role::User,
1545 content: vec![MessageContent::Text(added_user_message)],
1546 cache: false,
1547 });
1548
1549 request
1550 }
1551
1552 /// Insert auto-generated notifications (if any) to the thread
1553 fn flush_notifications(
1554 &mut self,
1555 model: Arc<dyn LanguageModel>,
1556 intent: CompletionIntent,
1557 cx: &mut Context<Self>,
1558 ) {
1559 match intent {
1560 CompletionIntent::UserPrompt | CompletionIntent::ToolResults => {
1561 if let Some(pending_tool_use) = self.attach_tracked_files_state(model, cx) {
1562 cx.emit(ThreadEvent::ToolFinished {
1563 tool_use_id: pending_tool_use.id.clone(),
1564 pending_tool_use: Some(pending_tool_use),
1565 });
1566 }
1567 }
1568 CompletionIntent::ThreadSummarization
1569 | CompletionIntent::ThreadContextSummarization
1570 | CompletionIntent::CreateFile
1571 | CompletionIntent::EditFile
1572 | CompletionIntent::InlineAssist
1573 | CompletionIntent::TerminalInlineAssist
1574 | CompletionIntent::GenerateGitCommitMessage => {}
1575 };
1576 }
1577
1578 fn attach_tracked_files_state(
1579 &mut self,
1580 model: Arc<dyn LanguageModel>,
1581 cx: &mut App,
1582 ) -> Option<PendingToolUse> {
1583 let action_log = self.action_log.read(cx);
1584
1585 action_log.unnotified_stale_buffers(cx).next()?;
1586
1587 // Represent notification as a simulated `project_notifications` tool call
1588 let tool_name = Arc::from("project_notifications");
1589 let Some(tool) = self.tools.read(cx).tool(&tool_name, cx) else {
1590 debug_panic!("`project_notifications` tool not found");
1591 return None;
1592 };
1593
1594 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
1595 return None;
1596 }
1597
1598 let input = serde_json::json!({});
1599 let request = Arc::new(LanguageModelRequest::default()); // unused
1600 let window = None;
1601 let tool_result = tool.run(
1602 input,
1603 request,
1604 self.project.clone(),
1605 self.action_log.clone(),
1606 model.clone(),
1607 window,
1608 cx,
1609 );
1610
1611 let tool_use_id =
1612 LanguageModelToolUseId::from(format!("project_notifications_{}", self.messages.len()));
1613
1614 let tool_use = LanguageModelToolUse {
1615 id: tool_use_id.clone(),
1616 name: tool_name.clone(),
1617 raw_input: "{}".to_string(),
1618 input: serde_json::json!({}),
1619 is_input_complete: true,
1620 };
1621
1622 let tool_output = cx.background_executor().block(tool_result.output);
1623
1624 // Attach a project_notification tool call to the latest existing
1625 // Assistant message. We cannot create a new Assistant message
1626 // because thinking models require a `thinking` block that we
1627 // cannot mock. We cannot send a notification as a normal
1628 // (non-tool-use) User message because this distracts Agent
1629 // too much.
1630 let tool_message_id = self
1631 .messages
1632 .iter()
1633 .enumerate()
1634 .rfind(|(_, message)| message.role == Role::Assistant)
1635 .map(|(_, message)| message.id)?;
1636
1637 let tool_use_metadata = ToolUseMetadata {
1638 model: model.clone(),
1639 thread_id: self.id.clone(),
1640 prompt_id: self.last_prompt_id.clone(),
1641 };
1642
1643 self.tool_use
1644 .request_tool_use(tool_message_id, tool_use, tool_use_metadata.clone(), cx);
1645
1646 let pending_tool_use = self.tool_use.insert_tool_output(
1647 tool_use_id.clone(),
1648 tool_name,
1649 tool_output,
1650 self.configured_model.as_ref(),
1651 self.completion_mode,
1652 );
1653
1654 pending_tool_use
1655 }
1656
1657 pub fn stream_completion(
1658 &mut self,
1659 request: LanguageModelRequest,
1660 model: Arc<dyn LanguageModel>,
1661 intent: CompletionIntent,
1662 window: Option<AnyWindowHandle>,
1663 cx: &mut Context<Self>,
1664 ) {
1665 self.tool_use_limit_reached = false;
1666
1667 let pending_completion_id = post_inc(&mut self.completion_count);
1668 let mut request_callback_parameters = if self.request_callback.is_some() {
1669 Some((request.clone(), Vec::new()))
1670 } else {
1671 None
1672 };
1673 let prompt_id = self.last_prompt_id.clone();
1674 let tool_use_metadata = ToolUseMetadata {
1675 model: model.clone(),
1676 thread_id: self.id.clone(),
1677 prompt_id: prompt_id.clone(),
1678 };
1679
1680 let completion_mode = request
1681 .mode
1682 .unwrap_or(zed_llm_client::CompletionMode::Normal);
1683
1684 self.last_received_chunk_at = Some(Instant::now());
1685
1686 let task = cx.spawn(async move |thread, cx| {
1687 let stream_completion_future = model.stream_completion(request, &cx);
1688 let initial_token_usage =
1689 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage);
1690 let stream_completion = async {
1691 let mut events = stream_completion_future.await?;
1692
1693 let mut stop_reason = StopReason::EndTurn;
1694 let mut current_token_usage = TokenUsage::default();
1695
1696 thread
1697 .update(cx, |_thread, cx| {
1698 cx.emit(ThreadEvent::NewRequest);
1699 })
1700 .ok();
1701
1702 let mut request_assistant_message_id = None;
1703
1704 while let Some(event) = events.next().await {
1705 if let Some((_, response_events)) = request_callback_parameters.as_mut() {
1706 response_events
1707 .push(event.as_ref().map_err(|error| error.to_string()).cloned());
1708 }
1709
1710 thread.update(cx, |thread, cx| {
1711 match event? {
1712 LanguageModelCompletionEvent::StartMessage { .. } => {
1713 request_assistant_message_id =
1714 Some(thread.insert_assistant_message(
1715 vec![MessageSegment::Text(String::new())],
1716 cx,
1717 ));
1718 }
1719 LanguageModelCompletionEvent::Stop(reason) => {
1720 stop_reason = reason;
1721 }
1722 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1723 thread.update_token_usage_at_last_message(token_usage);
1724 thread.cumulative_token_usage = thread.cumulative_token_usage
1725 + token_usage
1726 - current_token_usage;
1727 current_token_usage = token_usage;
1728 }
1729 LanguageModelCompletionEvent::Text(chunk) => {
1730 thread.received_chunk();
1731
1732 cx.emit(ThreadEvent::ReceivedTextChunk);
1733 if let Some(last_message) = thread.messages.last_mut() {
1734 if last_message.role == Role::Assistant
1735 && !thread.tool_use.has_tool_results(last_message.id)
1736 {
1737 last_message.push_text(&chunk);
1738 cx.emit(ThreadEvent::StreamedAssistantText(
1739 last_message.id,
1740 chunk,
1741 ));
1742 } else {
1743 // If we won't have an Assistant message yet, assume this chunk marks the beginning
1744 // of a new Assistant response.
1745 //
1746 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1747 // will result in duplicating the text of the chunk in the rendered Markdown.
1748 request_assistant_message_id =
1749 Some(thread.insert_assistant_message(
1750 vec![MessageSegment::Text(chunk.to_string())],
1751 cx,
1752 ));
1753 };
1754 }
1755 }
1756 LanguageModelCompletionEvent::Thinking {
1757 text: chunk,
1758 signature,
1759 } => {
1760 thread.received_chunk();
1761
1762 if let Some(last_message) = thread.messages.last_mut() {
1763 if last_message.role == Role::Assistant
1764 && !thread.tool_use.has_tool_results(last_message.id)
1765 {
1766 last_message.push_thinking(&chunk, signature);
1767 cx.emit(ThreadEvent::StreamedAssistantThinking(
1768 last_message.id,
1769 chunk,
1770 ));
1771 } else {
1772 // If we won't have an Assistant message yet, assume this chunk marks the beginning
1773 // of a new Assistant response.
1774 //
1775 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1776 // will result in duplicating the text of the chunk in the rendered Markdown.
1777 request_assistant_message_id =
1778 Some(thread.insert_assistant_message(
1779 vec![MessageSegment::Thinking {
1780 text: chunk.to_string(),
1781 signature,
1782 }],
1783 cx,
1784 ));
1785 };
1786 }
1787 }
1788 LanguageModelCompletionEvent::RedactedThinking { data } => {
1789 thread.received_chunk();
1790
1791 if let Some(last_message) = thread.messages.last_mut() {
1792 if last_message.role == Role::Assistant
1793 && !thread.tool_use.has_tool_results(last_message.id)
1794 {
1795 last_message.push_redacted_thinking(data);
1796 } else {
1797 request_assistant_message_id =
1798 Some(thread.insert_assistant_message(
1799 vec![MessageSegment::RedactedThinking(data)],
1800 cx,
1801 ));
1802 };
1803 }
1804 }
1805 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1806 let last_assistant_message_id = request_assistant_message_id
1807 .unwrap_or_else(|| {
1808 let new_assistant_message_id =
1809 thread.insert_assistant_message(vec![], cx);
1810 request_assistant_message_id =
1811 Some(new_assistant_message_id);
1812 new_assistant_message_id
1813 });
1814
1815 let tool_use_id = tool_use.id.clone();
1816 let streamed_input = if tool_use.is_input_complete {
1817 None
1818 } else {
1819 Some((&tool_use.input).clone())
1820 };
1821
1822 let ui_text = thread.tool_use.request_tool_use(
1823 last_assistant_message_id,
1824 tool_use,
1825 tool_use_metadata.clone(),
1826 cx,
1827 );
1828
1829 if let Some(input) = streamed_input {
1830 cx.emit(ThreadEvent::StreamedToolUse {
1831 tool_use_id,
1832 ui_text,
1833 input,
1834 });
1835 }
1836 }
1837 LanguageModelCompletionEvent::ToolUseJsonParseError {
1838 id,
1839 tool_name,
1840 raw_input: invalid_input_json,
1841 json_parse_error,
1842 } => {
1843 thread.receive_invalid_tool_json(
1844 id,
1845 tool_name,
1846 invalid_input_json,
1847 json_parse_error,
1848 window,
1849 cx,
1850 );
1851 }
1852 LanguageModelCompletionEvent::StatusUpdate(status_update) => {
1853 if let Some(completion) = thread
1854 .pending_completions
1855 .iter_mut()
1856 .find(|completion| completion.id == pending_completion_id)
1857 {
1858 match status_update {
1859 CompletionRequestStatus::Queued { position } => {
1860 completion.queue_state =
1861 QueueState::Queued { position };
1862 }
1863 CompletionRequestStatus::Started => {
1864 completion.queue_state = QueueState::Started;
1865 }
1866 CompletionRequestStatus::Failed {
1867 code,
1868 message,
1869 request_id: _,
1870 retry_after,
1871 } => {
1872 return Err(
1873 LanguageModelCompletionError::from_cloud_failure(
1874 model.upstream_provider_name(),
1875 code,
1876 message,
1877 retry_after.map(Duration::from_secs_f64),
1878 ),
1879 );
1880 }
1881 CompletionRequestStatus::UsageUpdated { amount, limit } => {
1882 thread.update_model_request_usage(
1883 amount as u32,
1884 limit,
1885 cx,
1886 );
1887 }
1888 CompletionRequestStatus::ToolUseLimitReached => {
1889 thread.tool_use_limit_reached = true;
1890 cx.emit(ThreadEvent::ToolUseLimitReached);
1891 }
1892 }
1893 }
1894 }
1895 }
1896
1897 thread.touch_updated_at();
1898 cx.emit(ThreadEvent::StreamedCompletion);
1899 cx.notify();
1900
1901 thread.auto_capture_telemetry(cx);
1902 Ok(())
1903 })??;
1904
1905 smol::future::yield_now().await;
1906 }
1907
1908 thread.update(cx, |thread, cx| {
1909 thread.last_received_chunk_at = None;
1910 thread
1911 .pending_completions
1912 .retain(|completion| completion.id != pending_completion_id);
1913
1914 // If there is a response without tool use, summarize the message. Otherwise,
1915 // allow two tool uses before summarizing.
1916 if matches!(thread.summary, ThreadSummary::Pending)
1917 && thread.messages.len() >= 2
1918 && (!thread.has_pending_tool_uses() || thread.messages.len() >= 6)
1919 {
1920 thread.summarize(cx);
1921 }
1922 })?;
1923
1924 anyhow::Ok(stop_reason)
1925 };
1926
1927 let result = stream_completion.await;
1928 let mut retry_scheduled = false;
1929
1930 thread
1931 .update(cx, |thread, cx| {
1932 thread.finalize_pending_checkpoint(cx);
1933 match result.as_ref() {
1934 Ok(stop_reason) => {
1935 match stop_reason {
1936 StopReason::ToolUse => {
1937 let tool_uses =
1938 thread.use_pending_tools(window, model.clone(), cx);
1939 cx.emit(ThreadEvent::UsePendingTools { tool_uses });
1940 }
1941 StopReason::EndTurn | StopReason::MaxTokens => {
1942 thread.project.update(cx, |project, cx| {
1943 project.set_agent_location(None, cx);
1944 });
1945 }
1946 StopReason::Refusal => {
1947 thread.project.update(cx, |project, cx| {
1948 project.set_agent_location(None, cx);
1949 });
1950
1951 // Remove the turn that was refused.
1952 //
1953 // https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/handle-streaming-refusals#reset-context-after-refusal
1954 {
1955 let mut messages_to_remove = Vec::new();
1956
1957 for (ix, message) in
1958 thread.messages.iter().enumerate().rev()
1959 {
1960 messages_to_remove.push(message.id);
1961
1962 if message.role == Role::User {
1963 if ix == 0 {
1964 break;
1965 }
1966
1967 if let Some(prev_message) =
1968 thread.messages.get(ix - 1)
1969 {
1970 if prev_message.role == Role::Assistant {
1971 break;
1972 }
1973 }
1974 }
1975 }
1976
1977 for message_id in messages_to_remove {
1978 thread.delete_message(message_id, cx);
1979 }
1980 }
1981
1982 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1983 header: "Language model refusal".into(),
1984 message:
1985 "Model refused to generate content for safety reasons."
1986 .into(),
1987 }));
1988 }
1989 }
1990
1991 // We successfully completed, so cancel any remaining retries.
1992 thread.retry_state = None;
1993 }
1994 Err(error) => {
1995 thread.project.update(cx, |project, cx| {
1996 project.set_agent_location(None, cx);
1997 });
1998
1999 if error.is::<PaymentRequiredError>() {
2000 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
2001 } else if let Some(error) =
2002 error.downcast_ref::<ModelRequestLimitReachedError>()
2003 {
2004 cx.emit(ThreadEvent::ShowError(
2005 ThreadError::ModelRequestLimitReached { plan: error.plan },
2006 ));
2007 } else if let Some(completion_error) =
2008 error.downcast_ref::<LanguageModelCompletionError>()
2009 {
2010 match &completion_error {
2011 LanguageModelCompletionError::PromptTooLarge {
2012 tokens, ..
2013 } => {
2014 let tokens = tokens.unwrap_or_else(|| {
2015 // We didn't get an exact token count from the API, so fall back on our estimate.
2016 thread
2017 .total_token_usage()
2018 .map(|usage| usage.total)
2019 .unwrap_or(0)
2020 // We know the context window was exceeded in practice, so if our estimate was
2021 // lower than max tokens, the estimate was wrong; return that we exceeded by 1.
2022 .max(
2023 model
2024 .max_token_count_for_mode(completion_mode)
2025 .saturating_add(1),
2026 )
2027 });
2028 thread.exceeded_window_error = Some(ExceededWindowError {
2029 model_id: model.id(),
2030 token_count: tokens,
2031 });
2032 cx.notify();
2033 }
2034 _ => {
2035 if let Some(retry_strategy) =
2036 Thread::get_retry_strategy(completion_error)
2037 {
2038 retry_scheduled = thread
2039 .handle_retryable_error_with_delay(
2040 &completion_error,
2041 Some(retry_strategy),
2042 model.clone(),
2043 intent,
2044 window,
2045 cx,
2046 );
2047 }
2048 }
2049 }
2050 }
2051
2052 if !retry_scheduled {
2053 thread.cancel_last_completion(window, cx);
2054 }
2055 }
2056 }
2057
2058 if !retry_scheduled {
2059 cx.emit(ThreadEvent::Stopped(result.map_err(Arc::new)));
2060 }
2061
2062 if let Some((request_callback, (request, response_events))) = thread
2063 .request_callback
2064 .as_mut()
2065 .zip(request_callback_parameters.as_ref())
2066 {
2067 request_callback(request, response_events);
2068 }
2069
2070 thread.auto_capture_telemetry(cx);
2071
2072 if let Ok(initial_usage) = initial_token_usage {
2073 let usage = thread.cumulative_token_usage - initial_usage;
2074
2075 telemetry::event!(
2076 "Assistant Thread Completion",
2077 thread_id = thread.id().to_string(),
2078 prompt_id = prompt_id,
2079 model = model.telemetry_id(),
2080 model_provider = model.provider_id().to_string(),
2081 input_tokens = usage.input_tokens,
2082 output_tokens = usage.output_tokens,
2083 cache_creation_input_tokens = usage.cache_creation_input_tokens,
2084 cache_read_input_tokens = usage.cache_read_input_tokens,
2085 );
2086 }
2087 })
2088 .ok();
2089 });
2090
2091 self.pending_completions.push(PendingCompletion {
2092 id: pending_completion_id,
2093 queue_state: QueueState::Sending,
2094 _task: task,
2095 });
2096 }
2097
2098 pub fn summarize(&mut self, cx: &mut Context<Self>) {
2099 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
2100 println!("No thread summary model");
2101 return;
2102 };
2103
2104 if !model.provider.is_authenticated(cx) {
2105 return;
2106 }
2107
2108 let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
2109
2110 let request = self.to_summarize_request(
2111 &model.model,
2112 CompletionIntent::ThreadSummarization,
2113 added_user_message.into(),
2114 cx,
2115 );
2116
2117 self.summary = ThreadSummary::Generating;
2118
2119 self.pending_summary = cx.spawn(async move |this, cx| {
2120 let result = async {
2121 let mut messages = model.model.stream_completion(request, &cx).await?;
2122
2123 let mut new_summary = String::new();
2124 while let Some(event) = messages.next().await {
2125 let Ok(event) = event else {
2126 continue;
2127 };
2128 let text = match event {
2129 LanguageModelCompletionEvent::Text(text) => text,
2130 LanguageModelCompletionEvent::StatusUpdate(
2131 CompletionRequestStatus::UsageUpdated { amount, limit },
2132 ) => {
2133 this.update(cx, |thread, cx| {
2134 thread.update_model_request_usage(amount as u32, limit, cx);
2135 })?;
2136 continue;
2137 }
2138 _ => continue,
2139 };
2140
2141 let mut lines = text.lines();
2142 new_summary.extend(lines.next());
2143
2144 // Stop if the LLM generated multiple lines.
2145 if lines.next().is_some() {
2146 break;
2147 }
2148 }
2149
2150 anyhow::Ok(new_summary)
2151 }
2152 .await;
2153
2154 this.update(cx, |this, cx| {
2155 match result {
2156 Ok(new_summary) => {
2157 if new_summary.is_empty() {
2158 this.summary = ThreadSummary::Error;
2159 } else {
2160 this.summary = ThreadSummary::Ready(new_summary.into());
2161 }
2162 }
2163 Err(err) => {
2164 this.summary = ThreadSummary::Error;
2165 log::error!("Failed to generate thread summary: {}", err);
2166 }
2167 }
2168 cx.emit(ThreadEvent::SummaryGenerated);
2169 })
2170 .log_err()?;
2171
2172 Some(())
2173 });
2174 }
2175
2176 fn get_retry_strategy(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2177 use LanguageModelCompletionError::*;
2178
2179 // General strategy here:
2180 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2181 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), try multiple times with exponential backoff.
2182 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), just retry once.
2183 match error {
2184 HttpResponseError {
2185 status_code: StatusCode::TOO_MANY_REQUESTS,
2186 ..
2187 } => Some(RetryStrategy::ExponentialBackoff {
2188 initial_delay: BASE_RETRY_DELAY,
2189 max_attempts: MAX_RETRY_ATTEMPTS,
2190 }),
2191 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2192 Some(RetryStrategy::Fixed {
2193 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2194 max_attempts: MAX_RETRY_ATTEMPTS,
2195 })
2196 }
2197 UpstreamProviderError {
2198 status,
2199 retry_after,
2200 ..
2201 } => match *status {
2202 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2203 Some(RetryStrategy::Fixed {
2204 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2205 max_attempts: MAX_RETRY_ATTEMPTS,
2206 })
2207 }
2208 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2209 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2210 // Internal Server Error could be anything, so only retry once.
2211 max_attempts: 1,
2212 }),
2213 status => {
2214 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2215 // but we frequently get them in practice. See https://http.dev/529
2216 if status.as_u16() == 529 {
2217 Some(RetryStrategy::Fixed {
2218 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2219 max_attempts: MAX_RETRY_ATTEMPTS,
2220 })
2221 } else {
2222 None
2223 }
2224 }
2225 },
2226 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2227 delay: BASE_RETRY_DELAY,
2228 max_attempts: 1,
2229 }),
2230 ApiReadResponseError { .. }
2231 | HttpSend { .. }
2232 | DeserializeResponse { .. }
2233 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2234 delay: BASE_RETRY_DELAY,
2235 max_attempts: 1,
2236 }),
2237 // Retrying these errors definitely shouldn't help.
2238 HttpResponseError {
2239 status_code:
2240 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2241 ..
2242 }
2243 | SerializeRequest { .. }
2244 | BuildRequestBody { .. }
2245 | PromptTooLarge { .. }
2246 | AuthenticationError { .. }
2247 | PermissionError { .. }
2248 | ApiEndpointNotFound { .. }
2249 | NoApiKey { .. } => None,
2250 // Retry all other 4xx and 5xx errors once.
2251 HttpResponseError { status_code, .. }
2252 if status_code.is_client_error() || status_code.is_server_error() =>
2253 {
2254 Some(RetryStrategy::Fixed {
2255 delay: BASE_RETRY_DELAY,
2256 max_attempts: 1,
2257 })
2258 }
2259 // Conservatively assume that any other errors are non-retryable
2260 HttpResponseError { .. } | Other(..) => None,
2261 }
2262 }
2263
2264 fn handle_retryable_error_with_delay(
2265 &mut self,
2266 error: &LanguageModelCompletionError,
2267 strategy: Option<RetryStrategy>,
2268 model: Arc<dyn LanguageModel>,
2269 intent: CompletionIntent,
2270 window: Option<AnyWindowHandle>,
2271 cx: &mut Context<Self>,
2272 ) -> bool {
2273 // Store context for the Retry button
2274 self.last_error_context = Some((model.clone(), intent));
2275
2276 // Only auto-retry if Burn Mode is enabled
2277 if self.completion_mode != CompletionMode::Burn {
2278 // Show error with retry options
2279 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2280 message: format!(
2281 "{}\n\nTo automatically retry when similar errors happen, enable Burn Mode.",
2282 error
2283 )
2284 .into(),
2285 can_enable_burn_mode: true,
2286 }));
2287 return false;
2288 }
2289
2290 let Some(strategy) = strategy.or_else(|| Self::get_retry_strategy(error)) else {
2291 return false;
2292 };
2293
2294 let max_attempts = match &strategy {
2295 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
2296 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
2297 };
2298
2299 let retry_state = self.retry_state.get_or_insert(RetryState {
2300 attempt: 0,
2301 max_attempts,
2302 intent,
2303 });
2304
2305 retry_state.attempt += 1;
2306 let attempt = retry_state.attempt;
2307 let max_attempts = retry_state.max_attempts;
2308 let intent = retry_state.intent;
2309
2310 if attempt <= max_attempts {
2311 let delay = match &strategy {
2312 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
2313 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
2314 Duration::from_secs(delay_secs)
2315 }
2316 RetryStrategy::Fixed { delay, .. } => *delay,
2317 };
2318
2319 // Add a transient message to inform the user
2320 let delay_secs = delay.as_secs();
2321 let retry_message = if max_attempts == 1 {
2322 format!("{error}. Retrying in {delay_secs} seconds...")
2323 } else {
2324 format!(
2325 "{error}. Retrying (attempt {attempt} of {max_attempts}) \
2326 in {delay_secs} seconds..."
2327 )
2328 };
2329 log::warn!(
2330 "Retrying completion request (attempt {attempt} of {max_attempts}) \
2331 in {delay_secs} seconds: {error:?}",
2332 );
2333
2334 // Add a UI-only message instead of a regular message
2335 let id = self.next_message_id.post_inc();
2336 self.messages.push(Message {
2337 id,
2338 role: Role::System,
2339 segments: vec![MessageSegment::Text(retry_message)],
2340 loaded_context: LoadedContext::default(),
2341 creases: Vec::new(),
2342 is_hidden: false,
2343 ui_only: true,
2344 });
2345 cx.emit(ThreadEvent::MessageAdded(id));
2346
2347 // Schedule the retry
2348 let thread_handle = cx.entity().downgrade();
2349
2350 cx.spawn(async move |_thread, cx| {
2351 cx.background_executor().timer(delay).await;
2352
2353 thread_handle
2354 .update(cx, |thread, cx| {
2355 // Retry the completion
2356 thread.send_to_model(model, intent, window, cx);
2357 })
2358 .log_err();
2359 })
2360 .detach();
2361
2362 true
2363 } else {
2364 // Max retries exceeded
2365 self.retry_state = None;
2366
2367 // Stop generating since we're giving up on retrying.
2368 self.pending_completions.clear();
2369
2370 // Show error alongside a Retry button, but no
2371 // Enable Burn Mode button (since it's already enabled)
2372 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2373 message: format!("Failed after retrying: {}", error).into(),
2374 can_enable_burn_mode: false,
2375 }));
2376
2377 false
2378 }
2379 }
2380
2381 pub fn start_generating_detailed_summary_if_needed(
2382 &mut self,
2383 thread_store: WeakEntity<ThreadStore>,
2384 cx: &mut Context<Self>,
2385 ) {
2386 let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
2387 return;
2388 };
2389
2390 match &*self.detailed_summary_rx.borrow() {
2391 DetailedSummaryState::Generating { message_id, .. }
2392 | DetailedSummaryState::Generated { message_id, .. }
2393 if *message_id == last_message_id =>
2394 {
2395 // Already up-to-date
2396 return;
2397 }
2398 _ => {}
2399 }
2400
2401 let Some(ConfiguredModel { model, provider }) =
2402 LanguageModelRegistry::read_global(cx).thread_summary_model()
2403 else {
2404 return;
2405 };
2406
2407 if !provider.is_authenticated(cx) {
2408 return;
2409 }
2410
2411 let added_user_message = include_str!("./prompts/summarize_thread_detailed_prompt.txt");
2412
2413 let request = self.to_summarize_request(
2414 &model,
2415 CompletionIntent::ThreadContextSummarization,
2416 added_user_message.into(),
2417 cx,
2418 );
2419
2420 *self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
2421 message_id: last_message_id,
2422 };
2423
2424 // Replace the detailed summarization task if there is one, cancelling it. It would probably
2425 // be better to allow the old task to complete, but this would require logic for choosing
2426 // which result to prefer (the old task could complete after the new one, resulting in a
2427 // stale summary).
2428 self.detailed_summary_task = cx.spawn(async move |thread, cx| {
2429 let stream = model.stream_completion_text(request, &cx);
2430 let Some(mut messages) = stream.await.log_err() else {
2431 thread
2432 .update(cx, |thread, _cx| {
2433 *thread.detailed_summary_tx.borrow_mut() =
2434 DetailedSummaryState::NotGenerated;
2435 })
2436 .ok()?;
2437 return None;
2438 };
2439
2440 let mut new_detailed_summary = String::new();
2441
2442 while let Some(chunk) = messages.stream.next().await {
2443 if let Some(chunk) = chunk.log_err() {
2444 new_detailed_summary.push_str(&chunk);
2445 }
2446 }
2447
2448 thread
2449 .update(cx, |thread, _cx| {
2450 *thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
2451 text: new_detailed_summary.into(),
2452 message_id: last_message_id,
2453 };
2454 })
2455 .ok()?;
2456
2457 // Save thread so its summary can be reused later
2458 if let Some(thread) = thread.upgrade() {
2459 if let Ok(Ok(save_task)) = cx.update(|cx| {
2460 thread_store
2461 .update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
2462 }) {
2463 save_task.await.log_err();
2464 }
2465 }
2466
2467 Some(())
2468 });
2469 }
2470
2471 pub async fn wait_for_detailed_summary_or_text(
2472 this: &Entity<Self>,
2473 cx: &mut AsyncApp,
2474 ) -> Option<SharedString> {
2475 let mut detailed_summary_rx = this
2476 .read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
2477 .ok()?;
2478 loop {
2479 match detailed_summary_rx.recv().await? {
2480 DetailedSummaryState::Generating { .. } => {}
2481 DetailedSummaryState::NotGenerated => {
2482 return this.read_with(cx, |this, _cx| this.text().into()).ok();
2483 }
2484 DetailedSummaryState::Generated { text, .. } => return Some(text),
2485 }
2486 }
2487 }
2488
2489 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
2490 self.detailed_summary_rx
2491 .borrow()
2492 .text()
2493 .unwrap_or_else(|| self.text().into())
2494 }
2495
2496 pub fn is_generating_detailed_summary(&self) -> bool {
2497 matches!(
2498 &*self.detailed_summary_rx.borrow(),
2499 DetailedSummaryState::Generating { .. }
2500 )
2501 }
2502
2503 pub fn use_pending_tools(
2504 &mut self,
2505 window: Option<AnyWindowHandle>,
2506 model: Arc<dyn LanguageModel>,
2507 cx: &mut Context<Self>,
2508 ) -> Vec<PendingToolUse> {
2509 self.auto_capture_telemetry(cx);
2510 let request =
2511 Arc::new(self.to_completion_request(model.clone(), CompletionIntent::ToolResults, cx));
2512 let pending_tool_uses = self
2513 .tool_use
2514 .pending_tool_uses()
2515 .into_iter()
2516 .filter(|tool_use| tool_use.status.is_idle())
2517 .cloned()
2518 .collect::<Vec<_>>();
2519
2520 for tool_use in pending_tool_uses.iter() {
2521 self.use_pending_tool(tool_use.clone(), request.clone(), model.clone(), window, cx);
2522 }
2523
2524 pending_tool_uses
2525 }
2526
2527 fn use_pending_tool(
2528 &mut self,
2529 tool_use: PendingToolUse,
2530 request: Arc<LanguageModelRequest>,
2531 model: Arc<dyn LanguageModel>,
2532 window: Option<AnyWindowHandle>,
2533 cx: &mut Context<Self>,
2534 ) {
2535 let Some(tool) = self.tools.read(cx).tool(&tool_use.name, cx) else {
2536 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2537 };
2538
2539 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
2540 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2541 }
2542
2543 if tool.needs_confirmation(&tool_use.input, cx)
2544 && !AgentSettings::get_global(cx).always_allow_tool_actions
2545 {
2546 self.tool_use.confirm_tool_use(
2547 tool_use.id,
2548 tool_use.ui_text,
2549 tool_use.input,
2550 request,
2551 tool,
2552 );
2553 cx.emit(ThreadEvent::ToolConfirmationNeeded);
2554 } else {
2555 self.run_tool(
2556 tool_use.id,
2557 tool_use.ui_text,
2558 tool_use.input,
2559 request,
2560 tool,
2561 model,
2562 window,
2563 cx,
2564 );
2565 }
2566 }
2567
2568 pub fn handle_hallucinated_tool_use(
2569 &mut self,
2570 tool_use_id: LanguageModelToolUseId,
2571 hallucinated_tool_name: Arc<str>,
2572 window: Option<AnyWindowHandle>,
2573 cx: &mut Context<Thread>,
2574 ) {
2575 let available_tools = self.profile.enabled_tools(cx);
2576
2577 let tool_list = available_tools
2578 .iter()
2579 .map(|(name, tool)| format!("- {}: {}", name, tool.description()))
2580 .collect::<Vec<_>>()
2581 .join("\n");
2582
2583 let error_message = format!(
2584 "The tool '{}' doesn't exist or is not enabled. Available tools:\n{}",
2585 hallucinated_tool_name, tool_list
2586 );
2587
2588 let pending_tool_use = self.tool_use.insert_tool_output(
2589 tool_use_id.clone(),
2590 hallucinated_tool_name,
2591 Err(anyhow!("Missing tool call: {error_message}")),
2592 self.configured_model.as_ref(),
2593 self.completion_mode,
2594 );
2595
2596 cx.emit(ThreadEvent::MissingToolUse {
2597 tool_use_id: tool_use_id.clone(),
2598 ui_text: error_message.into(),
2599 });
2600
2601 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2602 }
2603
2604 pub fn receive_invalid_tool_json(
2605 &mut self,
2606 tool_use_id: LanguageModelToolUseId,
2607 tool_name: Arc<str>,
2608 invalid_json: Arc<str>,
2609 error: String,
2610 window: Option<AnyWindowHandle>,
2611 cx: &mut Context<Thread>,
2612 ) {
2613 log::error!("The model returned invalid input JSON: {invalid_json}");
2614
2615 let pending_tool_use = self.tool_use.insert_tool_output(
2616 tool_use_id.clone(),
2617 tool_name,
2618 Err(anyhow!("Error parsing input JSON: {error}")),
2619 self.configured_model.as_ref(),
2620 self.completion_mode,
2621 );
2622 let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
2623 pending_tool_use.ui_text.clone()
2624 } else {
2625 log::error!(
2626 "There was no pending tool use for tool use {tool_use_id}, even though it finished (with invalid input JSON)."
2627 );
2628 format!("Unknown tool {}", tool_use_id).into()
2629 };
2630
2631 cx.emit(ThreadEvent::InvalidToolInput {
2632 tool_use_id: tool_use_id.clone(),
2633 ui_text,
2634 invalid_input_json: invalid_json,
2635 });
2636
2637 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2638 }
2639
2640 pub fn run_tool(
2641 &mut self,
2642 tool_use_id: LanguageModelToolUseId,
2643 ui_text: impl Into<SharedString>,
2644 input: serde_json::Value,
2645 request: Arc<LanguageModelRequest>,
2646 tool: Arc<dyn Tool>,
2647 model: Arc<dyn LanguageModel>,
2648 window: Option<AnyWindowHandle>,
2649 cx: &mut Context<Thread>,
2650 ) {
2651 let task =
2652 self.spawn_tool_use(tool_use_id.clone(), request, input, tool, model, window, cx);
2653 self.tool_use
2654 .run_pending_tool(tool_use_id, ui_text.into(), task);
2655 }
2656
2657 fn spawn_tool_use(
2658 &mut self,
2659 tool_use_id: LanguageModelToolUseId,
2660 request: Arc<LanguageModelRequest>,
2661 input: serde_json::Value,
2662 tool: Arc<dyn Tool>,
2663 model: Arc<dyn LanguageModel>,
2664 window: Option<AnyWindowHandle>,
2665 cx: &mut Context<Thread>,
2666 ) -> Task<()> {
2667 let tool_name: Arc<str> = tool.name().into();
2668
2669 let tool_result = tool.run(
2670 input,
2671 request,
2672 self.project.clone(),
2673 self.action_log.clone(),
2674 model,
2675 window,
2676 cx,
2677 );
2678
2679 // Store the card separately if it exists
2680 if let Some(card) = tool_result.card.clone() {
2681 self.tool_use
2682 .insert_tool_result_card(tool_use_id.clone(), card);
2683 }
2684
2685 cx.spawn({
2686 async move |thread: WeakEntity<Thread>, cx| {
2687 let output = tool_result.output.await;
2688
2689 thread
2690 .update(cx, |thread, cx| {
2691 let pending_tool_use = thread.tool_use.insert_tool_output(
2692 tool_use_id.clone(),
2693 tool_name,
2694 output,
2695 thread.configured_model.as_ref(),
2696 thread.completion_mode,
2697 );
2698 thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2699 })
2700 .ok();
2701 }
2702 })
2703 }
2704
2705 fn tool_finished(
2706 &mut self,
2707 tool_use_id: LanguageModelToolUseId,
2708 pending_tool_use: Option<PendingToolUse>,
2709 canceled: bool,
2710 window: Option<AnyWindowHandle>,
2711 cx: &mut Context<Self>,
2712 ) {
2713 if self.all_tools_finished() {
2714 if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
2715 if !canceled {
2716 self.send_to_model(model.clone(), CompletionIntent::ToolResults, window, cx);
2717 }
2718 self.auto_capture_telemetry(cx);
2719 }
2720 }
2721
2722 cx.emit(ThreadEvent::ToolFinished {
2723 tool_use_id,
2724 pending_tool_use,
2725 });
2726 }
2727
2728 /// Cancels the last pending completion, if there are any pending.
2729 ///
2730 /// Returns whether a completion was canceled.
2731 pub fn cancel_last_completion(
2732 &mut self,
2733 window: Option<AnyWindowHandle>,
2734 cx: &mut Context<Self>,
2735 ) -> bool {
2736 let mut canceled = self.pending_completions.pop().is_some() || self.retry_state.is_some();
2737
2738 self.retry_state = None;
2739
2740 for pending_tool_use in self.tool_use.cancel_pending() {
2741 canceled = true;
2742 self.tool_finished(
2743 pending_tool_use.id.clone(),
2744 Some(pending_tool_use),
2745 true,
2746 window,
2747 cx,
2748 );
2749 }
2750
2751 if canceled {
2752 cx.emit(ThreadEvent::CompletionCanceled);
2753
2754 // When canceled, we always want to insert the checkpoint.
2755 // (We skip over finalize_pending_checkpoint, because it
2756 // would conclude we didn't have anything to insert here.)
2757 if let Some(checkpoint) = self.pending_checkpoint.take() {
2758 self.insert_checkpoint(checkpoint, cx);
2759 }
2760 } else {
2761 self.finalize_pending_checkpoint(cx);
2762 }
2763
2764 canceled
2765 }
2766
2767 /// Signals that any in-progress editing should be canceled.
2768 ///
2769 /// This method is used to notify listeners (like ActiveThread) that
2770 /// they should cancel any editing operations.
2771 pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
2772 cx.emit(ThreadEvent::CancelEditing);
2773 }
2774
2775 pub fn feedback(&self) -> Option<ThreadFeedback> {
2776 self.feedback
2777 }
2778
2779 pub fn message_feedback(&self, message_id: MessageId) -> Option<ThreadFeedback> {
2780 self.message_feedback.get(&message_id).copied()
2781 }
2782
2783 pub fn report_message_feedback(
2784 &mut self,
2785 message_id: MessageId,
2786 feedback: ThreadFeedback,
2787 cx: &mut Context<Self>,
2788 ) -> Task<Result<()>> {
2789 if self.message_feedback.get(&message_id) == Some(&feedback) {
2790 return Task::ready(Ok(()));
2791 }
2792
2793 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2794 let serialized_thread = self.serialize(cx);
2795 let thread_id = self.id().clone();
2796 let client = self.project.read(cx).client();
2797
2798 let enabled_tool_names: Vec<String> = self
2799 .profile
2800 .enabled_tools(cx)
2801 .iter()
2802 .map(|(name, _)| name.clone().into())
2803 .collect();
2804
2805 self.message_feedback.insert(message_id, feedback);
2806
2807 cx.notify();
2808
2809 let message_content = self
2810 .message(message_id)
2811 .map(|msg| msg.to_string())
2812 .unwrap_or_default();
2813
2814 cx.background_spawn(async move {
2815 let final_project_snapshot = final_project_snapshot.await;
2816 let serialized_thread = serialized_thread.await?;
2817 let thread_data =
2818 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
2819
2820 let rating = match feedback {
2821 ThreadFeedback::Positive => "positive",
2822 ThreadFeedback::Negative => "negative",
2823 };
2824 telemetry::event!(
2825 "Assistant Thread Rated",
2826 rating,
2827 thread_id,
2828 enabled_tool_names,
2829 message_id = message_id.0,
2830 message_content,
2831 thread_data,
2832 final_project_snapshot
2833 );
2834 client.telemetry().flush_events().await;
2835
2836 Ok(())
2837 })
2838 }
2839
2840 pub fn report_feedback(
2841 &mut self,
2842 feedback: ThreadFeedback,
2843 cx: &mut Context<Self>,
2844 ) -> Task<Result<()>> {
2845 let last_assistant_message_id = self
2846 .messages
2847 .iter()
2848 .rev()
2849 .find(|msg| msg.role == Role::Assistant)
2850 .map(|msg| msg.id);
2851
2852 if let Some(message_id) = last_assistant_message_id {
2853 self.report_message_feedback(message_id, feedback, cx)
2854 } else {
2855 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2856 let serialized_thread = self.serialize(cx);
2857 let thread_id = self.id().clone();
2858 let client = self.project.read(cx).client();
2859 self.feedback = Some(feedback);
2860 cx.notify();
2861
2862 cx.background_spawn(async move {
2863 let final_project_snapshot = final_project_snapshot.await;
2864 let serialized_thread = serialized_thread.await?;
2865 let thread_data = serde_json::to_value(serialized_thread)
2866 .unwrap_or_else(|_| serde_json::Value::Null);
2867
2868 let rating = match feedback {
2869 ThreadFeedback::Positive => "positive",
2870 ThreadFeedback::Negative => "negative",
2871 };
2872 telemetry::event!(
2873 "Assistant Thread Rated",
2874 rating,
2875 thread_id,
2876 thread_data,
2877 final_project_snapshot
2878 );
2879 client.telemetry().flush_events().await;
2880
2881 Ok(())
2882 })
2883 }
2884 }
2885
2886 /// Create a snapshot of the current project state including git information and unsaved buffers.
2887 fn project_snapshot(
2888 project: Entity<Project>,
2889 cx: &mut Context<Self>,
2890 ) -> Task<Arc<ProjectSnapshot>> {
2891 let git_store = project.read(cx).git_store().clone();
2892 let worktree_snapshots: Vec<_> = project
2893 .read(cx)
2894 .visible_worktrees(cx)
2895 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
2896 .collect();
2897
2898 cx.spawn(async move |_, cx| {
2899 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
2900
2901 let mut unsaved_buffers = Vec::new();
2902 cx.update(|app_cx| {
2903 let buffer_store = project.read(app_cx).buffer_store();
2904 for buffer_handle in buffer_store.read(app_cx).buffers() {
2905 let buffer = buffer_handle.read(app_cx);
2906 if buffer.is_dirty() {
2907 if let Some(file) = buffer.file() {
2908 let path = file.path().to_string_lossy().to_string();
2909 unsaved_buffers.push(path);
2910 }
2911 }
2912 }
2913 })
2914 .ok();
2915
2916 Arc::new(ProjectSnapshot {
2917 worktree_snapshots,
2918 unsaved_buffer_paths: unsaved_buffers,
2919 timestamp: Utc::now(),
2920 })
2921 })
2922 }
2923
2924 fn worktree_snapshot(
2925 worktree: Entity<project::Worktree>,
2926 git_store: Entity<GitStore>,
2927 cx: &App,
2928 ) -> Task<WorktreeSnapshot> {
2929 cx.spawn(async move |cx| {
2930 // Get worktree path and snapshot
2931 let worktree_info = cx.update(|app_cx| {
2932 let worktree = worktree.read(app_cx);
2933 let path = worktree.abs_path().to_string_lossy().to_string();
2934 let snapshot = worktree.snapshot();
2935 (path, snapshot)
2936 });
2937
2938 let Ok((worktree_path, _snapshot)) = worktree_info else {
2939 return WorktreeSnapshot {
2940 worktree_path: String::new(),
2941 git_state: None,
2942 };
2943 };
2944
2945 let git_state = git_store
2946 .update(cx, |git_store, cx| {
2947 git_store
2948 .repositories()
2949 .values()
2950 .find(|repo| {
2951 repo.read(cx)
2952 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
2953 .is_some()
2954 })
2955 .cloned()
2956 })
2957 .ok()
2958 .flatten()
2959 .map(|repo| {
2960 repo.update(cx, |repo, _| {
2961 let current_branch =
2962 repo.branch.as_ref().map(|branch| branch.name().to_owned());
2963 repo.send_job(None, |state, _| async move {
2964 let RepositoryState::Local { backend, .. } = state else {
2965 return GitState {
2966 remote_url: None,
2967 head_sha: None,
2968 current_branch,
2969 diff: None,
2970 };
2971 };
2972
2973 let remote_url = backend.remote_url("origin");
2974 let head_sha = backend.head_sha().await;
2975 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
2976
2977 GitState {
2978 remote_url,
2979 head_sha,
2980 current_branch,
2981 diff,
2982 }
2983 })
2984 })
2985 });
2986
2987 let git_state = match git_state {
2988 Some(git_state) => match git_state.ok() {
2989 Some(git_state) => git_state.await.ok(),
2990 None => None,
2991 },
2992 None => None,
2993 };
2994
2995 WorktreeSnapshot {
2996 worktree_path,
2997 git_state,
2998 }
2999 })
3000 }
3001
3002 pub fn to_markdown(&self, cx: &App) -> Result<String> {
3003 let mut markdown = Vec::new();
3004
3005 let summary = self.summary().or_default();
3006 writeln!(markdown, "# {summary}\n")?;
3007
3008 for message in self.messages() {
3009 writeln!(
3010 markdown,
3011 "## {role}\n",
3012 role = match message.role {
3013 Role::User => "User",
3014 Role::Assistant => "Agent",
3015 Role::System => "System",
3016 }
3017 )?;
3018
3019 if !message.loaded_context.text.is_empty() {
3020 writeln!(markdown, "{}", message.loaded_context.text)?;
3021 }
3022
3023 if !message.loaded_context.images.is_empty() {
3024 writeln!(
3025 markdown,
3026 "\n{} images attached as context.\n",
3027 message.loaded_context.images.len()
3028 )?;
3029 }
3030
3031 for segment in &message.segments {
3032 match segment {
3033 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
3034 MessageSegment::Thinking { text, .. } => {
3035 writeln!(markdown, "<think>\n{}\n</think>\n", text)?
3036 }
3037 MessageSegment::RedactedThinking(_) => {}
3038 }
3039 }
3040
3041 for tool_use in self.tool_uses_for_message(message.id, cx) {
3042 writeln!(
3043 markdown,
3044 "**Use Tool: {} ({})**",
3045 tool_use.name, tool_use.id
3046 )?;
3047 writeln!(markdown, "```json")?;
3048 writeln!(
3049 markdown,
3050 "{}",
3051 serde_json::to_string_pretty(&tool_use.input)?
3052 )?;
3053 writeln!(markdown, "```")?;
3054 }
3055
3056 for tool_result in self.tool_results_for_message(message.id) {
3057 write!(markdown, "\n**Tool Results: {}", tool_result.tool_use_id)?;
3058 if tool_result.is_error {
3059 write!(markdown, " (Error)")?;
3060 }
3061
3062 writeln!(markdown, "**\n")?;
3063 match &tool_result.content {
3064 LanguageModelToolResultContent::Text(text) => {
3065 writeln!(markdown, "{text}")?;
3066 }
3067 LanguageModelToolResultContent::Image(image) => {
3068 writeln!(markdown, "", image.source)?;
3069 }
3070 }
3071
3072 if let Some(output) = tool_result.output.as_ref() {
3073 writeln!(
3074 markdown,
3075 "\n\nDebug Output:\n\n```json\n{}\n```\n",
3076 serde_json::to_string_pretty(output)?
3077 )?;
3078 }
3079 }
3080 }
3081
3082 Ok(String::from_utf8_lossy(&markdown).to_string())
3083 }
3084
3085 pub fn keep_edits_in_range(
3086 &mut self,
3087 buffer: Entity<language::Buffer>,
3088 buffer_range: Range<language::Anchor>,
3089 cx: &mut Context<Self>,
3090 ) {
3091 self.action_log.update(cx, |action_log, cx| {
3092 action_log.keep_edits_in_range(buffer, buffer_range, cx)
3093 });
3094 }
3095
3096 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
3097 self.action_log
3098 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
3099 }
3100
3101 pub fn reject_edits_in_ranges(
3102 &mut self,
3103 buffer: Entity<language::Buffer>,
3104 buffer_ranges: Vec<Range<language::Anchor>>,
3105 cx: &mut Context<Self>,
3106 ) -> Task<Result<()>> {
3107 self.action_log.update(cx, |action_log, cx| {
3108 action_log.reject_edits_in_ranges(buffer, buffer_ranges, cx)
3109 })
3110 }
3111
3112 pub fn action_log(&self) -> &Entity<ActionLog> {
3113 &self.action_log
3114 }
3115
3116 pub fn project(&self) -> &Entity<Project> {
3117 &self.project
3118 }
3119
3120 pub fn auto_capture_telemetry(&mut self, cx: &mut Context<Self>) {
3121 if !cx.has_flag::<feature_flags::ThreadAutoCaptureFeatureFlag>() {
3122 return;
3123 }
3124
3125 let now = Instant::now();
3126 if let Some(last) = self.last_auto_capture_at {
3127 if now.duration_since(last).as_secs() < 10 {
3128 return;
3129 }
3130 }
3131
3132 self.last_auto_capture_at = Some(now);
3133
3134 let thread_id = self.id().clone();
3135 let github_login = self
3136 .project
3137 .read(cx)
3138 .user_store()
3139 .read(cx)
3140 .current_user()
3141 .map(|user| user.github_login.clone());
3142 let client = self.project.read(cx).client();
3143 let serialize_task = self.serialize(cx);
3144
3145 cx.background_executor()
3146 .spawn(async move {
3147 if let Ok(serialized_thread) = serialize_task.await {
3148 if let Ok(thread_data) = serde_json::to_value(serialized_thread) {
3149 telemetry::event!(
3150 "Agent Thread Auto-Captured",
3151 thread_id = thread_id.to_string(),
3152 thread_data = thread_data,
3153 auto_capture_reason = "tracked_user",
3154 github_login = github_login
3155 );
3156
3157 client.telemetry().flush_events().await;
3158 }
3159 }
3160 })
3161 .detach();
3162 }
3163
3164 pub fn cumulative_token_usage(&self) -> TokenUsage {
3165 self.cumulative_token_usage
3166 }
3167
3168 pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
3169 let Some(model) = self.configured_model.as_ref() else {
3170 return TotalTokenUsage::default();
3171 };
3172
3173 let max = model
3174 .model
3175 .max_token_count_for_mode(self.completion_mode().into());
3176
3177 let index = self
3178 .messages
3179 .iter()
3180 .position(|msg| msg.id == message_id)
3181 .unwrap_or(0);
3182
3183 if index == 0 {
3184 return TotalTokenUsage { total: 0, max };
3185 }
3186
3187 let token_usage = &self
3188 .request_token_usage
3189 .get(index - 1)
3190 .cloned()
3191 .unwrap_or_default();
3192
3193 TotalTokenUsage {
3194 total: token_usage.total_tokens(),
3195 max,
3196 }
3197 }
3198
3199 pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
3200 let model = self.configured_model.as_ref()?;
3201
3202 let max = model
3203 .model
3204 .max_token_count_for_mode(self.completion_mode().into());
3205
3206 if let Some(exceeded_error) = &self.exceeded_window_error {
3207 if model.model.id() == exceeded_error.model_id {
3208 return Some(TotalTokenUsage {
3209 total: exceeded_error.token_count,
3210 max,
3211 });
3212 }
3213 }
3214
3215 let total = self
3216 .token_usage_at_last_message()
3217 .unwrap_or_default()
3218 .total_tokens();
3219
3220 Some(TotalTokenUsage { total, max })
3221 }
3222
3223 fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
3224 self.request_token_usage
3225 .get(self.messages.len().saturating_sub(1))
3226 .or_else(|| self.request_token_usage.last())
3227 .cloned()
3228 }
3229
3230 fn update_token_usage_at_last_message(&mut self, token_usage: TokenUsage) {
3231 let placeholder = self.token_usage_at_last_message().unwrap_or_default();
3232 self.request_token_usage
3233 .resize(self.messages.len(), placeholder);
3234
3235 if let Some(last) = self.request_token_usage.last_mut() {
3236 *last = token_usage;
3237 }
3238 }
3239
3240 fn update_model_request_usage(&self, amount: u32, limit: UsageLimit, cx: &mut Context<Self>) {
3241 self.project.update(cx, |project, cx| {
3242 project.user_store().update(cx, |user_store, cx| {
3243 user_store.update_model_request_usage(
3244 ModelRequestUsage(RequestUsage {
3245 amount: amount as i32,
3246 limit,
3247 }),
3248 cx,
3249 )
3250 })
3251 });
3252 }
3253
3254 pub fn deny_tool_use(
3255 &mut self,
3256 tool_use_id: LanguageModelToolUseId,
3257 tool_name: Arc<str>,
3258 window: Option<AnyWindowHandle>,
3259 cx: &mut Context<Self>,
3260 ) {
3261 let err = Err(anyhow::anyhow!(
3262 "Permission to run tool action denied by user"
3263 ));
3264
3265 self.tool_use.insert_tool_output(
3266 tool_use_id.clone(),
3267 tool_name,
3268 err,
3269 self.configured_model.as_ref(),
3270 self.completion_mode,
3271 );
3272 self.tool_finished(tool_use_id.clone(), None, true, window, cx);
3273 }
3274}
3275
3276#[derive(Debug, Clone, Error)]
3277pub enum ThreadError {
3278 #[error("Payment required")]
3279 PaymentRequired,
3280 #[error("Model request limit reached")]
3281 ModelRequestLimitReached { plan: Plan },
3282 #[error("Message {header}: {message}")]
3283 Message {
3284 header: SharedString,
3285 message: SharedString,
3286 },
3287 #[error("Retryable error: {message}")]
3288 RetryableError {
3289 message: SharedString,
3290 can_enable_burn_mode: bool,
3291 },
3292}
3293
3294#[derive(Debug, Clone)]
3295pub enum ThreadEvent {
3296 ShowError(ThreadError),
3297 StreamedCompletion,
3298 ReceivedTextChunk,
3299 NewRequest,
3300 StreamedAssistantText(MessageId, String),
3301 StreamedAssistantThinking(MessageId, String),
3302 StreamedToolUse {
3303 tool_use_id: LanguageModelToolUseId,
3304 ui_text: Arc<str>,
3305 input: serde_json::Value,
3306 },
3307 MissingToolUse {
3308 tool_use_id: LanguageModelToolUseId,
3309 ui_text: Arc<str>,
3310 },
3311 InvalidToolInput {
3312 tool_use_id: LanguageModelToolUseId,
3313 ui_text: Arc<str>,
3314 invalid_input_json: Arc<str>,
3315 },
3316 Stopped(Result<StopReason, Arc<anyhow::Error>>),
3317 MessageAdded(MessageId),
3318 MessageEdited(MessageId),
3319 MessageDeleted(MessageId),
3320 SummaryGenerated,
3321 SummaryChanged,
3322 UsePendingTools {
3323 tool_uses: Vec<PendingToolUse>,
3324 },
3325 ToolFinished {
3326 #[allow(unused)]
3327 tool_use_id: LanguageModelToolUseId,
3328 /// The pending tool use that corresponds to this tool.
3329 pending_tool_use: Option<PendingToolUse>,
3330 },
3331 CheckpointChanged,
3332 ToolConfirmationNeeded,
3333 ToolUseLimitReached,
3334 CancelEditing,
3335 CompletionCanceled,
3336 ProfileChanged,
3337}
3338
3339impl EventEmitter<ThreadEvent> for Thread {}
3340
3341struct PendingCompletion {
3342 id: usize,
3343 queue_state: QueueState,
3344 _task: Task<()>,
3345}
3346
3347#[cfg(test)]
3348mod tests {
3349 use super::*;
3350 use crate::{
3351 context::load_context, context_store::ContextStore, thread_store, thread_store::ThreadStore,
3352 };
3353
3354 // Test-specific constants
3355 const TEST_RATE_LIMIT_RETRY_SECS: u64 = 30;
3356 use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
3357 use assistant_tool::ToolRegistry;
3358 use assistant_tools;
3359 use futures::StreamExt;
3360 use futures::future::BoxFuture;
3361 use futures::stream::BoxStream;
3362 use gpui::TestAppContext;
3363 use http_client;
3364 use indoc::indoc;
3365 use language_model::fake_provider::{FakeLanguageModel, FakeLanguageModelProvider};
3366 use language_model::{
3367 LanguageModelCompletionError, LanguageModelName, LanguageModelProviderId,
3368 LanguageModelProviderName, LanguageModelToolChoice,
3369 };
3370 use parking_lot::Mutex;
3371 use project::{FakeFs, Project};
3372 use prompt_store::PromptBuilder;
3373 use serde_json::json;
3374 use settings::{Settings, SettingsStore};
3375 use std::sync::Arc;
3376 use std::time::Duration;
3377 use theme::ThemeSettings;
3378 use util::path;
3379 use workspace::Workspace;
3380
3381 #[gpui::test]
3382 async fn test_message_with_context(cx: &mut TestAppContext) {
3383 init_test_settings(cx);
3384
3385 let project = create_test_project(
3386 cx,
3387 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3388 )
3389 .await;
3390
3391 let (_workspace, _thread_store, thread, context_store, model) =
3392 setup_test_environment(cx, project.clone()).await;
3393
3394 add_file_to_context(&project, &context_store, "test/code.rs", cx)
3395 .await
3396 .unwrap();
3397
3398 let context =
3399 context_store.read_with(cx, |store, _| store.context().next().cloned().unwrap());
3400 let loaded_context = cx
3401 .update(|cx| load_context(vec![context], &project, &None, cx))
3402 .await;
3403
3404 // Insert user message with context
3405 let message_id = thread.update(cx, |thread, cx| {
3406 thread.insert_user_message(
3407 "Please explain this code",
3408 loaded_context,
3409 None,
3410 Vec::new(),
3411 cx,
3412 )
3413 });
3414
3415 // Check content and context in message object
3416 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3417
3418 // Use different path format strings based on platform for the test
3419 #[cfg(windows)]
3420 let path_part = r"test\code.rs";
3421 #[cfg(not(windows))]
3422 let path_part = "test/code.rs";
3423
3424 let expected_context = format!(
3425 r#"
3426<context>
3427The following items were attached by the user. They are up-to-date and don't need to be re-read.
3428
3429<files>
3430```rs {path_part}
3431fn main() {{
3432 println!("Hello, world!");
3433}}
3434```
3435</files>
3436</context>
3437"#
3438 );
3439
3440 assert_eq!(message.role, Role::User);
3441 assert_eq!(message.segments.len(), 1);
3442 assert_eq!(
3443 message.segments[0],
3444 MessageSegment::Text("Please explain this code".to_string())
3445 );
3446 assert_eq!(message.loaded_context.text, expected_context);
3447
3448 // Check message in request
3449 let request = thread.update(cx, |thread, cx| {
3450 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3451 });
3452
3453 assert_eq!(request.messages.len(), 2);
3454 let expected_full_message = format!("{}Please explain this code", expected_context);
3455 assert_eq!(request.messages[1].string_contents(), expected_full_message);
3456 }
3457
3458 #[gpui::test]
3459 async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
3460 init_test_settings(cx);
3461
3462 let project = create_test_project(
3463 cx,
3464 json!({
3465 "file1.rs": "fn function1() {}\n",
3466 "file2.rs": "fn function2() {}\n",
3467 "file3.rs": "fn function3() {}\n",
3468 "file4.rs": "fn function4() {}\n",
3469 }),
3470 )
3471 .await;
3472
3473 let (_, _thread_store, thread, context_store, model) =
3474 setup_test_environment(cx, project.clone()).await;
3475
3476 // First message with context 1
3477 add_file_to_context(&project, &context_store, "test/file1.rs", cx)
3478 .await
3479 .unwrap();
3480 let new_contexts = context_store.update(cx, |store, cx| {
3481 store.new_context_for_thread(thread.read(cx), None)
3482 });
3483 assert_eq!(new_contexts.len(), 1);
3484 let loaded_context = cx
3485 .update(|cx| load_context(new_contexts, &project, &None, cx))
3486 .await;
3487 let message1_id = thread.update(cx, |thread, cx| {
3488 thread.insert_user_message("Message 1", loaded_context, None, Vec::new(), cx)
3489 });
3490
3491 // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
3492 add_file_to_context(&project, &context_store, "test/file2.rs", cx)
3493 .await
3494 .unwrap();
3495 let new_contexts = context_store.update(cx, |store, cx| {
3496 store.new_context_for_thread(thread.read(cx), None)
3497 });
3498 assert_eq!(new_contexts.len(), 1);
3499 let loaded_context = cx
3500 .update(|cx| load_context(new_contexts, &project, &None, cx))
3501 .await;
3502 let message2_id = thread.update(cx, |thread, cx| {
3503 thread.insert_user_message("Message 2", loaded_context, None, Vec::new(), cx)
3504 });
3505
3506 // Third message with all three contexts (contexts 1 and 2 should be skipped)
3507 //
3508 add_file_to_context(&project, &context_store, "test/file3.rs", cx)
3509 .await
3510 .unwrap();
3511 let new_contexts = context_store.update(cx, |store, cx| {
3512 store.new_context_for_thread(thread.read(cx), None)
3513 });
3514 assert_eq!(new_contexts.len(), 1);
3515 let loaded_context = cx
3516 .update(|cx| load_context(new_contexts, &project, &None, cx))
3517 .await;
3518 let message3_id = thread.update(cx, |thread, cx| {
3519 thread.insert_user_message("Message 3", loaded_context, None, Vec::new(), cx)
3520 });
3521
3522 // Check what contexts are included in each message
3523 let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
3524 (
3525 thread.message(message1_id).unwrap().clone(),
3526 thread.message(message2_id).unwrap().clone(),
3527 thread.message(message3_id).unwrap().clone(),
3528 )
3529 });
3530
3531 // First message should include context 1
3532 assert!(message1.loaded_context.text.contains("file1.rs"));
3533
3534 // Second message should include only context 2 (not 1)
3535 assert!(!message2.loaded_context.text.contains("file1.rs"));
3536 assert!(message2.loaded_context.text.contains("file2.rs"));
3537
3538 // Third message should include only context 3 (not 1 or 2)
3539 assert!(!message3.loaded_context.text.contains("file1.rs"));
3540 assert!(!message3.loaded_context.text.contains("file2.rs"));
3541 assert!(message3.loaded_context.text.contains("file3.rs"));
3542
3543 // Check entire request to make sure all contexts are properly included
3544 let request = thread.update(cx, |thread, cx| {
3545 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3546 });
3547
3548 // The request should contain all 3 messages
3549 assert_eq!(request.messages.len(), 4);
3550
3551 // Check that the contexts are properly formatted in each message
3552 assert!(request.messages[1].string_contents().contains("file1.rs"));
3553 assert!(!request.messages[1].string_contents().contains("file2.rs"));
3554 assert!(!request.messages[1].string_contents().contains("file3.rs"));
3555
3556 assert!(!request.messages[2].string_contents().contains("file1.rs"));
3557 assert!(request.messages[2].string_contents().contains("file2.rs"));
3558 assert!(!request.messages[2].string_contents().contains("file3.rs"));
3559
3560 assert!(!request.messages[3].string_contents().contains("file1.rs"));
3561 assert!(!request.messages[3].string_contents().contains("file2.rs"));
3562 assert!(request.messages[3].string_contents().contains("file3.rs"));
3563
3564 add_file_to_context(&project, &context_store, "test/file4.rs", cx)
3565 .await
3566 .unwrap();
3567 let new_contexts = context_store.update(cx, |store, cx| {
3568 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3569 });
3570 assert_eq!(new_contexts.len(), 3);
3571 let loaded_context = cx
3572 .update(|cx| load_context(new_contexts, &project, &None, cx))
3573 .await
3574 .loaded_context;
3575
3576 assert!(!loaded_context.text.contains("file1.rs"));
3577 assert!(loaded_context.text.contains("file2.rs"));
3578 assert!(loaded_context.text.contains("file3.rs"));
3579 assert!(loaded_context.text.contains("file4.rs"));
3580
3581 let new_contexts = context_store.update(cx, |store, cx| {
3582 // Remove file4.rs
3583 store.remove_context(&loaded_context.contexts[2].handle(), cx);
3584 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3585 });
3586 assert_eq!(new_contexts.len(), 2);
3587 let loaded_context = cx
3588 .update(|cx| load_context(new_contexts, &project, &None, cx))
3589 .await
3590 .loaded_context;
3591
3592 assert!(!loaded_context.text.contains("file1.rs"));
3593 assert!(loaded_context.text.contains("file2.rs"));
3594 assert!(loaded_context.text.contains("file3.rs"));
3595 assert!(!loaded_context.text.contains("file4.rs"));
3596
3597 let new_contexts = context_store.update(cx, |store, cx| {
3598 // Remove file3.rs
3599 store.remove_context(&loaded_context.contexts[1].handle(), cx);
3600 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3601 });
3602 assert_eq!(new_contexts.len(), 1);
3603 let loaded_context = cx
3604 .update(|cx| load_context(new_contexts, &project, &None, cx))
3605 .await
3606 .loaded_context;
3607
3608 assert!(!loaded_context.text.contains("file1.rs"));
3609 assert!(loaded_context.text.contains("file2.rs"));
3610 assert!(!loaded_context.text.contains("file3.rs"));
3611 assert!(!loaded_context.text.contains("file4.rs"));
3612 }
3613
3614 #[gpui::test]
3615 async fn test_message_without_files(cx: &mut TestAppContext) {
3616 init_test_settings(cx);
3617
3618 let project = create_test_project(
3619 cx,
3620 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3621 )
3622 .await;
3623
3624 let (_, _thread_store, thread, _context_store, model) =
3625 setup_test_environment(cx, project.clone()).await;
3626
3627 // Insert user message without any context (empty context vector)
3628 let message_id = thread.update(cx, |thread, cx| {
3629 thread.insert_user_message(
3630 "What is the best way to learn Rust?",
3631 ContextLoadResult::default(),
3632 None,
3633 Vec::new(),
3634 cx,
3635 )
3636 });
3637
3638 // Check content and context in message object
3639 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3640
3641 // Context should be empty when no files are included
3642 assert_eq!(message.role, Role::User);
3643 assert_eq!(message.segments.len(), 1);
3644 assert_eq!(
3645 message.segments[0],
3646 MessageSegment::Text("What is the best way to learn Rust?".to_string())
3647 );
3648 assert_eq!(message.loaded_context.text, "");
3649
3650 // Check message in request
3651 let request = thread.update(cx, |thread, cx| {
3652 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3653 });
3654
3655 assert_eq!(request.messages.len(), 2);
3656 assert_eq!(
3657 request.messages[1].string_contents(),
3658 "What is the best way to learn Rust?"
3659 );
3660
3661 // Add second message, also without context
3662 let message2_id = thread.update(cx, |thread, cx| {
3663 thread.insert_user_message(
3664 "Are there any good books?",
3665 ContextLoadResult::default(),
3666 None,
3667 Vec::new(),
3668 cx,
3669 )
3670 });
3671
3672 let message2 =
3673 thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
3674 assert_eq!(message2.loaded_context.text, "");
3675
3676 // Check that both messages appear in the request
3677 let request = thread.update(cx, |thread, cx| {
3678 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3679 });
3680
3681 assert_eq!(request.messages.len(), 3);
3682 assert_eq!(
3683 request.messages[1].string_contents(),
3684 "What is the best way to learn Rust?"
3685 );
3686 assert_eq!(
3687 request.messages[2].string_contents(),
3688 "Are there any good books?"
3689 );
3690 }
3691
3692 #[gpui::test]
3693 async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
3694 init_test_settings(cx);
3695
3696 let project = create_test_project(
3697 cx,
3698 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3699 )
3700 .await;
3701
3702 let (_workspace, _thread_store, thread, context_store, model) =
3703 setup_test_environment(cx, project.clone()).await;
3704
3705 // Add a buffer to the context. This will be a tracked buffer
3706 let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
3707 .await
3708 .unwrap();
3709
3710 let context = context_store
3711 .read_with(cx, |store, _| store.context().next().cloned())
3712 .unwrap();
3713 let loaded_context = cx
3714 .update(|cx| load_context(vec![context], &project, &None, cx))
3715 .await;
3716
3717 // Insert user message and assistant response
3718 thread.update(cx, |thread, cx| {
3719 thread.insert_user_message("Explain this code", loaded_context, None, Vec::new(), cx);
3720 thread.insert_assistant_message(
3721 vec![MessageSegment::Text("This code prints 42.".into())],
3722 cx,
3723 );
3724 });
3725
3726 // We shouldn't have a stale buffer notification yet
3727 let notifications = thread.read_with(cx, |thread, _| {
3728 find_tool_uses(thread, "project_notifications")
3729 });
3730 assert!(
3731 notifications.is_empty(),
3732 "Should not have stale buffer notification before buffer is modified"
3733 );
3734
3735 // Modify the buffer
3736 buffer.update(cx, |buffer, cx| {
3737 buffer.edit(
3738 [(1..1, "\n println!(\"Added a new line\");\n")],
3739 None,
3740 cx,
3741 );
3742 });
3743
3744 // Insert another user message
3745 thread.update(cx, |thread, cx| {
3746 thread.insert_user_message(
3747 "What does the code do now?",
3748 ContextLoadResult::default(),
3749 None,
3750 Vec::new(),
3751 cx,
3752 )
3753 });
3754
3755 // Check for the stale buffer warning
3756 thread.update(cx, |thread, cx| {
3757 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3758 });
3759
3760 let notifications = thread.read_with(cx, |thread, _cx| {
3761 find_tool_uses(thread, "project_notifications")
3762 });
3763
3764 let [notification] = notifications.as_slice() else {
3765 panic!("Should have a `project_notifications` tool use");
3766 };
3767
3768 let Some(notification_content) = notification.content.to_str() else {
3769 panic!("`project_notifications` should return text");
3770 };
3771
3772 let expected_content = indoc! {"[The following is an auto-generated notification; do not reply]
3773
3774 These files have changed since the last read:
3775 - code.rs
3776 "};
3777 assert_eq!(notification_content, expected_content);
3778
3779 // Insert another user message and flush notifications again
3780 thread.update(cx, |thread, cx| {
3781 thread.insert_user_message(
3782 "Can you tell me more?",
3783 ContextLoadResult::default(),
3784 None,
3785 Vec::new(),
3786 cx,
3787 )
3788 });
3789
3790 thread.update(cx, |thread, cx| {
3791 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3792 });
3793
3794 // There should be no new notifications (we already flushed one)
3795 let notifications = thread.read_with(cx, |thread, _cx| {
3796 find_tool_uses(thread, "project_notifications")
3797 });
3798
3799 assert_eq!(
3800 notifications.len(),
3801 1,
3802 "Should still have only one notification after second flush - no duplicates"
3803 );
3804 }
3805
3806 fn find_tool_uses(thread: &Thread, tool_name: &str) -> Vec<LanguageModelToolResult> {
3807 thread
3808 .messages()
3809 .flat_map(|message| {
3810 thread
3811 .tool_results_for_message(message.id)
3812 .into_iter()
3813 .filter(|result| result.tool_name == tool_name.into())
3814 .cloned()
3815 .collect::<Vec<_>>()
3816 })
3817 .collect()
3818 }
3819
3820 #[gpui::test]
3821 async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
3822 init_test_settings(cx);
3823
3824 let project = create_test_project(
3825 cx,
3826 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3827 )
3828 .await;
3829
3830 let (_workspace, thread_store, thread, _context_store, _model) =
3831 setup_test_environment(cx, project.clone()).await;
3832
3833 // Check that we are starting with the default profile
3834 let profile = cx.read(|cx| thread.read(cx).profile.clone());
3835 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3836 assert_eq!(
3837 profile,
3838 AgentProfile::new(AgentProfileId::default(), tool_set)
3839 );
3840 }
3841
3842 #[gpui::test]
3843 async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
3844 init_test_settings(cx);
3845
3846 let project = create_test_project(
3847 cx,
3848 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3849 )
3850 .await;
3851
3852 let (_workspace, thread_store, thread, _context_store, _model) =
3853 setup_test_environment(cx, project.clone()).await;
3854
3855 // Profile gets serialized with default values
3856 let serialized = thread
3857 .update(cx, |thread, cx| thread.serialize(cx))
3858 .await
3859 .unwrap();
3860
3861 assert_eq!(serialized.profile, Some(AgentProfileId::default()));
3862
3863 let deserialized = cx.update(|cx| {
3864 thread.update(cx, |thread, cx| {
3865 Thread::deserialize(
3866 thread.id.clone(),
3867 serialized,
3868 thread.project.clone(),
3869 thread.tools.clone(),
3870 thread.prompt_builder.clone(),
3871 thread.project_context.clone(),
3872 None,
3873 cx,
3874 )
3875 })
3876 });
3877 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3878
3879 assert_eq!(
3880 deserialized.profile,
3881 AgentProfile::new(AgentProfileId::default(), tool_set)
3882 );
3883 }
3884
3885 #[gpui::test]
3886 async fn test_temperature_setting(cx: &mut TestAppContext) {
3887 init_test_settings(cx);
3888
3889 let project = create_test_project(
3890 cx,
3891 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3892 )
3893 .await;
3894
3895 let (_workspace, _thread_store, thread, _context_store, model) =
3896 setup_test_environment(cx, project.clone()).await;
3897
3898 // Both model and provider
3899 cx.update(|cx| {
3900 AgentSettings::override_global(
3901 AgentSettings {
3902 model_parameters: vec![LanguageModelParameters {
3903 provider: Some(model.provider_id().0.to_string().into()),
3904 model: Some(model.id().0.clone()),
3905 temperature: Some(0.66),
3906 }],
3907 ..AgentSettings::get_global(cx).clone()
3908 },
3909 cx,
3910 );
3911 });
3912
3913 let request = thread.update(cx, |thread, cx| {
3914 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3915 });
3916 assert_eq!(request.temperature, Some(0.66));
3917
3918 // Only model
3919 cx.update(|cx| {
3920 AgentSettings::override_global(
3921 AgentSettings {
3922 model_parameters: vec![LanguageModelParameters {
3923 provider: None,
3924 model: Some(model.id().0.clone()),
3925 temperature: Some(0.66),
3926 }],
3927 ..AgentSettings::get_global(cx).clone()
3928 },
3929 cx,
3930 );
3931 });
3932
3933 let request = thread.update(cx, |thread, cx| {
3934 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3935 });
3936 assert_eq!(request.temperature, Some(0.66));
3937
3938 // Only provider
3939 cx.update(|cx| {
3940 AgentSettings::override_global(
3941 AgentSettings {
3942 model_parameters: vec![LanguageModelParameters {
3943 provider: Some(model.provider_id().0.to_string().into()),
3944 model: None,
3945 temperature: Some(0.66),
3946 }],
3947 ..AgentSettings::get_global(cx).clone()
3948 },
3949 cx,
3950 );
3951 });
3952
3953 let request = thread.update(cx, |thread, cx| {
3954 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3955 });
3956 assert_eq!(request.temperature, Some(0.66));
3957
3958 // Same model name, different provider
3959 cx.update(|cx| {
3960 AgentSettings::override_global(
3961 AgentSettings {
3962 model_parameters: vec![LanguageModelParameters {
3963 provider: Some("anthropic".into()),
3964 model: Some(model.id().0.clone()),
3965 temperature: Some(0.66),
3966 }],
3967 ..AgentSettings::get_global(cx).clone()
3968 },
3969 cx,
3970 );
3971 });
3972
3973 let request = thread.update(cx, |thread, cx| {
3974 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3975 });
3976 assert_eq!(request.temperature, None);
3977 }
3978
3979 #[gpui::test]
3980 async fn test_thread_summary(cx: &mut TestAppContext) {
3981 init_test_settings(cx);
3982
3983 let project = create_test_project(cx, json!({})).await;
3984
3985 let (_, _thread_store, thread, _context_store, model) =
3986 setup_test_environment(cx, project.clone()).await;
3987
3988 // Initial state should be pending
3989 thread.read_with(cx, |thread, _| {
3990 assert!(matches!(thread.summary(), ThreadSummary::Pending));
3991 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
3992 });
3993
3994 // Manually setting the summary should not be allowed in this state
3995 thread.update(cx, |thread, cx| {
3996 thread.set_summary("This should not work", cx);
3997 });
3998
3999 thread.read_with(cx, |thread, _| {
4000 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4001 });
4002
4003 // Send a message
4004 thread.update(cx, |thread, cx| {
4005 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
4006 thread.send_to_model(
4007 model.clone(),
4008 CompletionIntent::ThreadSummarization,
4009 None,
4010 cx,
4011 );
4012 });
4013
4014 let fake_model = model.as_fake();
4015 simulate_successful_response(&fake_model, cx);
4016
4017 // Should start generating summary when there are >= 2 messages
4018 thread.read_with(cx, |thread, _| {
4019 assert_eq!(*thread.summary(), ThreadSummary::Generating);
4020 });
4021
4022 // Should not be able to set the summary while generating
4023 thread.update(cx, |thread, cx| {
4024 thread.set_summary("This should not work either", cx);
4025 });
4026
4027 thread.read_with(cx, |thread, _| {
4028 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4029 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4030 });
4031
4032 cx.run_until_parked();
4033 fake_model.stream_last_completion_response("Brief");
4034 fake_model.stream_last_completion_response(" Introduction");
4035 fake_model.end_last_completion_stream();
4036 cx.run_until_parked();
4037
4038 // Summary should be set
4039 thread.read_with(cx, |thread, _| {
4040 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4041 assert_eq!(thread.summary().or_default(), "Brief Introduction");
4042 });
4043
4044 // Now we should be able to set a summary
4045 thread.update(cx, |thread, cx| {
4046 thread.set_summary("Brief Intro", cx);
4047 });
4048
4049 thread.read_with(cx, |thread, _| {
4050 assert_eq!(thread.summary().or_default(), "Brief Intro");
4051 });
4052
4053 // Test setting an empty summary (should default to DEFAULT)
4054 thread.update(cx, |thread, cx| {
4055 thread.set_summary("", cx);
4056 });
4057
4058 thread.read_with(cx, |thread, _| {
4059 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4060 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4061 });
4062 }
4063
4064 #[gpui::test]
4065 async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
4066 init_test_settings(cx);
4067
4068 let project = create_test_project(cx, json!({})).await;
4069
4070 let (_, _thread_store, thread, _context_store, model) =
4071 setup_test_environment(cx, project.clone()).await;
4072
4073 test_summarize_error(&model, &thread, cx);
4074
4075 // Now we should be able to set a summary
4076 thread.update(cx, |thread, cx| {
4077 thread.set_summary("Brief Intro", cx);
4078 });
4079
4080 thread.read_with(cx, |thread, _| {
4081 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4082 assert_eq!(thread.summary().or_default(), "Brief Intro");
4083 });
4084 }
4085
4086 #[gpui::test]
4087 async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
4088 init_test_settings(cx);
4089
4090 let project = create_test_project(cx, json!({})).await;
4091
4092 let (_, _thread_store, thread, _context_store, model) =
4093 setup_test_environment(cx, project.clone()).await;
4094
4095 test_summarize_error(&model, &thread, cx);
4096
4097 // Sending another message should not trigger another summarize request
4098 thread.update(cx, |thread, cx| {
4099 thread.insert_user_message(
4100 "How are you?",
4101 ContextLoadResult::default(),
4102 None,
4103 vec![],
4104 cx,
4105 );
4106 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4107 });
4108
4109 let fake_model = model.as_fake();
4110 simulate_successful_response(&fake_model, cx);
4111
4112 thread.read_with(cx, |thread, _| {
4113 // State is still Error, not Generating
4114 assert!(matches!(thread.summary(), ThreadSummary::Error));
4115 });
4116
4117 // But the summarize request can be invoked manually
4118 thread.update(cx, |thread, cx| {
4119 thread.summarize(cx);
4120 });
4121
4122 thread.read_with(cx, |thread, _| {
4123 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4124 });
4125
4126 cx.run_until_parked();
4127 fake_model.stream_last_completion_response("A successful summary");
4128 fake_model.end_last_completion_stream();
4129 cx.run_until_parked();
4130
4131 thread.read_with(cx, |thread, _| {
4132 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4133 assert_eq!(thread.summary().or_default(), "A successful summary");
4134 });
4135 }
4136
4137 // Helper to create a model that returns errors
4138 enum TestError {
4139 Overloaded,
4140 InternalServerError,
4141 }
4142
4143 struct ErrorInjector {
4144 inner: Arc<FakeLanguageModel>,
4145 error_type: TestError,
4146 }
4147
4148 impl ErrorInjector {
4149 fn new(error_type: TestError) -> Self {
4150 Self {
4151 inner: Arc::new(FakeLanguageModel::default()),
4152 error_type,
4153 }
4154 }
4155 }
4156
4157 impl LanguageModel for ErrorInjector {
4158 fn id(&self) -> LanguageModelId {
4159 self.inner.id()
4160 }
4161
4162 fn name(&self) -> LanguageModelName {
4163 self.inner.name()
4164 }
4165
4166 fn provider_id(&self) -> LanguageModelProviderId {
4167 self.inner.provider_id()
4168 }
4169
4170 fn provider_name(&self) -> LanguageModelProviderName {
4171 self.inner.provider_name()
4172 }
4173
4174 fn supports_tools(&self) -> bool {
4175 self.inner.supports_tools()
4176 }
4177
4178 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4179 self.inner.supports_tool_choice(choice)
4180 }
4181
4182 fn supports_images(&self) -> bool {
4183 self.inner.supports_images()
4184 }
4185
4186 fn telemetry_id(&self) -> String {
4187 self.inner.telemetry_id()
4188 }
4189
4190 fn max_token_count(&self) -> u64 {
4191 self.inner.max_token_count()
4192 }
4193
4194 fn count_tokens(
4195 &self,
4196 request: LanguageModelRequest,
4197 cx: &App,
4198 ) -> BoxFuture<'static, Result<u64>> {
4199 self.inner.count_tokens(request, cx)
4200 }
4201
4202 fn stream_completion(
4203 &self,
4204 _request: LanguageModelRequest,
4205 _cx: &AsyncApp,
4206 ) -> BoxFuture<
4207 'static,
4208 Result<
4209 BoxStream<
4210 'static,
4211 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4212 >,
4213 LanguageModelCompletionError,
4214 >,
4215 > {
4216 let error = match self.error_type {
4217 TestError::Overloaded => LanguageModelCompletionError::ServerOverloaded {
4218 provider: self.provider_name(),
4219 retry_after: None,
4220 },
4221 TestError::InternalServerError => {
4222 LanguageModelCompletionError::ApiInternalServerError {
4223 provider: self.provider_name(),
4224 message: "I'm a teapot orbiting the sun".to_string(),
4225 }
4226 }
4227 };
4228 async move {
4229 let stream = futures::stream::once(async move { Err(error) });
4230 Ok(stream.boxed())
4231 }
4232 .boxed()
4233 }
4234
4235 fn as_fake(&self) -> &FakeLanguageModel {
4236 &self.inner
4237 }
4238 }
4239
4240 #[gpui::test]
4241 async fn test_retry_on_overloaded_error(cx: &mut TestAppContext) {
4242 init_test_settings(cx);
4243
4244 let project = create_test_project(cx, json!({})).await;
4245 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4246
4247 // Enable Burn Mode to allow retries
4248 thread.update(cx, |thread, _| {
4249 thread.set_completion_mode(CompletionMode::Burn);
4250 });
4251
4252 // Create model that returns overloaded error
4253 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4254
4255 // Insert a user message
4256 thread.update(cx, |thread, cx| {
4257 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4258 });
4259
4260 // Start completion
4261 thread.update(cx, |thread, cx| {
4262 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4263 });
4264
4265 cx.run_until_parked();
4266
4267 thread.read_with(cx, |thread, _| {
4268 assert!(thread.retry_state.is_some(), "Should have retry state");
4269 let retry_state = thread.retry_state.as_ref().unwrap();
4270 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4271 assert_eq!(
4272 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
4273 "Should retry MAX_RETRY_ATTEMPTS times for overloaded errors"
4274 );
4275 });
4276
4277 // Check that a retry message was added
4278 thread.read_with(cx, |thread, _| {
4279 let mut messages = thread.messages();
4280 assert!(
4281 messages.any(|msg| {
4282 msg.role == Role::System
4283 && msg.ui_only
4284 && msg.segments.iter().any(|seg| {
4285 if let MessageSegment::Text(text) = seg {
4286 text.contains("overloaded")
4287 && text
4288 .contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS))
4289 } else {
4290 false
4291 }
4292 })
4293 }),
4294 "Should have added a system retry message"
4295 );
4296 });
4297
4298 let retry_count = thread.update(cx, |thread, _| {
4299 thread
4300 .messages
4301 .iter()
4302 .filter(|m| {
4303 m.ui_only
4304 && m.segments.iter().any(|s| {
4305 if let MessageSegment::Text(text) = s {
4306 text.contains("Retrying") && text.contains("seconds")
4307 } else {
4308 false
4309 }
4310 })
4311 })
4312 .count()
4313 });
4314
4315 assert_eq!(retry_count, 1, "Should have one retry message");
4316 }
4317
4318 #[gpui::test]
4319 async fn test_retry_on_internal_server_error(cx: &mut TestAppContext) {
4320 init_test_settings(cx);
4321
4322 let project = create_test_project(cx, json!({})).await;
4323 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4324
4325 // Enable Burn Mode to allow retries
4326 thread.update(cx, |thread, _| {
4327 thread.set_completion_mode(CompletionMode::Burn);
4328 });
4329
4330 // Create model that returns internal server error
4331 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4332
4333 // Insert a user message
4334 thread.update(cx, |thread, cx| {
4335 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4336 });
4337
4338 // Start completion
4339 thread.update(cx, |thread, cx| {
4340 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4341 });
4342
4343 cx.run_until_parked();
4344
4345 // Check retry state on thread
4346 thread.read_with(cx, |thread, _| {
4347 assert!(thread.retry_state.is_some(), "Should have retry state");
4348 let retry_state = thread.retry_state.as_ref().unwrap();
4349 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4350 assert_eq!(
4351 retry_state.max_attempts, 1,
4352 "Should have correct max attempts"
4353 );
4354 });
4355
4356 // Check that a retry message was added with provider name
4357 thread.read_with(cx, |thread, _| {
4358 let mut messages = thread.messages();
4359 assert!(
4360 messages.any(|msg| {
4361 msg.role == Role::System
4362 && msg.ui_only
4363 && msg.segments.iter().any(|seg| {
4364 if let MessageSegment::Text(text) = seg {
4365 text.contains("internal")
4366 && text.contains("Fake")
4367 && text.contains("Retrying in")
4368 && !text.contains("attempt")
4369 } else {
4370 false
4371 }
4372 })
4373 }),
4374 "Should have added a system retry message with provider name"
4375 );
4376 });
4377
4378 // Count retry messages
4379 let retry_count = thread.update(cx, |thread, _| {
4380 thread
4381 .messages
4382 .iter()
4383 .filter(|m| {
4384 m.ui_only
4385 && m.segments.iter().any(|s| {
4386 if let MessageSegment::Text(text) = s {
4387 text.contains("Retrying") && text.contains("seconds")
4388 } else {
4389 false
4390 }
4391 })
4392 })
4393 .count()
4394 });
4395
4396 assert_eq!(retry_count, 1, "Should have one retry message");
4397 }
4398
4399 #[gpui::test]
4400 async fn test_exponential_backoff_on_retries(cx: &mut TestAppContext) {
4401 init_test_settings(cx);
4402
4403 let project = create_test_project(cx, json!({})).await;
4404 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4405
4406 // Enable Burn Mode to allow retries
4407 thread.update(cx, |thread, _| {
4408 thread.set_completion_mode(CompletionMode::Burn);
4409 });
4410
4411 // Create model that returns internal server error
4412 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4413
4414 // Insert a user message
4415 thread.update(cx, |thread, cx| {
4416 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4417 });
4418
4419 // Track retry events and completion count
4420 // Track completion events
4421 let completion_count = Arc::new(Mutex::new(0));
4422 let completion_count_clone = completion_count.clone();
4423
4424 let _subscription = thread.update(cx, |_, cx| {
4425 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4426 if let ThreadEvent::NewRequest = event {
4427 *completion_count_clone.lock() += 1;
4428 }
4429 })
4430 });
4431
4432 // First attempt
4433 thread.update(cx, |thread, cx| {
4434 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4435 });
4436 cx.run_until_parked();
4437
4438 // Should have scheduled first retry - count retry messages
4439 let retry_count = thread.update(cx, |thread, _| {
4440 thread
4441 .messages
4442 .iter()
4443 .filter(|m| {
4444 m.ui_only
4445 && m.segments.iter().any(|s| {
4446 if let MessageSegment::Text(text) = s {
4447 text.contains("Retrying") && text.contains("seconds")
4448 } else {
4449 false
4450 }
4451 })
4452 })
4453 .count()
4454 });
4455 assert_eq!(retry_count, 1, "Should have scheduled first retry");
4456
4457 // Check retry state
4458 thread.read_with(cx, |thread, _| {
4459 assert!(thread.retry_state.is_some(), "Should have retry state");
4460 let retry_state = thread.retry_state.as_ref().unwrap();
4461 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4462 assert_eq!(
4463 retry_state.max_attempts, 1,
4464 "Internal server errors should only retry once"
4465 );
4466 });
4467
4468 // Advance clock for first retry
4469 cx.executor().advance_clock(BASE_RETRY_DELAY);
4470 cx.run_until_parked();
4471
4472 // Should have scheduled second retry - count retry messages
4473 let retry_count = thread.update(cx, |thread, _| {
4474 thread
4475 .messages
4476 .iter()
4477 .filter(|m| {
4478 m.ui_only
4479 && m.segments.iter().any(|s| {
4480 if let MessageSegment::Text(text) = s {
4481 text.contains("Retrying") && text.contains("seconds")
4482 } else {
4483 false
4484 }
4485 })
4486 })
4487 .count()
4488 });
4489 assert_eq!(
4490 retry_count, 1,
4491 "Should have only one retry for internal server errors"
4492 );
4493
4494 // For internal server errors, we only retry once and then give up
4495 // Check that retry_state is cleared after the single retry
4496 thread.read_with(cx, |thread, _| {
4497 assert!(
4498 thread.retry_state.is_none(),
4499 "Retry state should be cleared after single retry"
4500 );
4501 });
4502
4503 // Verify total attempts (1 initial + 1 retry)
4504 assert_eq!(
4505 *completion_count.lock(),
4506 2,
4507 "Should have attempted once plus 1 retry"
4508 );
4509 }
4510
4511 #[gpui::test]
4512 async fn test_max_retries_exceeded(cx: &mut TestAppContext) {
4513 init_test_settings(cx);
4514
4515 let project = create_test_project(cx, json!({})).await;
4516 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4517
4518 // Enable Burn Mode to allow retries
4519 thread.update(cx, |thread, _| {
4520 thread.set_completion_mode(CompletionMode::Burn);
4521 });
4522
4523 // Create model that returns overloaded error
4524 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4525
4526 // Insert a user message
4527 thread.update(cx, |thread, cx| {
4528 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4529 });
4530
4531 // Track events
4532 let stopped_with_error = Arc::new(Mutex::new(false));
4533 let stopped_with_error_clone = stopped_with_error.clone();
4534
4535 let _subscription = thread.update(cx, |_, cx| {
4536 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4537 if let ThreadEvent::Stopped(Err(_)) = event {
4538 *stopped_with_error_clone.lock() = true;
4539 }
4540 })
4541 });
4542
4543 // Start initial completion
4544 thread.update(cx, |thread, cx| {
4545 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4546 });
4547 cx.run_until_parked();
4548
4549 // Advance through all retries
4550 for _ in 0..MAX_RETRY_ATTEMPTS {
4551 cx.executor().advance_clock(BASE_RETRY_DELAY);
4552 cx.run_until_parked();
4553 }
4554
4555 let retry_count = thread.update(cx, |thread, _| {
4556 thread
4557 .messages
4558 .iter()
4559 .filter(|m| {
4560 m.ui_only
4561 && m.segments.iter().any(|s| {
4562 if let MessageSegment::Text(text) = s {
4563 text.contains("Retrying") && text.contains("seconds")
4564 } else {
4565 false
4566 }
4567 })
4568 })
4569 .count()
4570 });
4571
4572 // After max retries, should emit Stopped(Err(...)) event
4573 assert_eq!(
4574 retry_count, MAX_RETRY_ATTEMPTS as usize,
4575 "Should have attempted MAX_RETRY_ATTEMPTS retries for overloaded errors"
4576 );
4577 assert!(
4578 *stopped_with_error.lock(),
4579 "Should emit Stopped(Err(...)) event after max retries exceeded"
4580 );
4581
4582 // Retry state should be cleared
4583 thread.read_with(cx, |thread, _| {
4584 assert!(
4585 thread.retry_state.is_none(),
4586 "Retry state should be cleared after max retries"
4587 );
4588
4589 // Verify we have the expected number of retry messages
4590 let retry_messages = thread
4591 .messages
4592 .iter()
4593 .filter(|msg| msg.ui_only && msg.role == Role::System)
4594 .count();
4595 assert_eq!(
4596 retry_messages, MAX_RETRY_ATTEMPTS as usize,
4597 "Should have MAX_RETRY_ATTEMPTS retry messages for overloaded errors"
4598 );
4599 });
4600 }
4601
4602 #[gpui::test]
4603 async fn test_retry_message_removed_on_retry(cx: &mut TestAppContext) {
4604 init_test_settings(cx);
4605
4606 let project = create_test_project(cx, json!({})).await;
4607 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4608
4609 // Enable Burn Mode to allow retries
4610 thread.update(cx, |thread, _| {
4611 thread.set_completion_mode(CompletionMode::Burn);
4612 });
4613
4614 // We'll use a wrapper to switch behavior after first failure
4615 struct RetryTestModel {
4616 inner: Arc<FakeLanguageModel>,
4617 failed_once: Arc<Mutex<bool>>,
4618 }
4619
4620 impl LanguageModel for RetryTestModel {
4621 fn id(&self) -> LanguageModelId {
4622 self.inner.id()
4623 }
4624
4625 fn name(&self) -> LanguageModelName {
4626 self.inner.name()
4627 }
4628
4629 fn provider_id(&self) -> LanguageModelProviderId {
4630 self.inner.provider_id()
4631 }
4632
4633 fn provider_name(&self) -> LanguageModelProviderName {
4634 self.inner.provider_name()
4635 }
4636
4637 fn supports_tools(&self) -> bool {
4638 self.inner.supports_tools()
4639 }
4640
4641 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4642 self.inner.supports_tool_choice(choice)
4643 }
4644
4645 fn supports_images(&self) -> bool {
4646 self.inner.supports_images()
4647 }
4648
4649 fn telemetry_id(&self) -> String {
4650 self.inner.telemetry_id()
4651 }
4652
4653 fn max_token_count(&self) -> u64 {
4654 self.inner.max_token_count()
4655 }
4656
4657 fn count_tokens(
4658 &self,
4659 request: LanguageModelRequest,
4660 cx: &App,
4661 ) -> BoxFuture<'static, Result<u64>> {
4662 self.inner.count_tokens(request, cx)
4663 }
4664
4665 fn stream_completion(
4666 &self,
4667 request: LanguageModelRequest,
4668 cx: &AsyncApp,
4669 ) -> BoxFuture<
4670 'static,
4671 Result<
4672 BoxStream<
4673 'static,
4674 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4675 >,
4676 LanguageModelCompletionError,
4677 >,
4678 > {
4679 if !*self.failed_once.lock() {
4680 *self.failed_once.lock() = true;
4681 let provider = self.provider_name();
4682 // Return error on first attempt
4683 let stream = futures::stream::once(async move {
4684 Err(LanguageModelCompletionError::ServerOverloaded {
4685 provider,
4686 retry_after: None,
4687 })
4688 });
4689 async move { Ok(stream.boxed()) }.boxed()
4690 } else {
4691 // Succeed on retry
4692 self.inner.stream_completion(request, cx)
4693 }
4694 }
4695
4696 fn as_fake(&self) -> &FakeLanguageModel {
4697 &self.inner
4698 }
4699 }
4700
4701 let model = Arc::new(RetryTestModel {
4702 inner: Arc::new(FakeLanguageModel::default()),
4703 failed_once: Arc::new(Mutex::new(false)),
4704 });
4705
4706 // Insert a user message
4707 thread.update(cx, |thread, cx| {
4708 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4709 });
4710
4711 // Track message deletions
4712 // Track when retry completes successfully
4713 let retry_completed = Arc::new(Mutex::new(false));
4714 let retry_completed_clone = retry_completed.clone();
4715
4716 let _subscription = thread.update(cx, |_, cx| {
4717 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4718 if let ThreadEvent::StreamedCompletion = event {
4719 *retry_completed_clone.lock() = true;
4720 }
4721 })
4722 });
4723
4724 // Start completion
4725 thread.update(cx, |thread, cx| {
4726 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4727 });
4728 cx.run_until_parked();
4729
4730 // Get the retry message ID
4731 let retry_message_id = thread.read_with(cx, |thread, _| {
4732 thread
4733 .messages()
4734 .find(|msg| msg.role == Role::System && msg.ui_only)
4735 .map(|msg| msg.id)
4736 .expect("Should have a retry message")
4737 });
4738
4739 // Wait for retry
4740 cx.executor().advance_clock(BASE_RETRY_DELAY);
4741 cx.run_until_parked();
4742
4743 // Stream some successful content
4744 let fake_model = model.as_fake();
4745 // After the retry, there should be a new pending completion
4746 let pending = fake_model.pending_completions();
4747 assert!(
4748 !pending.is_empty(),
4749 "Should have a pending completion after retry"
4750 );
4751 fake_model.stream_completion_response(&pending[0], "Success!");
4752 fake_model.end_completion_stream(&pending[0]);
4753 cx.run_until_parked();
4754
4755 // Check that the retry completed successfully
4756 assert!(
4757 *retry_completed.lock(),
4758 "Retry should have completed successfully"
4759 );
4760
4761 // Retry message should still exist but be marked as ui_only
4762 thread.read_with(cx, |thread, _| {
4763 let retry_msg = thread
4764 .message(retry_message_id)
4765 .expect("Retry message should still exist");
4766 assert!(retry_msg.ui_only, "Retry message should be ui_only");
4767 assert_eq!(
4768 retry_msg.role,
4769 Role::System,
4770 "Retry message should have System role"
4771 );
4772 });
4773 }
4774
4775 #[gpui::test]
4776 async fn test_successful_completion_clears_retry_state(cx: &mut TestAppContext) {
4777 init_test_settings(cx);
4778
4779 let project = create_test_project(cx, json!({})).await;
4780 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4781
4782 // Enable Burn Mode to allow retries
4783 thread.update(cx, |thread, _| {
4784 thread.set_completion_mode(CompletionMode::Burn);
4785 });
4786
4787 // Create a model that fails once then succeeds
4788 struct FailOnceModel {
4789 inner: Arc<FakeLanguageModel>,
4790 failed_once: Arc<Mutex<bool>>,
4791 }
4792
4793 impl LanguageModel for FailOnceModel {
4794 fn id(&self) -> LanguageModelId {
4795 self.inner.id()
4796 }
4797
4798 fn name(&self) -> LanguageModelName {
4799 self.inner.name()
4800 }
4801
4802 fn provider_id(&self) -> LanguageModelProviderId {
4803 self.inner.provider_id()
4804 }
4805
4806 fn provider_name(&self) -> LanguageModelProviderName {
4807 self.inner.provider_name()
4808 }
4809
4810 fn supports_tools(&self) -> bool {
4811 self.inner.supports_tools()
4812 }
4813
4814 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4815 self.inner.supports_tool_choice(choice)
4816 }
4817
4818 fn supports_images(&self) -> bool {
4819 self.inner.supports_images()
4820 }
4821
4822 fn telemetry_id(&self) -> String {
4823 self.inner.telemetry_id()
4824 }
4825
4826 fn max_token_count(&self) -> u64 {
4827 self.inner.max_token_count()
4828 }
4829
4830 fn count_tokens(
4831 &self,
4832 request: LanguageModelRequest,
4833 cx: &App,
4834 ) -> BoxFuture<'static, Result<u64>> {
4835 self.inner.count_tokens(request, cx)
4836 }
4837
4838 fn stream_completion(
4839 &self,
4840 request: LanguageModelRequest,
4841 cx: &AsyncApp,
4842 ) -> BoxFuture<
4843 'static,
4844 Result<
4845 BoxStream<
4846 'static,
4847 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4848 >,
4849 LanguageModelCompletionError,
4850 >,
4851 > {
4852 if !*self.failed_once.lock() {
4853 *self.failed_once.lock() = true;
4854 let provider = self.provider_name();
4855 // Return error on first attempt
4856 let stream = futures::stream::once(async move {
4857 Err(LanguageModelCompletionError::ServerOverloaded {
4858 provider,
4859 retry_after: None,
4860 })
4861 });
4862 async move { Ok(stream.boxed()) }.boxed()
4863 } else {
4864 // Succeed on retry
4865 self.inner.stream_completion(request, cx)
4866 }
4867 }
4868 }
4869
4870 let fail_once_model = Arc::new(FailOnceModel {
4871 inner: Arc::new(FakeLanguageModel::default()),
4872 failed_once: Arc::new(Mutex::new(false)),
4873 });
4874
4875 // Insert a user message
4876 thread.update(cx, |thread, cx| {
4877 thread.insert_user_message(
4878 "Test message",
4879 ContextLoadResult::default(),
4880 None,
4881 vec![],
4882 cx,
4883 );
4884 });
4885
4886 // Start completion with fail-once model
4887 thread.update(cx, |thread, cx| {
4888 thread.send_to_model(
4889 fail_once_model.clone(),
4890 CompletionIntent::UserPrompt,
4891 None,
4892 cx,
4893 );
4894 });
4895
4896 cx.run_until_parked();
4897
4898 // Verify retry state exists after first failure
4899 thread.read_with(cx, |thread, _| {
4900 assert!(
4901 thread.retry_state.is_some(),
4902 "Should have retry state after failure"
4903 );
4904 });
4905
4906 // Wait for retry delay
4907 cx.executor().advance_clock(BASE_RETRY_DELAY);
4908 cx.run_until_parked();
4909
4910 // The retry should now use our FailOnceModel which should succeed
4911 // We need to help the FakeLanguageModel complete the stream
4912 let inner_fake = fail_once_model.inner.clone();
4913
4914 // Wait a bit for the retry to start
4915 cx.run_until_parked();
4916
4917 // Check for pending completions and complete them
4918 if let Some(pending) = inner_fake.pending_completions().first() {
4919 inner_fake.stream_completion_response(pending, "Success!");
4920 inner_fake.end_completion_stream(pending);
4921 }
4922 cx.run_until_parked();
4923
4924 thread.read_with(cx, |thread, _| {
4925 assert!(
4926 thread.retry_state.is_none(),
4927 "Retry state should be cleared after successful completion"
4928 );
4929
4930 let has_assistant_message = thread
4931 .messages
4932 .iter()
4933 .any(|msg| msg.role == Role::Assistant && !msg.ui_only);
4934 assert!(
4935 has_assistant_message,
4936 "Should have an assistant message after successful retry"
4937 );
4938 });
4939 }
4940
4941 #[gpui::test]
4942 async fn test_rate_limit_retry_single_attempt(cx: &mut TestAppContext) {
4943 init_test_settings(cx);
4944
4945 let project = create_test_project(cx, json!({})).await;
4946 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4947
4948 // Enable Burn Mode to allow retries
4949 thread.update(cx, |thread, _| {
4950 thread.set_completion_mode(CompletionMode::Burn);
4951 });
4952
4953 // Create a model that returns rate limit error with retry_after
4954 struct RateLimitModel {
4955 inner: Arc<FakeLanguageModel>,
4956 }
4957
4958 impl LanguageModel for RateLimitModel {
4959 fn id(&self) -> LanguageModelId {
4960 self.inner.id()
4961 }
4962
4963 fn name(&self) -> LanguageModelName {
4964 self.inner.name()
4965 }
4966
4967 fn provider_id(&self) -> LanguageModelProviderId {
4968 self.inner.provider_id()
4969 }
4970
4971 fn provider_name(&self) -> LanguageModelProviderName {
4972 self.inner.provider_name()
4973 }
4974
4975 fn supports_tools(&self) -> bool {
4976 self.inner.supports_tools()
4977 }
4978
4979 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4980 self.inner.supports_tool_choice(choice)
4981 }
4982
4983 fn supports_images(&self) -> bool {
4984 self.inner.supports_images()
4985 }
4986
4987 fn telemetry_id(&self) -> String {
4988 self.inner.telemetry_id()
4989 }
4990
4991 fn max_token_count(&self) -> u64 {
4992 self.inner.max_token_count()
4993 }
4994
4995 fn count_tokens(
4996 &self,
4997 request: LanguageModelRequest,
4998 cx: &App,
4999 ) -> BoxFuture<'static, Result<u64>> {
5000 self.inner.count_tokens(request, cx)
5001 }
5002
5003 fn stream_completion(
5004 &self,
5005 _request: LanguageModelRequest,
5006 _cx: &AsyncApp,
5007 ) -> BoxFuture<
5008 'static,
5009 Result<
5010 BoxStream<
5011 'static,
5012 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
5013 >,
5014 LanguageModelCompletionError,
5015 >,
5016 > {
5017 let provider = self.provider_name();
5018 async move {
5019 let stream = futures::stream::once(async move {
5020 Err(LanguageModelCompletionError::RateLimitExceeded {
5021 provider,
5022 retry_after: Some(Duration::from_secs(TEST_RATE_LIMIT_RETRY_SECS)),
5023 })
5024 });
5025 Ok(stream.boxed())
5026 }
5027 .boxed()
5028 }
5029
5030 fn as_fake(&self) -> &FakeLanguageModel {
5031 &self.inner
5032 }
5033 }
5034
5035 let model = Arc::new(RateLimitModel {
5036 inner: Arc::new(FakeLanguageModel::default()),
5037 });
5038
5039 // Insert a user message
5040 thread.update(cx, |thread, cx| {
5041 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5042 });
5043
5044 // Start completion
5045 thread.update(cx, |thread, cx| {
5046 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5047 });
5048
5049 cx.run_until_parked();
5050
5051 let retry_count = thread.update(cx, |thread, _| {
5052 thread
5053 .messages
5054 .iter()
5055 .filter(|m| {
5056 m.ui_only
5057 && m.segments.iter().any(|s| {
5058 if let MessageSegment::Text(text) = s {
5059 text.contains("rate limit exceeded")
5060 } else {
5061 false
5062 }
5063 })
5064 })
5065 .count()
5066 });
5067 assert_eq!(retry_count, 1, "Should have scheduled one retry");
5068
5069 thread.read_with(cx, |thread, _| {
5070 assert!(
5071 thread.retry_state.is_some(),
5072 "Rate limit errors should set retry_state"
5073 );
5074 if let Some(retry_state) = &thread.retry_state {
5075 assert_eq!(
5076 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
5077 "Rate limit errors should use MAX_RETRY_ATTEMPTS"
5078 );
5079 }
5080 });
5081
5082 // Verify we have one retry message
5083 thread.read_with(cx, |thread, _| {
5084 let retry_messages = thread
5085 .messages
5086 .iter()
5087 .filter(|msg| {
5088 msg.ui_only
5089 && msg.segments.iter().any(|seg| {
5090 if let MessageSegment::Text(text) = seg {
5091 text.contains("rate limit exceeded")
5092 } else {
5093 false
5094 }
5095 })
5096 })
5097 .count();
5098 assert_eq!(
5099 retry_messages, 1,
5100 "Should have one rate limit retry message"
5101 );
5102 });
5103
5104 // Check that retry message doesn't include attempt count
5105 thread.read_with(cx, |thread, _| {
5106 let retry_message = thread
5107 .messages
5108 .iter()
5109 .find(|msg| msg.role == Role::System && msg.ui_only)
5110 .expect("Should have a retry message");
5111
5112 // Check that the message contains attempt count since we use retry_state
5113 if let Some(MessageSegment::Text(text)) = retry_message.segments.first() {
5114 assert!(
5115 text.contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS)),
5116 "Rate limit retry message should contain attempt count with MAX_RETRY_ATTEMPTS"
5117 );
5118 assert!(
5119 text.contains("Retrying"),
5120 "Rate limit retry message should contain retry text"
5121 );
5122 }
5123 });
5124 }
5125
5126 #[gpui::test]
5127 async fn test_ui_only_messages_not_sent_to_model(cx: &mut TestAppContext) {
5128 init_test_settings(cx);
5129
5130 let project = create_test_project(cx, json!({})).await;
5131 let (_, _, thread, _, model) = setup_test_environment(cx, project.clone()).await;
5132
5133 // Insert a regular user message
5134 thread.update(cx, |thread, cx| {
5135 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5136 });
5137
5138 // Insert a UI-only message (like our retry notifications)
5139 thread.update(cx, |thread, cx| {
5140 let id = thread.next_message_id.post_inc();
5141 thread.messages.push(Message {
5142 id,
5143 role: Role::System,
5144 segments: vec![MessageSegment::Text(
5145 "This is a UI-only message that should not be sent to the model".to_string(),
5146 )],
5147 loaded_context: LoadedContext::default(),
5148 creases: Vec::new(),
5149 is_hidden: true,
5150 ui_only: true,
5151 });
5152 cx.emit(ThreadEvent::MessageAdded(id));
5153 });
5154
5155 // Insert another regular message
5156 thread.update(cx, |thread, cx| {
5157 thread.insert_user_message(
5158 "How are you?",
5159 ContextLoadResult::default(),
5160 None,
5161 vec![],
5162 cx,
5163 );
5164 });
5165
5166 // Generate the completion request
5167 let request = thread.update(cx, |thread, cx| {
5168 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
5169 });
5170
5171 // Verify that the request only contains non-UI-only messages
5172 // Should have system prompt + 2 user messages, but not the UI-only message
5173 let user_messages: Vec<_> = request
5174 .messages
5175 .iter()
5176 .filter(|msg| msg.role == Role::User)
5177 .collect();
5178 assert_eq!(
5179 user_messages.len(),
5180 2,
5181 "Should have exactly 2 user messages"
5182 );
5183
5184 // Verify the UI-only content is not present anywhere in the request
5185 let request_text = request
5186 .messages
5187 .iter()
5188 .flat_map(|msg| &msg.content)
5189 .filter_map(|content| match content {
5190 MessageContent::Text(text) => Some(text.as_str()),
5191 _ => None,
5192 })
5193 .collect::<String>();
5194
5195 assert!(
5196 !request_text.contains("UI-only message"),
5197 "UI-only message content should not be in the request"
5198 );
5199
5200 // Verify the thread still has all 3 messages (including UI-only)
5201 thread.read_with(cx, |thread, _| {
5202 assert_eq!(
5203 thread.messages().count(),
5204 3,
5205 "Thread should have 3 messages"
5206 );
5207 assert_eq!(
5208 thread.messages().filter(|m| m.ui_only).count(),
5209 1,
5210 "Thread should have 1 UI-only message"
5211 );
5212 });
5213
5214 // Verify that UI-only messages are not serialized
5215 let serialized = thread
5216 .update(cx, |thread, cx| thread.serialize(cx))
5217 .await
5218 .unwrap();
5219 assert_eq!(
5220 serialized.messages.len(),
5221 2,
5222 "Serialized thread should only have 2 messages (no UI-only)"
5223 );
5224 }
5225
5226 #[gpui::test]
5227 async fn test_no_retry_without_burn_mode(cx: &mut TestAppContext) {
5228 init_test_settings(cx);
5229
5230 let project = create_test_project(cx, json!({})).await;
5231 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5232
5233 // Ensure we're in Normal mode (not Burn mode)
5234 thread.update(cx, |thread, _| {
5235 thread.set_completion_mode(CompletionMode::Normal);
5236 });
5237
5238 // Track error events
5239 let error_events = Arc::new(Mutex::new(Vec::new()));
5240 let error_events_clone = error_events.clone();
5241
5242 let _subscription = thread.update(cx, |_, cx| {
5243 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
5244 if let ThreadEvent::ShowError(error) = event {
5245 error_events_clone.lock().push(error.clone());
5246 }
5247 })
5248 });
5249
5250 // Create model that returns overloaded error
5251 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5252
5253 // Insert a user message
5254 thread.update(cx, |thread, cx| {
5255 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5256 });
5257
5258 // Start completion
5259 thread.update(cx, |thread, cx| {
5260 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5261 });
5262
5263 cx.run_until_parked();
5264
5265 // Verify no retry state was created
5266 thread.read_with(cx, |thread, _| {
5267 assert!(
5268 thread.retry_state.is_none(),
5269 "Should not have retry state in Normal mode"
5270 );
5271 });
5272
5273 // Check that a retryable error was reported
5274 let errors = error_events.lock();
5275 assert!(!errors.is_empty(), "Should have received an error event");
5276
5277 if let ThreadError::RetryableError {
5278 message: _,
5279 can_enable_burn_mode,
5280 } = &errors[0]
5281 {
5282 assert!(
5283 *can_enable_burn_mode,
5284 "Error should indicate burn mode can be enabled"
5285 );
5286 } else {
5287 panic!("Expected RetryableError, got {:?}", errors[0]);
5288 }
5289
5290 // Verify the thread is no longer generating
5291 thread.read_with(cx, |thread, _| {
5292 assert!(
5293 !thread.is_generating(),
5294 "Should not be generating after error without retry"
5295 );
5296 });
5297 }
5298
5299 #[gpui::test]
5300 async fn test_retry_cancelled_on_stop(cx: &mut TestAppContext) {
5301 init_test_settings(cx);
5302
5303 let project = create_test_project(cx, json!({})).await;
5304 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5305
5306 // Enable Burn Mode to allow retries
5307 thread.update(cx, |thread, _| {
5308 thread.set_completion_mode(CompletionMode::Burn);
5309 });
5310
5311 // Create model that returns overloaded error
5312 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5313
5314 // Insert a user message
5315 thread.update(cx, |thread, cx| {
5316 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5317 });
5318
5319 // Start completion
5320 thread.update(cx, |thread, cx| {
5321 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5322 });
5323
5324 cx.run_until_parked();
5325
5326 // Verify retry was scheduled by checking for retry message
5327 let has_retry_message = thread.read_with(cx, |thread, _| {
5328 thread.messages.iter().any(|m| {
5329 m.ui_only
5330 && m.segments.iter().any(|s| {
5331 if let MessageSegment::Text(text) = s {
5332 text.contains("Retrying") && text.contains("seconds")
5333 } else {
5334 false
5335 }
5336 })
5337 })
5338 });
5339 assert!(has_retry_message, "Should have scheduled a retry");
5340
5341 // Cancel the completion before the retry happens
5342 thread.update(cx, |thread, cx| {
5343 thread.cancel_last_completion(None, cx);
5344 });
5345
5346 cx.run_until_parked();
5347
5348 // The retry should not have happened - no pending completions
5349 let fake_model = model.as_fake();
5350 assert_eq!(
5351 fake_model.pending_completions().len(),
5352 0,
5353 "Should have no pending completions after cancellation"
5354 );
5355
5356 // Verify the retry was cancelled by checking retry state
5357 thread.read_with(cx, |thread, _| {
5358 if let Some(retry_state) = &thread.retry_state {
5359 panic!(
5360 "retry_state should be cleared after cancellation, but found: attempt={}, max_attempts={}, intent={:?}",
5361 retry_state.attempt, retry_state.max_attempts, retry_state.intent
5362 );
5363 }
5364 });
5365 }
5366
5367 fn test_summarize_error(
5368 model: &Arc<dyn LanguageModel>,
5369 thread: &Entity<Thread>,
5370 cx: &mut TestAppContext,
5371 ) {
5372 thread.update(cx, |thread, cx| {
5373 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
5374 thread.send_to_model(
5375 model.clone(),
5376 CompletionIntent::ThreadSummarization,
5377 None,
5378 cx,
5379 );
5380 });
5381
5382 let fake_model = model.as_fake();
5383 simulate_successful_response(&fake_model, cx);
5384
5385 thread.read_with(cx, |thread, _| {
5386 assert!(matches!(thread.summary(), ThreadSummary::Generating));
5387 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5388 });
5389
5390 // Simulate summary request ending
5391 cx.run_until_parked();
5392 fake_model.end_last_completion_stream();
5393 cx.run_until_parked();
5394
5395 // State is set to Error and default message
5396 thread.read_with(cx, |thread, _| {
5397 assert!(matches!(thread.summary(), ThreadSummary::Error));
5398 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5399 });
5400 }
5401
5402 fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
5403 cx.run_until_parked();
5404 fake_model.stream_last_completion_response("Assistant response");
5405 fake_model.end_last_completion_stream();
5406 cx.run_until_parked();
5407 }
5408
5409 fn init_test_settings(cx: &mut TestAppContext) {
5410 cx.update(|cx| {
5411 let settings_store = SettingsStore::test(cx);
5412 cx.set_global(settings_store);
5413 language::init(cx);
5414 Project::init_settings(cx);
5415 AgentSettings::register(cx);
5416 prompt_store::init(cx);
5417 thread_store::init(cx);
5418 workspace::init_settings(cx);
5419 language_model::init_settings(cx);
5420 ThemeSettings::register(cx);
5421 ToolRegistry::default_global(cx);
5422 assistant_tool::init(cx);
5423
5424 let http_client = Arc::new(http_client::HttpClientWithUrl::new(
5425 http_client::FakeHttpClient::with_200_response(),
5426 "http://localhost".to_string(),
5427 None,
5428 ));
5429 assistant_tools::init(http_client, cx);
5430 });
5431 }
5432
5433 // Helper to create a test project with test files
5434 async fn create_test_project(
5435 cx: &mut TestAppContext,
5436 files: serde_json::Value,
5437 ) -> Entity<Project> {
5438 let fs = FakeFs::new(cx.executor());
5439 fs.insert_tree(path!("/test"), files).await;
5440 Project::test(fs, [path!("/test").as_ref()], cx).await
5441 }
5442
5443 async fn setup_test_environment(
5444 cx: &mut TestAppContext,
5445 project: Entity<Project>,
5446 ) -> (
5447 Entity<Workspace>,
5448 Entity<ThreadStore>,
5449 Entity<Thread>,
5450 Entity<ContextStore>,
5451 Arc<dyn LanguageModel>,
5452 ) {
5453 let (workspace, cx) =
5454 cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
5455
5456 let thread_store = cx
5457 .update(|_, cx| {
5458 ThreadStore::load(
5459 project.clone(),
5460 cx.new(|_| ToolWorkingSet::default()),
5461 None,
5462 Arc::new(PromptBuilder::new(None).unwrap()),
5463 cx,
5464 )
5465 })
5466 .await
5467 .unwrap();
5468
5469 let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
5470 let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));
5471
5472 let provider = Arc::new(FakeLanguageModelProvider);
5473 let model = provider.test_model();
5474 let model: Arc<dyn LanguageModel> = Arc::new(model);
5475
5476 cx.update(|_, cx| {
5477 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
5478 registry.set_default_model(
5479 Some(ConfiguredModel {
5480 provider: provider.clone(),
5481 model: model.clone(),
5482 }),
5483 cx,
5484 );
5485 registry.set_thread_summary_model(
5486 Some(ConfiguredModel {
5487 provider,
5488 model: model.clone(),
5489 }),
5490 cx,
5491 );
5492 })
5493 });
5494
5495 (workspace, thread_store, thread, context_store, model)
5496 }
5497
5498 async fn add_file_to_context(
5499 project: &Entity<Project>,
5500 context_store: &Entity<ContextStore>,
5501 path: &str,
5502 cx: &mut TestAppContext,
5503 ) -> Result<Entity<language::Buffer>> {
5504 let buffer_path = project
5505 .read_with(cx, |project, cx| project.find_project_path(path, cx))
5506 .unwrap();
5507
5508 let buffer = project
5509 .update(cx, |project, cx| {
5510 project.open_buffer(buffer_path.clone(), cx)
5511 })
5512 .await
5513 .unwrap();
5514
5515 context_store.update(cx, |context_store, cx| {
5516 context_store.add_file_from_buffer(&buffer_path, buffer.clone(), false, cx);
5517 });
5518
5519 Ok(buffer)
5520 }
5521}