use crate::{
    agent_profile::AgentProfile,
    context::{AgentContext, AgentContextHandle, ContextLoadResult, LoadedContext},
    thread_store::{
        SerializedCrease, SerializedLanguageModel, SerializedMessage, SerializedMessageSegment,
        SerializedThread, SerializedToolResult, SerializedToolUse, SharedProjectContext,
        ThreadStore,
    },
    tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
};
use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
use anyhow::{Result, anyhow};
use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
use chrono::{DateTime, Utc};
use client::{ModelRequestUsage, RequestUsage};
use collections::HashMap;
use feature_flags::{self, FeatureFlagAppExt};
use futures::{FutureExt, StreamExt as _, future::Shared};
use git::repository::DiffType;
use gpui::{
    AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
    WeakEntity, Window,
};
use http_client::StatusCode;
use language_model::{
    ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelExt as _, LanguageModelId, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
    LanguageModelToolResultContent, LanguageModelToolUse, LanguageModelToolUseId, MessageContent,
    ModelRequestLimitReachedError, PaymentRequiredError, Role, SelectedModel, StopReason,
    TokenUsage,
};
use postage::stream::Stream as _;
use project::{
    Project,
    git_store::{GitStore, GitStoreCheckpoint, RepositoryState},
};
use prompt_store::{ModelContext, PromptBuilder};
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::Settings;
use std::{
    io::Write,
    ops::Range,
    sync::Arc,
    time::{Duration, Instant},
};
use thiserror::Error;
use util::{ResultExt as _, post_inc};
use uuid::Uuid;
use zed_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};

const MAX_RETRY_ATTEMPTS: u8 = 4;
const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);

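/// How a failed completion request should be retried.
///
/// A minimal sketch of the delay schedule these variants imply; the doubling
/// rule for `ExponentialBackoff` is an illustrative assumption, not a
/// guarantee about the scheduling code elsewhere in this file:
///
/// ```ignore
/// fn delay_for_attempt(strategy: &RetryStrategy, attempt: u8) -> Duration {
///     match strategy {
///         // e.g. with BASE_RETRY_DELAY = 5s: 5s, 10s, 20s, ... per attempt.
///         RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
///             *initial_delay * 2u32.pow(attempt.saturating_sub(1) as u32)
///         }
///         // The same delay before every attempt.
///         RetryStrategy::Fixed { delay, .. } => *delay,
///     }
/// }
/// ```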
#[derive(Debug, Clone)]
enum RetryStrategy {
    ExponentialBackoff {
        initial_delay: Duration,
        max_attempts: u8,
    },
    Fixed {
        delay: Duration,
        max_attempts: u8,
    },
}

#[derive(
    Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<&str> for ThreadId {
    fn from(value: &str) -> Self {
        Self(value.into())
    }
}

/// The ID of the user prompt that initiated a request.
///
/// This equates to the user explicitly submitting a message to the model (e.g., by pressing the Enter key).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct PromptId(Arc<str>);

impl PromptId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for PromptId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }

    pub fn as_usize(&self) -> usize {
        self.0
    }
}

/// Stored information that can be used to resurrect a context crease when creating an editor for a past message.
#[derive(Clone, Debug)]
pub struct MessageCrease {
    pub range: Range<usize>,
    pub icon_path: SharedString,
    pub label: SharedString,
    /// None for a deserialized message, Some otherwise.
    pub context: Option<AgentContextHandle>,
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub segments: Vec<MessageSegment>,
    pub loaded_context: LoadedContext,
    pub creases: Vec<MessageCrease>,
    pub is_hidden: bool,
    pub ui_only: bool,
}

impl Message {
    /// Returns whether the message contains any meaningful text that should be displayed.
    /// The model sometimes runs a tool without producing any text, or produces only a marker ([`USING_TOOL_MARKER`]).
    pub fn should_display_content(&self) -> bool {
        self.segments.iter().all(|segment| segment.should_display())
    }

    pub fn push_thinking(&mut self, text: &str, signature: Option<String>) {
        if let Some(MessageSegment::Thinking {
            text: segment,
            signature: current_signature,
        }) = self.segments.last_mut()
        {
            if let Some(signature) = signature {
                *current_signature = Some(signature);
            }
            segment.push_str(text);
        } else {
            self.segments.push(MessageSegment::Thinking {
                text: text.to_string(),
                signature,
            });
        }
    }

    pub fn push_redacted_thinking(&mut self, data: String) {
        self.segments.push(MessageSegment::RedactedThinking(data));
    }

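    /// Appends `text` to the message, coalescing it into the trailing
    /// [`MessageSegment::Text`] if one exists, so streamed chunks don't
    /// produce one segment per chunk.
    ///
    /// Illustrative sketch (the `message` value is hypothetical):
    ///
    /// ```ignore
    /// // Suppose `message.segments` ends in MessageSegment::Text("Hel".into()).
    /// message.push_text("lo");
    /// // The trailing segment is now MessageSegment::Text("Hello"),
    /// // rather than two separate segments.
    /// ```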
    pub fn push_text(&mut self, text: &str) {
        if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
            segment.push_str(text);
        } else {
            self.segments.push(MessageSegment::Text(text.to_string()));
        }
    }

    pub fn to_string(&self) -> String {
        let mut result = String::new();

        if !self.loaded_context.text.is_empty() {
            result.push_str(&self.loaded_context.text);
        }

        for segment in &self.segments {
            match segment {
                MessageSegment::Text(text) => result.push_str(text),
                MessageSegment::Thinking { text, .. } => {
                    result.push_str("<think>\n");
                    result.push_str(text);
                    result.push_str("\n</think>");
                }
                MessageSegment::RedactedThinking(_) => {}
            }
        }

        result
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MessageSegment {
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    RedactedThinking(String),
}

impl MessageSegment {
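    /// Returns whether this segment has content worth rendering: empty text
    /// and thinking segments are hidden, as is redacted thinking.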
    pub fn should_display(&self) -> bool {
        match self {
            Self::Text(text) => !text.is_empty(),
            Self::Thinking { text, .. } => !text.is_empty(),
            Self::RedactedThinking(_) => false,
        }
    }

    pub fn text(&self) -> Option<&str> {
        match self {
            MessageSegment::Text(text) => Some(text),
            _ => None,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProjectSnapshot {
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    pub unsaved_buffer_paths: Vec<String>,
    pub timestamp: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct WorktreeSnapshot {
    pub worktree_path: String,
    pub git_state: Option<GitState>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GitState {
    pub remote_url: Option<String>,
    pub head_sha: Option<String>,
    pub current_branch: Option<String>,
    pub diff: Option<String>,
}

#[derive(Clone, Debug)]
pub struct ThreadCheckpoint {
    message_id: MessageId,
    git_checkpoint: GitStoreCheckpoint,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}

pub enum LastRestoreCheckpoint {
    Pending {
        message_id: MessageId,
    },
    Error {
        message_id: MessageId,
        error: String,
    },
}

impl LastRestoreCheckpoint {
    pub fn message_id(&self) -> MessageId {
        match self {
            LastRestoreCheckpoint::Pending { message_id } => *message_id,
            LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
        }
    }
}

#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub enum DetailedSummaryState {
    #[default]
    NotGenerated,
    Generating {
        message_id: MessageId,
    },
    Generated {
        text: SharedString,
        message_id: MessageId,
    },
}

impl DetailedSummaryState {
    fn text(&self) -> Option<SharedString> {
        if let Self::Generated { text, .. } = self {
            Some(text.clone())
        } else {
            None
        }
    }
}

#[derive(Default, Debug)]
pub struct TotalTokenUsage {
    pub total: u64,
    pub max: u64,
}

impl TotalTokenUsage {
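    /// Classifies current usage against the context-window maximum.
    ///
    /// A sketch of the thresholds with the default 0.8 warning cutoff
    /// (the values here are hypothetical):
    ///
    /// ```ignore
    /// let usage = TotalTokenUsage { total: 790, max: 1000 };
    /// assert_eq!(usage.ratio(), TokenUsageRatio::Normal);
    /// assert_eq!(usage.add(10).ratio(), TokenUsageRatio::Warning); // 800 / 1000 >= 0.8
    /// assert_eq!(usage.add(210).ratio(), TokenUsageRatio::Exceeded); // 1000 >= 1000
    /// ```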
    pub fn ratio(&self) -> TokenUsageRatio {
        #[cfg(debug_assertions)]
        let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
            .unwrap_or("0.8".to_string())
            .parse()
            .unwrap();
        #[cfg(not(debug_assertions))]
        let warning_threshold: f32 = 0.8;

        // When the maximum is unknown because there is no selected model,
        // avoid showing the token limit warning.
        if self.max == 0 {
            TokenUsageRatio::Normal
        } else if self.total >= self.max {
            TokenUsageRatio::Exceeded
        } else if self.total as f32 / self.max as f32 >= warning_threshold {
            TokenUsageRatio::Warning
        } else {
            TokenUsageRatio::Normal
        }
    }

    pub fn add(&self, tokens: u64) -> TotalTokenUsage {
        TotalTokenUsage {
            total: self.total + tokens,
            max: self.max,
        }
    }
}

#[derive(Debug, Default, PartialEq, Eq)]
pub enum TokenUsageRatio {
    #[default]
    Normal,
    Warning,
    Exceeded,
}

#[derive(Debug, Clone, Copy)]
pub enum QueueState {
    Sending,
    Queued { position: usize },
    Started,
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: ThreadSummary,
    pending_summary: Task<Option<()>>,
    detailed_summary_task: Task<Option<()>>,
    detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
    detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
    completion_mode: agent_settings::CompletionMode,
    messages: Vec<Message>,
    next_message_id: MessageId,
    last_prompt_id: PromptId,
    project_context: SharedProjectContext,
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Entity<ToolWorkingSet>,
    tool_use: ToolUseState,
    action_log: Entity<ActionLog>,
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    pending_checkpoint: Option<ThreadCheckpoint>,
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    request_token_usage: Vec<TokenUsage>,
    cumulative_token_usage: TokenUsage,
    exceeded_window_error: Option<ExceededWindowError>,
    tool_use_limit_reached: bool,
    feedback: Option<ThreadFeedback>,
    retry_state: Option<RetryState>,
    message_feedback: HashMap<MessageId, ThreadFeedback>,
    last_auto_capture_at: Option<Instant>,
    last_received_chunk_at: Option<Instant>,
    request_callback: Option<
        Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
    >,
    remaining_turns: u32,
    configured_model: Option<ConfiguredModel>,
    profile: AgentProfile,
    last_error_context: Option<(Arc<dyn LanguageModel>, CompletionIntent)>,
}

#[derive(Clone, Debug)]
struct RetryState {
    attempt: u8,
    max_attempts: u8,
    intent: CompletionIntent,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ThreadSummary {
    Pending,
    Generating,
    Ready(SharedString),
    Error,
}

impl ThreadSummary {
    pub const DEFAULT: SharedString = SharedString::new_static("New Thread");

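    /// Returns the ready summary, or the default "New Thread" placeholder.
    ///
    /// A sketch of the fallback behavior:
    ///
    /// ```ignore
    /// assert_eq!(ThreadSummary::Pending.or_default(), ThreadSummary::DEFAULT);
    ///
    /// let ready = ThreadSummary::Ready("Fix retry logic".into());
    /// assert_eq!(ready.or_default(), SharedString::from("Fix retry logic"));
    /// ```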
    pub fn or_default(&self) -> SharedString {
        self.unwrap_or(Self::DEFAULT)
    }

    pub fn unwrap_or(&self, message: impl Into<SharedString>) -> SharedString {
        self.ready().unwrap_or_else(|| message.into())
    }

    pub fn ready(&self) -> Option<SharedString> {
        match self {
            ThreadSummary::Ready(summary) => Some(summary.clone()),
            ThreadSummary::Pending | ThreadSummary::Generating | ThreadSummary::Error => None,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ExceededWindowError {
    /// Model used when last message exceeded context window
    model_id: LanguageModelId,
    /// Token count including last message
    token_count: u64,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        system_prompt: SharedProjectContext,
        cx: &mut Context<Self>,
    ) -> Self {
        let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
        let configured_model = LanguageModelRegistry::read_global(cx).default_model();
        let profile_id = AgentSettings::get_global(cx).default_profile.clone();

        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: ThreadSummary::Pending,
            pending_summary: Task::ready(None),
            detailed_summary_task: Task::ready(None),
            detailed_summary_tx,
            detailed_summary_rx,
            completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
            messages: Vec::new(),
            next_message_id: MessageId(0),
            last_prompt_id: PromptId::new(),
            project_context: system_prompt,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            tool_use: ToolUseState::new(tools.clone()),
            action_log: cx.new(|_| ActionLog::new(project.clone())),
            initial_project_snapshot: {
                let project_snapshot = Self::project_snapshot(project, cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            request_token_usage: Vec::new(),
            cumulative_token_usage: TokenUsage::default(),
            exceeded_window_error: None,
            tool_use_limit_reached: false,
            feedback: None,
            retry_state: None,
            message_feedback: HashMap::default(),
            last_auto_capture_at: None,
            last_error_context: None,
            last_received_chunk_at: None,
            request_callback: None,
            remaining_turns: u32::MAX,
            configured_model: configured_model.clone(),
            profile: AgentProfile::new(profile_id, tools),
        }
    }

    pub fn deserialize(
        id: ThreadId,
        serialized: SerializedThread,
        project: Entity<Project>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        project_context: SharedProjectContext,
        window: Option<&mut Window>, // None in headless mode
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            serialized
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use = ToolUseState::from_serialized_messages(
            tools.clone(),
            &serialized.messages,
            project.clone(),
            window,
            cx,
        );
        let (detailed_summary_tx, detailed_summary_rx) =
            postage::watch::channel_with(serialized.detailed_summary_state);

        let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
            serialized
                .model
                .and_then(|model| {
                    let model = SelectedModel {
                        provider: model.provider.clone().into(),
                        model: model.model.clone().into(),
                    };
                    registry.select_model(&model, cx)
                })
                .or_else(|| registry.default_model())
        });

        let completion_mode = serialized
            .completion_mode
            .unwrap_or_else(|| AgentSettings::get_global(cx).preferred_completion_mode);
        let profile_id = serialized
            .profile
            .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());

        Self {
            id,
            updated_at: serialized.updated_at,
            summary: ThreadSummary::Ready(serialized.summary),
            pending_summary: Task::ready(None),
            detailed_summary_task: Task::ready(None),
            detailed_summary_tx,
            detailed_summary_rx,
            completion_mode,
            retry_state: None,
            messages: serialized
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    segments: message
                        .segments
                        .into_iter()
                        .map(|segment| match segment {
                            SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
                            SerializedMessageSegment::Thinking { text, signature } => {
                                MessageSegment::Thinking { text, signature }
                            }
                            SerializedMessageSegment::RedactedThinking { data } => {
                                MessageSegment::RedactedThinking(data)
                            }
                        })
                        .collect(),
                    loaded_context: LoadedContext {
                        contexts: Vec::new(),
                        text: message.context,
                        images: Vec::new(),
                    },
                    creases: message
                        .creases
                        .into_iter()
                        .map(|crease| MessageCrease {
                            range: crease.start..crease.end,
                            icon_path: crease.icon_path,
                            label: crease.label,
                            context: None,
                        })
                        .collect(),
                    is_hidden: message.is_hidden,
                    ui_only: false, // UI-only messages are not persisted
                })
                .collect(),
            next_message_id,
            last_prompt_id: PromptId::new(),
            project_context,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            tool_use,
            action_log: cx.new(|_| ActionLog::new(project)),
            initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
            request_token_usage: serialized.request_token_usage,
            cumulative_token_usage: serialized.cumulative_token_usage,
            exceeded_window_error: None,
            tool_use_limit_reached: serialized.tool_use_limit_reached,
            feedback: None,
            message_feedback: HashMap::default(),
            last_auto_capture_at: None,
            last_error_context: None,
            last_received_chunk_at: None,
            request_callback: None,
            remaining_turns: u32::MAX,
            configured_model,
            profile: AgentProfile::new(profile_id, tools),
        }
    }

    pub fn set_request_callback(
        &mut self,
        callback: impl 'static
        + FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>]),
    ) {
        self.request_callback = Some(Box::new(callback));
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn profile(&self) -> &AgentProfile {
        &self.profile
    }

    pub fn set_profile(&mut self, id: AgentProfileId, cx: &mut Context<Self>) {
        if &id != self.profile.id() {
            self.profile = AgentProfile::new(id, self.tools.clone());
            cx.emit(ThreadEvent::ProfileChanged);
        }
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn advance_prompt_id(&mut self) {
        self.last_prompt_id = PromptId::new();
    }

    pub fn project_context(&self) -> SharedProjectContext {
        self.project_context.clone()
    }

    pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
        if self.configured_model.is_none() {
            self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
        }
        self.configured_model.clone()
    }

    pub fn configured_model(&self) -> Option<ConfiguredModel> {
        self.configured_model.clone()
    }

    pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
        self.configured_model = model;
        cx.notify();
    }

    pub fn summary(&self) -> &ThreadSummary {
        &self.summary
    }

    pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        let current_summary = match &self.summary {
            ThreadSummary::Pending | ThreadSummary::Generating => return,
            ThreadSummary::Ready(summary) => summary,
            ThreadSummary::Error => &ThreadSummary::DEFAULT,
        };

        let mut new_summary = new_summary.into();

        if new_summary.is_empty() {
            new_summary = ThreadSummary::DEFAULT;
        }

        if current_summary != &new_summary {
            self.summary = ThreadSummary::Ready(new_summary);
            cx.emit(ThreadEvent::SummaryChanged);
        }
    }

    pub fn completion_mode(&self) -> CompletionMode {
        self.completion_mode
    }

    pub fn set_completion_mode(&mut self, mode: CompletionMode) {
        self.completion_mode = mode;
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        let index = self
            .messages
            .binary_search_by(|message| message.id.cmp(&id))
            .ok()?;

        self.messages.get(index)
    }

    pub fn messages(&self) -> impl ExactSizeIterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_generating(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }

    /// Indicates whether streaming of language model events has gone stale
    /// (no chunk received for more than 250ms).
    /// Returns `None` when no completion is currently streaming.
    pub fn is_generation_stale(&self) -> Option<bool> {
        const STALE_THRESHOLD: u128 = 250;

        self.last_received_chunk_at
            .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
    }

    fn received_chunk(&mut self) {
        self.last_received_chunk_at = Some(Instant::now());
    }

    pub fn queue_state(&self) -> Option<QueueState> {
        self.pending_completions
            .first()
            .map(|pending_completion| pending_completion.queue_state)
    }

    pub fn tools(&self) -> &Entity<ToolWorkingSet> {
        &self.tools
    }

    pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .find(|tool_use| &tool_use.id == id)
    }

    pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.needs_confirmation())
    }

    pub fn has_pending_tool_uses(&self) -> bool {
        !self.tool_use.pending_tool_uses().is_empty()
    }

    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }

    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let git_store = self.project().read(cx).git_store().clone();
        let restore = git_store.update(cx, |git_store, cx| {
            git_store.restore_checkpoint(checkpoint.git_checkpoint.clone(), cx)
        });

        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                }
                this.pending_checkpoint = None;
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }

    fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
        let pending_checkpoint = if self.is_generating() {
            return;
        } else if let Some(checkpoint) = self.pending_checkpoint.take() {
            checkpoint
        } else {
            return;
        };

        self.finalize_checkpoint(pending_checkpoint, cx);
    }

    fn finalize_checkpoint(
        &mut self,
        pending_checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) {
        let git_store = self.project.read(cx).git_store().clone();
        let final_checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
        cx.spawn(async move |this, cx| match final_checkpoint.await {
            Ok(final_checkpoint) => {
                let equal = git_store
                    .update(cx, |store, cx| {
                        store.compare_checkpoints(
                            pending_checkpoint.git_checkpoint.clone(),
                            final_checkpoint.clone(),
                            cx,
                        )
                    })?
                    .await
                    .unwrap_or(false);

                if !equal {
                    this.update(cx, |this, cx| {
                        this.insert_checkpoint(pending_checkpoint, cx)
                    })?;
                }

                Ok(())
            }
            Err(_) => this.update(cx, |this, cx| {
                this.insert_checkpoint(pending_checkpoint, cx)
            }),
        })
        .detach();
    }

    fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
        self.checkpoints_by_message
            .insert(checkpoint.message_id, checkpoint);
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();
    }

    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }

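    /// Removes the message with the given ID and every message after it,
    /// dropping any checkpoints associated with the removed messages.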
    pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
        let Some(message_ix) = self
            .messages
            .iter()
            .rposition(|message| message.id == message_id)
        else {
            return;
        };
        for deleted_message in self.messages.drain(message_ix..) {
            self.checkpoints_by_message.remove(&deleted_message.id);
        }
        cx.notify();
    }

    pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AgentContext> {
        self.messages
            .iter()
            .find(|message| message.id == id)
            .into_iter()
            .flat_map(|message| message.loaded_context.contexts.iter())
    }

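    /// Returns whether the message at index `ix` ends a turn: either it is
    /// the final message while nothing is generating, or it is an Assistant
    /// message followed by a visible User message.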
    pub fn is_turn_end(&self, ix: usize) -> bool {
        if self.messages.is_empty() {
            return false;
        }

        if !self.is_generating() && ix == self.messages.len() - 1 {
            return true;
        }

        let Some(message) = self.messages.get(ix) else {
            return false;
        };

        if message.role != Role::Assistant {
            return false;
        }

        self.messages
            .get(ix + 1)
            .map(|next_message| next_message.role == Role::User && !next_message.is_hidden)
            .unwrap_or(false)
    }

    pub fn tool_use_limit_reached(&self) -> bool {
        self.tool_use_limit_reached
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        self.tool_use
            .pending_tool_uses()
            .iter()
            .all(|pending_tool_use| pending_tool_use.status.is_error())
    }

    /// Returns whether any pending tool uses may perform edits
    pub fn has_pending_edit_tool_uses(&self) -> bool {
        self.tool_use
            .pending_tool_uses()
            .iter()
            .filter(|pending_tool_use| !pending_tool_use.status.is_error())
            .any(|pending_tool_use| pending_tool_use.may_perform_edits)
    }

    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, cx)
    }

    pub fn tool_results_for_message(
        &self,
        assistant_message_id: MessageId,
    ) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(assistant_message_id)
    }

    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }

    pub fn output_for_tool(&self, id: &LanguageModelToolUseId) -> Option<&Arc<str>> {
        match &self.tool_use.tool_result(id)?.content {
            LanguageModelToolResultContent::Text(text) => Some(text),
            LanguageModelToolResultContent::Image(_) => {
                // TODO: We should display the image.
                None
            }
        }
    }

    pub fn card_for_tool(&self, id: &LanguageModelToolUseId) -> Option<AnyToolCard> {
        self.tool_use.tool_result_card(id).cloned()
    }

    /// Return tools that are both enabled and supported by the model
    pub fn available_tools(
        &self,
        cx: &App,
        model: Arc<dyn LanguageModel>,
    ) -> Vec<LanguageModelRequestTool> {
        if model.supports_tools() {
            self.profile
                .enabled_tools(cx)
                .into_iter()
                .filter_map(|(name, tool)| {
                    // Skip tools that cannot be supported
                    let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
                    Some(LanguageModelRequestTool {
                        name: name.into(),
                        description: tool.description(),
                        input_schema,
                    })
                })
                .collect()
        } else {
            Vec::default()
        }
    }

    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        loaded_context: ContextLoadResult,
        git_checkpoint: Option<GitStoreCheckpoint>,
        creases: Vec<MessageCrease>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        if !loaded_context.referenced_buffers.is_empty() {
            self.action_log.update(cx, |log, cx| {
                for buffer in loaded_context.referenced_buffers {
                    log.buffer_read(buffer, cx);
                }
            });
        }

        let message_id = self.insert_message(
            Role::User,
            vec![MessageSegment::Text(text.into())],
            loaded_context.loaded_context,
            creases,
            false,
            cx,
        );

        if let Some(git_checkpoint) = git_checkpoint {
            self.pending_checkpoint = Some(ThreadCheckpoint {
                message_id,
                git_checkpoint,
            });
        }

        self.auto_capture_telemetry(cx);

        message_id
    }

    pub fn insert_invisible_continue_message(&mut self, cx: &mut Context<Self>) -> MessageId {
        let id = self.insert_message(
            Role::User,
            vec![MessageSegment::Text("Continue where you left off".into())],
            LoadedContext::default(),
            vec![],
            true,
            cx,
        );
        self.pending_checkpoint = None;

        id
    }

    pub fn insert_assistant_message(
        &mut self,
        segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        self.insert_message(
            Role::Assistant,
            segments,
            LoadedContext::default(),
            Vec::new(),
            false,
            cx,
        )
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        segments: Vec<MessageSegment>,
        loaded_context: LoadedContext,
        creases: Vec<MessageCrease>,
        is_hidden: bool,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            segments,
            loaded_context,
            creases,
            is_hidden,
            ui_only: false,
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    pub fn edit_message(
        &mut self,
        id: MessageId,
        new_role: Role,
        new_segments: Vec<MessageSegment>,
        creases: Vec<MessageCrease>,
        loaded_context: Option<LoadedContext>,
        checkpoint: Option<GitStoreCheckpoint>,
        cx: &mut Context<Self>,
    ) -> bool {
        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
            return false;
        };
        message.role = new_role;
        message.segments = new_segments;
        message.creases = creases;
        if let Some(context) = loaded_context {
            message.loaded_context = context;
        }
        if let Some(git_checkpoint) = checkpoint {
            self.checkpoints_by_message.insert(
                id,
                ThreadCheckpoint {
                    message_id: id,
                    git_checkpoint,
                },
            );
        }
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageEdited(id));
        true
    }

    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
            return false;
        };
        self.messages.remove(index);
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageDeleted(id));
        true
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Agent:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(content) => text.push_str(content),
                    MessageSegment::Thinking { text: content, .. } => {
                        text.push_str(&format!("<think>{}</think>", content))
                    }
                    MessageSegment::RedactedThinking(_) => {}
                }
            }
            text.push('\n');
        }

        text
    }

    /// Serializes this thread into a format for storage or telemetry.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary().or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .filter(|message| !message.ui_only)
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking { text, signature } => {
                                    SerializedMessageSegment::Thinking {
                                        text: text.clone(),
                                        signature: signature.clone(),
                                    }
                                }
                                MessageSegment::RedactedThinking(data) => {
                                    SerializedMessageSegment::RedactedThinking {
                                        data: data.clone(),
                                    }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                                output: tool_result.output.clone(),
                            })
                            .collect(),
                        context: message.loaded_context.text.clone(),
                        creases: message
                            .creases
                            .iter()
                            .map(|crease| SerializedCrease {
                                start: crease.range.start,
                                end: crease.range.end,
                                icon_path: crease.icon_path.clone(),
                                label: crease.label.clone(),
                            })
                            .collect(),
                        is_hidden: message.is_hidden,
                    })
                    .collect(),
                initial_project_snapshot,
                cumulative_token_usage: this.cumulative_token_usage,
                request_token_usage: this.request_token_usage.clone(),
                detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
                exceeded_window_error: this.exceeded_window_error.clone(),
                model: this
                    .configured_model
                    .as_ref()
                    .map(|model| SerializedLanguageModel {
                        provider: model.provider.id().0.to_string(),
                        model: model.model.id().0.to_string(),
                    }),
                completion_mode: Some(this.completion_mode),
                tool_use_limit_reached: this.tool_use_limit_reached,
                profile: Some(this.profile.id().clone()),
            })
        })
    }

    pub fn remaining_turns(&self) -> u32 {
        self.remaining_turns
    }

    pub fn set_remaining_turns(&mut self, remaining_turns: u32) {
        self.remaining_turns = remaining_turns;
    }

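    /// Sends the current thread state to `model`, consuming one remaining
    /// turn. Does nothing once `remaining_turns` reaches zero.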
    pub fn send_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        if self.remaining_turns == 0 {
            return;
        }

        self.remaining_turns -= 1;

        self.flush_notifications(model.clone(), intent, cx);

        let _checkpoint = self.finalize_pending_checkpoint(cx);
        self.stream_completion(
            self.to_completion_request(model.clone(), intent, cx),
            model,
            intent,
            window,
            cx,
        );
    }

    pub fn retry_last_completion(
        &mut self,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        // Clear any existing error state
        self.retry_state = None;

        // Use the last error context if available, otherwise fall back to the configured model
        let (model, intent) = if let Some((model, intent)) = self.last_error_context.take() {
            (model, intent)
        } else if let Some(configured_model) = self.configured_model.as_ref() {
            let model = configured_model.model.clone();
            let intent = if self.has_pending_tool_uses() {
                CompletionIntent::ToolResults
            } else {
                CompletionIntent::UserPrompt
            };
            (model, intent)
        } else if let Some(configured_model) = self.get_or_init_configured_model(cx) {
            let model = configured_model.model.clone();
            let intent = if self.has_pending_tool_uses() {
                CompletionIntent::ToolResults
            } else {
                CompletionIntent::UserPrompt
            };
            (model, intent)
        } else {
            return;
        };

        self.send_to_model(model, intent, window, cx);
    }

    pub fn enable_burn_mode_and_retry(
        &mut self,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        self.completion_mode = CompletionMode::Burn;
        cx.emit(ThreadEvent::ProfileChanged);
        self.retry_last_completion(window, cx);
    }

    pub fn used_tools_since_last_user_message(&self) -> bool {
        for message in self.messages.iter().rev() {
            if self.tool_use.message_has_tool_results(message.id) {
                return true;
            } else if message.role == Role::User {
                return false;
            }
        }

        false
    }

    pub fn to_completion_request(
        &self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        cx: &mut Context<Self>,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            thread_id: Some(self.id.to_string()),
            prompt_id: Some(self.last_prompt_id.to_string()),
            intent: Some(intent),
            mode: None,
            messages: vec![],
            tools: Vec::new(),
            tool_choice: None,
            stop: Vec::new(),
            temperature: AgentSettings::temperature_for_model(&model, cx),
            thinking_allowed: true,
        };

        let available_tools = self.available_tools(cx, model.clone());
        let available_tool_names = available_tools
            .iter()
            .map(|tool| tool.name.clone())
            .collect();

        let model_context = &ModelContext {
            available_tools: available_tool_names,
        };

        if let Some(project_context) = self.project_context.borrow().as_ref() {
            match self
                .prompt_builder
                .generate_assistant_system_prompt(project_context, model_context)
            {
                Err(err) => {
                    let message = format!("{err:?}").into();
                    log::error!("{message}");
                    cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                        header: "Error generating system prompt".into(),
                        message,
                    }));
                }
                Ok(system_prompt) => {
                    request.messages.push(LanguageModelRequestMessage {
                        role: Role::System,
                        content: vec![MessageContent::Text(system_prompt)],
                        cache: true,
                    });
                }
            }
        } else {
            let message = "Context for system prompt unexpectedly not ready.".into();
            log::error!("{message}");
            cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                header: "Error generating system prompt".into(),
                message,
            }));
        }

        let mut message_ix_to_cache = None;
        for message in &self.messages {
            // ui_only messages are for the UI only, not for the model
            if message.ui_only {
                continue;
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            message
                .loaded_context
                .add_to_request_message(&mut request_message);

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => {
                        let text = text.trim_end();
                        if !text.is_empty() {
                            request_message
                                .content
                                .push(MessageContent::Text(text.into()));
                        }
                    }
                    MessageSegment::Thinking { text, signature } => {
                        if !text.is_empty() {
                            request_message.content.push(MessageContent::Thinking {
                                text: text.into(),
                                signature: signature.clone(),
                            });
                        }
                    }
                    MessageSegment::RedactedThinking(data) => {
                        request_message
                            .content
                            .push(MessageContent::RedactedThinking(data.clone()));
                    }
                };
            }

            let mut cache_message = true;
            let mut tool_results_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };
            for (tool_use, tool_result) in self.tool_use.tool_results(message.id) {
                if let Some(tool_result) = tool_result {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                    tool_results_message
                        .content
                        .push(MessageContent::ToolResult(LanguageModelToolResult {
                            tool_use_id: tool_use.id.clone(),
                            tool_name: tool_result.tool_name.clone(),
                            is_error: tool_result.is_error,
                            content: if tool_result.content.is_empty() {
                                // Surprisingly, the API fails if we return an empty string here.
                                // It thinks we are sending a tool use without a tool result.
                                "<Tool returned an empty string>".into()
                            } else {
                                tool_result.content.clone()
                            },
                            output: None,
                        }));
                } else {
                    cache_message = false;
                    log::debug!(
                        "skipped tool use {:?} because it is still pending",
                        tool_use
                    );
                }
            }

            if cache_message {
                message_ix_to_cache = Some(request.messages.len());
            }
            request.messages.push(request_message);

            if !tool_results_message.content.is_empty() {
                if cache_message {
                    message_ix_to_cache = Some(request.messages.len());
                }
                request.messages.push(tool_results_message);
            }
        }

        // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
        if let Some(message_ix_to_cache) = message_ix_to_cache {
            request.messages[message_ix_to_cache].cache = true;
        }

        request.tools = available_tools;
        request.mode = if model.supports_burn_mode() {
            Some(self.completion_mode.into())
        } else {
            Some(CompletionMode::Normal.into())
        };

        request
    }

    fn to_summarize_request(
        &self,
        model: &Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        added_user_message: String,
        cx: &App,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: Some(intent),
            mode: None,
            messages: vec![],
            tools: Vec::new(),
            tool_choice: None,
            stop: Vec::new(),
            temperature: AgentSettings::temperature_for_model(model, cx),
            thinking_allowed: false,
        };

        for message in &self.messages {
            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => request_message
                        .content
                        .push(MessageContent::Text(text.clone())),
                    MessageSegment::Thinking { .. } => {}
                    MessageSegment::RedactedThinking(_) => {}
                }
            }

            if request_message.content.is_empty() {
                continue;
            }

            request.messages.push(request_message);
        }

        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![MessageContent::Text(added_user_message)],
            cache: false,
        });

        request
    }

    /// Insert auto-generated notifications (if any) to the thread
    fn flush_notifications(
        &mut self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        cx: &mut Context<Self>,
    ) {
        match intent {
            CompletionIntent::UserPrompt | CompletionIntent::ToolResults => {
                if let Some(pending_tool_use) = self.attach_tracked_files_state(model, cx) {
                    cx.emit(ThreadEvent::ToolFinished {
                        tool_use_id: pending_tool_use.id.clone(),
                        pending_tool_use: Some(pending_tool_use),
                    });
                }
            }
            CompletionIntent::ThreadSummarization
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => {}
        };
    }

    fn attach_tracked_files_state(
        &mut self,
        model: Arc<dyn LanguageModel>,
        cx: &mut App,
    ) -> Option<PendingToolUse> {
        // Represent notification as a simulated `project_notifications` tool call
        let tool_name = Arc::from("project_notifications");
        let tool = self.tools.read(cx).tool(&tool_name, cx)?;

        if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
            return None;
        }

        if self
            .action_log
            .update(cx, |log, cx| log.unnotified_user_edits(cx).is_none())
        {
            return None;
        }

        let input = serde_json::json!({});
        let request = Arc::new(LanguageModelRequest::default()); // unused
        let window = None;
        let tool_result = tool.run(
            input,
            request,
            self.project.clone(),
            self.action_log.clone(),
            model.clone(),
            window,
            cx,
        );

        let tool_use_id =
            LanguageModelToolUseId::from(format!("project_notifications_{}", self.messages.len()));

        let tool_use = LanguageModelToolUse {
            id: tool_use_id.clone(),
            name: tool_name.clone(),
            raw_input: "{}".to_string(),
            input: serde_json::json!({}),
            is_input_complete: true,
        };

        let tool_output = cx.background_executor().block(tool_result.output);

        // Attach a project_notification tool call to the latest existing
        // Assistant message. We cannot create a new Assistant message
        // because thinking models require a `thinking` block that we
        // cannot mock. We cannot send a notification as a normal
        // (non-tool-use) User message because this distracts the Agent
        // too much.
        let tool_message_id = self
            .messages
            .iter()
            .enumerate()
            .rfind(|(_, message)| message.role == Role::Assistant)
            .map(|(_, message)| message.id)?;

        let tool_use_metadata = ToolUseMetadata {
            model: model.clone(),
            thread_id: self.id.clone(),
            prompt_id: self.last_prompt_id.clone(),
        };

        self.tool_use
            .request_tool_use(tool_message_id, tool_use, tool_use_metadata.clone(), cx);

        let pending_tool_use = self.tool_use.insert_tool_output(
            tool_use_id.clone(),
            tool_name,
            tool_output,
            self.configured_model.as_ref(),
            self.completion_mode,
        );

        pending_tool_use
    }

    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        self.tool_use_limit_reached = false;

        let pending_completion_id = post_inc(&mut self.completion_count);
        let mut request_callback_parameters = if self.request_callback.is_some() {
            Some((request.clone(), Vec::new()))
        } else {
            None
        };
        let prompt_id = self.last_prompt_id.clone();
        let tool_use_metadata = ToolUseMetadata {
            model: model.clone(),
            thread_id: self.id.clone(),
            prompt_id: prompt_id.clone(),
        };

        let completion_mode = request
            .mode
            .unwrap_or(zed_llm_client::CompletionMode::Normal);

        self.last_received_chunk_at = Some(Instant::now());

        let task = cx.spawn(async move |thread, cx| {
            let stream_completion_future = model.stream_completion(request, &cx);
            let initial_token_usage =
                thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage);
            let stream_completion = async {
                let mut events = stream_completion_future.await?;

                let mut stop_reason = StopReason::EndTurn;
                let mut current_token_usage = TokenUsage::default();

                thread
                    .update(cx, |_thread, cx| {
                        cx.emit(ThreadEvent::NewRequest);
                    })
                    .ok();

                let mut request_assistant_message_id = None;

                while let Some(event) = events.next().await {
                    if let Some((_, response_events)) = request_callback_parameters.as_mut() {
                        response_events
                            .push(event.as_ref().map_err(|error| error.to_string()).cloned());
                    }

                    thread.update(cx, |thread, cx| {
                        match event? {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                request_assistant_message_id =
                                    Some(thread.insert_assistant_message(
                                        vec![MessageSegment::Text(String::new())],
                                        cx,
                                    ));
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
                                thread.update_token_usage_at_last_message(token_usage);
                                thread.cumulative_token_usage = thread.cumulative_token_usage
                                    + token_usage
                                    - current_token_usage;
                                current_token_usage = token_usage;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                thread.received_chunk();

                                cx.emit(ThreadEvent::ReceivedTextChunk);
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant
                                        && !thread.tool_use.has_tool_results(last_message.id)
                                    {
                                        last_message.push_text(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        request_assistant_message_id =
                                            Some(thread.insert_assistant_message(
                                                vec![MessageSegment::Text(chunk.to_string())],
                                                cx,
                                            ));
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::Thinking {
                                text: chunk,
                                signature,
                            } => {
                                thread.received_chunk();

                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant
                                        && !thread.tool_use.has_tool_results(last_message.id)
                                    {
                                        last_message.push_thinking(&chunk, signature);
                                        cx.emit(ThreadEvent::StreamedAssistantThinking(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        request_assistant_message_id =
                                            Some(thread.insert_assistant_message(
                                                vec![MessageSegment::Thinking {
                                                    text: chunk.to_string(),
                                                    signature,
                                                }],
                                                cx,
                                            ));
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::RedactedThinking { data } => {
                                thread.received_chunk();

                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant
                                        && !thread.tool_use.has_tool_results(last_message.id)
                                    {
                                        last_message.push_redacted_thinking(data);
                                    } else {
                                        request_assistant_message_id =
                                            Some(thread.insert_assistant_message(
                                                vec![MessageSegment::RedactedThinking(data)],
                                                cx,
                                            ));
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                let last_assistant_message_id = request_assistant_message_id
                                    .unwrap_or_else(|| {
                                        let new_assistant_message_id =
                                            thread.insert_assistant_message(vec![], cx);
                                        request_assistant_message_id =
                                            Some(new_assistant_message_id);
                                        new_assistant_message_id
                                    });

                                let tool_use_id = tool_use.id.clone();
                                let streamed_input = if tool_use.is_input_complete {
                                    None
                                } else {
                                    Some(tool_use.input.clone())
1822 };
1823
1824 let ui_text = thread.tool_use.request_tool_use(
1825 last_assistant_message_id,
1826 tool_use,
1827 tool_use_metadata.clone(),
1828 cx,
1829 );
1830
1831 if let Some(input) = streamed_input {
1832 cx.emit(ThreadEvent::StreamedToolUse {
1833 tool_use_id,
1834 ui_text,
1835 input,
1836 });
1837 }
1838 }
1839 LanguageModelCompletionEvent::ToolUseJsonParseError {
1840 id,
1841 tool_name,
1842 raw_input: invalid_input_json,
1843 json_parse_error,
1844 } => {
1845 thread.receive_invalid_tool_json(
1846 id,
1847 tool_name,
1848 invalid_input_json,
1849 json_parse_error,
1850 window,
1851 cx,
1852 );
1853 }
1854 LanguageModelCompletionEvent::StatusUpdate(status_update) => {
1855 if let Some(completion) = thread
1856 .pending_completions
1857 .iter_mut()
1858 .find(|completion| completion.id == pending_completion_id)
1859 {
1860 match status_update {
1861 CompletionRequestStatus::Queued { position } => {
1862 completion.queue_state =
1863 QueueState::Queued { position };
1864 }
1865 CompletionRequestStatus::Started => {
1866 completion.queue_state = QueueState::Started;
1867 }
1868 CompletionRequestStatus::Failed {
1869 code,
1870 message,
1871 request_id: _,
1872 retry_after,
1873 } => {
1874 return Err(
1875 LanguageModelCompletionError::from_cloud_failure(
1876 model.upstream_provider_name(),
1877 code,
1878 message,
1879 retry_after.map(Duration::from_secs_f64),
1880 ),
1881 );
1882 }
1883 CompletionRequestStatus::UsageUpdated { amount, limit } => {
1884 thread.update_model_request_usage(
1885 amount as u32,
1886 limit,
1887 cx,
1888 );
1889 }
1890 CompletionRequestStatus::ToolUseLimitReached => {
1891 thread.tool_use_limit_reached = true;
1892 cx.emit(ThreadEvent::ToolUseLimitReached);
1893 }
1894 }
1895 }
1896 }
1897 }
1898
1899 thread.touch_updated_at();
1900 cx.emit(ThreadEvent::StreamedCompletion);
1901 cx.notify();
1902
1903 thread.auto_capture_telemetry(cx);
1904 Ok(())
1905 })??;
1906
1907 smol::future::yield_now().await;
1908 }
1909
1910 thread.update(cx, |thread, cx| {
1911 thread.last_received_chunk_at = None;
1912 thread
1913 .pending_completions
1914 .retain(|completion| completion.id != pending_completion_id);
1915
1916 // If there is a response without tool use, summarize the message. Otherwise,
1917 // allow two tool uses before summarizing.
1918 if matches!(thread.summary, ThreadSummary::Pending)
1919 && thread.messages.len() >= 2
1920 && (!thread.has_pending_tool_uses() || thread.messages.len() >= 6)
1921 {
1922 thread.summarize(cx);
1923 }
1924 })?;
1925
1926 anyhow::Ok(stop_reason)
1927 };
1928
1929 let result = stream_completion.await;
1930 let mut retry_scheduled = false;
1931
1932 thread
1933 .update(cx, |thread, cx| {
1934 thread.finalize_pending_checkpoint(cx);
1935 match result.as_ref() {
1936 Ok(stop_reason) => {
1937 match stop_reason {
1938 StopReason::ToolUse => {
1939 let tool_uses =
1940 thread.use_pending_tools(window, model.clone(), cx);
1941 cx.emit(ThreadEvent::UsePendingTools { tool_uses });
1942 }
1943 StopReason::EndTurn | StopReason::MaxTokens => {
1944 thread.project.update(cx, |project, cx| {
1945 project.set_agent_location(None, cx);
1946 });
1947 }
1948 StopReason::Refusal => {
1949 thread.project.update(cx, |project, cx| {
1950 project.set_agent_location(None, cx);
1951 });
1952
1953 // Remove the turn that was refused.
1954 //
1955 // https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/handle-streaming-refusals#reset-context-after-refusal
1956 {
1957 let mut messages_to_remove = Vec::new();
1958
1959 for (ix, message) in
1960 thread.messages.iter().enumerate().rev()
1961 {
1962 messages_to_remove.push(message.id);
1963
1964 if message.role == Role::User {
1965 if ix == 0 {
1966 break;
1967 }
1968
1969 if let Some(prev_message) =
1970 thread.messages.get(ix - 1)
1971 {
1972 if prev_message.role == Role::Assistant {
1973 break;
1974 }
1975 }
1976 }
1977 }
1978
1979 for message_id in messages_to_remove {
1980 thread.delete_message(message_id, cx);
1981 }
1982 }
1983
1984 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1985 header: "Language model refusal".into(),
1986 message:
1987 "Model refused to generate content for safety reasons."
1988 .into(),
1989 }));
1990 }
1991 }
1992
1993 // We successfully completed, so cancel any remaining retries.
1994 thread.retry_state = None;
1995 }
1996 Err(error) => {
1997 thread.project.update(cx, |project, cx| {
1998 project.set_agent_location(None, cx);
1999 });
2000
2001 if error.is::<PaymentRequiredError>() {
2002 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
2003 } else if let Some(error) =
2004 error.downcast_ref::<ModelRequestLimitReachedError>()
2005 {
2006 cx.emit(ThreadEvent::ShowError(
2007 ThreadError::ModelRequestLimitReached { plan: error.plan },
2008 ));
2009 } else if let Some(completion_error) =
2010 error.downcast_ref::<LanguageModelCompletionError>()
2011 {
2012 match &completion_error {
2013 LanguageModelCompletionError::PromptTooLarge {
2014 tokens, ..
2015 } => {
2016 let tokens = tokens.unwrap_or_else(|| {
2017 // We didn't get an exact token count from the API, so fall back on our estimate.
2018 thread
2019 .total_token_usage()
2020 .map(|usage| usage.total)
2021 .unwrap_or(0)
2022 // We know the context window was exceeded in practice, so if our estimate was
2023 // lower than max tokens, the estimate was wrong; return that we exceeded by 1.
2024 .max(
2025 model
2026 .max_token_count_for_mode(completion_mode)
2027 .saturating_add(1),
2028 )
2029 });
2030 thread.exceeded_window_error = Some(ExceededWindowError {
2031 model_id: model.id(),
2032 token_count: tokens,
2033 });
2034 cx.notify();
2035 }
2036 _ => {
2037 if let Some(retry_strategy) =
2038 Thread::get_retry_strategy(completion_error)
2039 {
2040 log::info!(
2041 "Retrying with {:?} for language model completion error {:?}",
2042 retry_strategy,
2043 completion_error
2044 );
2045
2046 retry_scheduled = thread
2047 .handle_retryable_error_with_delay(
2048 &completion_error,
2049 Some(retry_strategy),
2050 model.clone(),
2051 intent,
2052 window,
2053 cx,
2054 );
2055 }
2056 }
2057 }
2058 }
2059
2060 if !retry_scheduled {
2061 thread.cancel_last_completion(window, cx);
2062 }
2063 }
2064 }
2065
2066 if !retry_scheduled {
2067 cx.emit(ThreadEvent::Stopped(result.map_err(Arc::new)));
2068 }
2069
2070 if let Some((request_callback, (request, response_events))) = thread
2071 .request_callback
2072 .as_mut()
2073 .zip(request_callback_parameters.as_ref())
2074 {
2075 request_callback(request, response_events);
2076 }
2077
2078 thread.auto_capture_telemetry(cx);
2079
2080 if let Ok(initial_usage) = initial_token_usage {
2081 let usage = thread.cumulative_token_usage - initial_usage;
2082
2083 telemetry::event!(
2084 "Assistant Thread Completion",
2085 thread_id = thread.id().to_string(),
2086 prompt_id = prompt_id,
2087 model = model.telemetry_id(),
2088 model_provider = model.provider_id().to_string(),
2089 input_tokens = usage.input_tokens,
2090 output_tokens = usage.output_tokens,
2091 cache_creation_input_tokens = usage.cache_creation_input_tokens,
2092 cache_read_input_tokens = usage.cache_read_input_tokens,
2093 );
2094 }
2095 })
2096 .ok();
2097 });
2098
2099 self.pending_completions.push(PendingCompletion {
2100 id: pending_completion_id,
2101 queue_state: QueueState::Sending,
2102 _task: task,
2103 });
2104 }
2105
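    /// Kicks off generation of a short, single-line summary of the thread using
    /// the configured thread-summary model, storing the result in `self.summary`.
    /// Does nothing when no summary model is configured or its provider isn't
    /// authenticated; emits `SummaryGenerated` once generation settles.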
2106 pub fn summarize(&mut self, cx: &mut Context<Self>) {
2107 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
            log::warn!("No thread summary model configured; skipping summarization");
2109 return;
2110 };
2111
2112 if !model.provider.is_authenticated(cx) {
2113 return;
2114 }
2115
2116 let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
2117
2118 let request = self.to_summarize_request(
2119 &model.model,
2120 CompletionIntent::ThreadSummarization,
2121 added_user_message.into(),
2122 cx,
2123 );
2124
2125 self.summary = ThreadSummary::Generating;
2126
2127 self.pending_summary = cx.spawn(async move |this, cx| {
2128 let result = async {
2129 let mut messages = model.model.stream_completion(request, &cx).await?;
2130
2131 let mut new_summary = String::new();
2132 while let Some(event) = messages.next().await {
2133 let Ok(event) = event else {
2134 continue;
2135 };
2136 let text = match event {
2137 LanguageModelCompletionEvent::Text(text) => text,
2138 LanguageModelCompletionEvent::StatusUpdate(
2139 CompletionRequestStatus::UsageUpdated { amount, limit },
2140 ) => {
2141 this.update(cx, |thread, cx| {
2142 thread.update_model_request_usage(amount as u32, limit, cx);
2143 })?;
2144 continue;
2145 }
2146 _ => continue,
2147 };
2148
2149 let mut lines = text.lines();
2150 new_summary.extend(lines.next());
2151
                    // The summary must be a single line; stop as soon as the model emits a line break.
2153 if lines.next().is_some() {
2154 break;
2155 }
2156 }
2157
2158 anyhow::Ok(new_summary)
2159 }
2160 .await;
2161
2162 this.update(cx, |this, cx| {
2163 match result {
2164 Ok(new_summary) => {
2165 if new_summary.is_empty() {
2166 this.summary = ThreadSummary::Error;
2167 } else {
2168 this.summary = ThreadSummary::Ready(new_summary.into());
2169 }
2170 }
2171 Err(err) => {
2172 this.summary = ThreadSummary::Error;
2173 log::error!("Failed to generate thread summary: {}", err);
2174 }
2175 }
2176 cx.emit(ThreadEvent::SummaryGenerated);
2177 })
2178 .log_err()?;
2179
2180 Some(())
2181 });
2182 }
2183
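    /// Maps a completion error to the strategy we should use to retry it, or
    /// `None` when retrying cannot help (e.g. a bad API key or an oversized prompt).
    ///
    /// A minimal sketch of how callers consume the result (illustrative only):
    /// ```ignore
    /// if let Some(strategy) = Thread::get_retry_strategy(&error) {
    ///     // Schedule a retry after the strategy's delay.
    /// } else {
    ///     // Surface the error to the user; retrying won't help.
    /// }
    /// ```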
2184 fn get_retry_strategy(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2185 use LanguageModelCompletionError::*;
2186
        // General strategy here:
        // - If retrying won't help (e.g. an invalid API key or an oversized payload), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   waiting the provider-specified interval when given one and backing off exponentially otherwise.
        // - If it's an issue that *might* be fixed by retrying (e.g. an internal server error), retry up to 3 times.
2191 match error {
2192 HttpResponseError {
2193 status_code: StatusCode::TOO_MANY_REQUESTS,
2194 ..
2195 } => Some(RetryStrategy::ExponentialBackoff {
2196 initial_delay: BASE_RETRY_DELAY,
2197 max_attempts: MAX_RETRY_ATTEMPTS,
2198 }),
2199 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2200 Some(RetryStrategy::Fixed {
2201 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2202 max_attempts: MAX_RETRY_ATTEMPTS,
2203 })
2204 }
2205 UpstreamProviderError {
2206 status,
2207 retry_after,
2208 ..
2209 } => match *status {
2210 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2211 Some(RetryStrategy::Fixed {
2212 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2213 max_attempts: MAX_RETRY_ATTEMPTS,
2214 })
2215 }
2216 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2217 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2218 // Internal Server Error could be anything, retry up to 3 times.
2219 max_attempts: 3,
2220 }),
2221 status => {
                    // There is no StatusCode variant for the unofficial HTTP 529
                    // ("the service is overloaded"), but we see it frequently in practice.
                    // See https://http.dev/529
2224 if status.as_u16() == 529 {
2225 Some(RetryStrategy::Fixed {
2226 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2227 max_attempts: MAX_RETRY_ATTEMPTS,
2228 })
2229 } else {
2230 Some(RetryStrategy::Fixed {
2231 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2232 max_attempts: 2,
2233 })
2234 }
2235 }
2236 },
2237 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2238 delay: BASE_RETRY_DELAY,
2239 max_attempts: 3,
2240 }),
2241 ApiReadResponseError { .. }
2242 | HttpSend { .. }
2243 | DeserializeResponse { .. }
2244 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2245 delay: BASE_RETRY_DELAY,
2246 max_attempts: 3,
2247 }),
2248 // Retrying these errors definitely shouldn't help.
2249 HttpResponseError {
2250 status_code:
2251 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2252 ..
2253 }
2254 | AuthenticationError { .. }
2255 | PermissionError { .. }
2256 | NoApiKey { .. }
2257 | ApiEndpointNotFound { .. }
2258 | PromptTooLarge { .. } => None,
            // These failures are usually deterministic, but retry once in case they were transient.
2260 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2261 delay: BASE_RETRY_DELAY,
2262 max_attempts: 1,
2263 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2265 HttpResponseError { status_code, .. }
2266 if status_code.is_client_error() || status_code.is_server_error() =>
2267 {
2268 Some(RetryStrategy::Fixed {
2269 delay: BASE_RETRY_DELAY,
2270 max_attempts: 3,
2271 })
2272 }
            // We don't know what these errors are, so retry them a couple of times in case they're transient.
2274 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2275 delay: BASE_RETRY_DELAY,
2276 max_attempts: 2,
2277 }),
2278 }
2279 }
2280
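    /// Schedules a retry for a failed completion using the given strategy (or
    /// one derived from the error), returning `true` if a retry was scheduled.
    /// Automatic retries only happen in Burn Mode; otherwise the error is
    /// surfaced to the user along with the option to enable Burn Mode.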
2281 fn handle_retryable_error_with_delay(
2282 &mut self,
2283 error: &LanguageModelCompletionError,
2284 strategy: Option<RetryStrategy>,
2285 model: Arc<dyn LanguageModel>,
2286 intent: CompletionIntent,
2287 window: Option<AnyWindowHandle>,
2288 cx: &mut Context<Self>,
2289 ) -> bool {
2290 // Store context for the Retry button
2291 self.last_error_context = Some((model.clone(), intent));
2292
2293 // Only auto-retry if Burn Mode is enabled
2294 if self.completion_mode != CompletionMode::Burn {
2295 // Show error with retry options
2296 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2297 message: format!(
2298 "{}\n\nTo automatically retry when similar errors happen, enable Burn Mode.",
2299 error
2300 )
2301 .into(),
2302 can_enable_burn_mode: true,
2303 }));
2304 return false;
2305 }
2306
2307 let Some(strategy) = strategy.or_else(|| Self::get_retry_strategy(error)) else {
2308 return false;
2309 };
2310
2311 let max_attempts = match &strategy {
2312 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
2313 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
2314 };
2315
2316 let retry_state = self.retry_state.get_or_insert(RetryState {
2317 attempt: 0,
2318 max_attempts,
2319 intent,
2320 });
2321
2322 retry_state.attempt += 1;
2323 let attempt = retry_state.attempt;
2324 let max_attempts = retry_state.max_attempts;
2325 let intent = retry_state.intent;
2326
2327 if attempt <= max_attempts {
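            // With the default BASE_RETRY_DELAY of 5 seconds, exponential backoff
            // yields delays of 5s, 10s, 20s, and 40s for attempts 1 through 4.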
2328 let delay = match &strategy {
2329 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
2330 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
2331 Duration::from_secs(delay_secs)
2332 }
2333 RetryStrategy::Fixed { delay, .. } => *delay,
2334 };
2335
2336 // Add a transient message to inform the user
2337 let delay_secs = delay.as_secs();
2338 let retry_message = if max_attempts == 1 {
2339 format!("{error}. Retrying in {delay_secs} seconds...")
2340 } else {
2341 format!(
2342 "{error}. Retrying (attempt {attempt} of {max_attempts}) \
2343 in {delay_secs} seconds..."
2344 )
2345 };
2346 log::warn!(
2347 "Retrying completion request (attempt {attempt} of {max_attempts}) \
2348 in {delay_secs} seconds: {error:?}",
2349 );
2350
2351 // Add a UI-only message instead of a regular message
2352 let id = self.next_message_id.post_inc();
2353 self.messages.push(Message {
2354 id,
2355 role: Role::System,
2356 segments: vec![MessageSegment::Text(retry_message)],
2357 loaded_context: LoadedContext::default(),
2358 creases: Vec::new(),
2359 is_hidden: false,
2360 ui_only: true,
2361 });
2362 cx.emit(ThreadEvent::MessageAdded(id));
2363
2364 // Schedule the retry
2365 let thread_handle = cx.entity().downgrade();
2366
2367 cx.spawn(async move |_thread, cx| {
2368 cx.background_executor().timer(delay).await;
2369
2370 thread_handle
2371 .update(cx, |thread, cx| {
2372 // Retry the completion
2373 thread.send_to_model(model, intent, window, cx);
2374 })
2375 .log_err();
2376 })
2377 .detach();
2378
2379 true
2380 } else {
2381 // Max retries exceeded
2382 self.retry_state = None;
2383
2384 // Stop generating since we're giving up on retrying.
2385 self.pending_completions.clear();
2386
2387 // Show error alongside a Retry button, but no
2388 // Enable Burn Mode button (since it's already enabled)
2389 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2390 message: format!("Failed after retrying: {}", error).into(),
2391 can_enable_burn_mode: false,
2392 }));
2393
2394 false
2395 }
2396 }
2397
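    /// Starts generating a detailed summary of the thread if the current one is
    /// stale (i.e. it doesn't cover the latest message) and an authenticated
    /// summary model is configured. The result is broadcast on
    /// `detailed_summary_tx`, and the thread is saved so the summary can be
    /// reused later.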
2398 pub fn start_generating_detailed_summary_if_needed(
2399 &mut self,
2400 thread_store: WeakEntity<ThreadStore>,
2401 cx: &mut Context<Self>,
2402 ) {
2403 let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
2404 return;
2405 };
2406
2407 match &*self.detailed_summary_rx.borrow() {
2408 DetailedSummaryState::Generating { message_id, .. }
2409 | DetailedSummaryState::Generated { message_id, .. }
2410 if *message_id == last_message_id =>
2411 {
2412 // Already up-to-date
2413 return;
2414 }
2415 _ => {}
2416 }
2417
2418 let Some(ConfiguredModel { model, provider }) =
2419 LanguageModelRegistry::read_global(cx).thread_summary_model()
2420 else {
2421 return;
2422 };
2423
2424 if !provider.is_authenticated(cx) {
2425 return;
2426 }
2427
2428 let added_user_message = include_str!("./prompts/summarize_thread_detailed_prompt.txt");
2429
2430 let request = self.to_summarize_request(
2431 &model,
2432 CompletionIntent::ThreadContextSummarization,
2433 added_user_message.into(),
2434 cx,
2435 );
2436
2437 *self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
2438 message_id: last_message_id,
2439 };
2440
2441 // Replace the detailed summarization task if there is one, cancelling it. It would probably
2442 // be better to allow the old task to complete, but this would require logic for choosing
2443 // which result to prefer (the old task could complete after the new one, resulting in a
2444 // stale summary).
2445 self.detailed_summary_task = cx.spawn(async move |thread, cx| {
2446 let stream = model.stream_completion_text(request, &cx);
2447 let Some(mut messages) = stream.await.log_err() else {
2448 thread
2449 .update(cx, |thread, _cx| {
2450 *thread.detailed_summary_tx.borrow_mut() =
2451 DetailedSummaryState::NotGenerated;
2452 })
2453 .ok()?;
2454 return None;
2455 };
2456
2457 let mut new_detailed_summary = String::new();
2458
2459 while let Some(chunk) = messages.stream.next().await {
2460 if let Some(chunk) = chunk.log_err() {
2461 new_detailed_summary.push_str(&chunk);
2462 }
2463 }
2464
2465 thread
2466 .update(cx, |thread, _cx| {
2467 *thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
2468 text: new_detailed_summary.into(),
2469 message_id: last_message_id,
2470 };
2471 })
2472 .ok()?;
2473
2474 // Save thread so its summary can be reused later
2475 if let Some(thread) = thread.upgrade() {
2476 if let Ok(Ok(save_task)) = cx.update(|cx| {
2477 thread_store
2478 .update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
2479 }) {
2480 save_task.await.log_err();
2481 }
2482 }
2483
2484 Some(())
2485 });
2486 }
2487
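    /// Waits until detailed-summary generation settles, then returns the
    /// generated summary, falling back to the thread's plain text when no
    /// detailed summary was generated.
    ///
    /// A sketch of typical usage from an async context (illustrative only):
    /// ```ignore
    /// let summary = Thread::wait_for_detailed_summary_or_text(&thread, cx).await;
    /// ```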
2488 pub async fn wait_for_detailed_summary_or_text(
2489 this: &Entity<Self>,
2490 cx: &mut AsyncApp,
2491 ) -> Option<SharedString> {
2492 let mut detailed_summary_rx = this
2493 .read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
2494 .ok()?;
2495 loop {
2496 match detailed_summary_rx.recv().await? {
2497 DetailedSummaryState::Generating { .. } => {}
2498 DetailedSummaryState::NotGenerated => {
2499 return this.read_with(cx, |this, _cx| this.text().into()).ok();
2500 }
2501 DetailedSummaryState::Generated { text, .. } => return Some(text),
2502 }
2503 }
2504 }
2505
2506 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
2507 self.detailed_summary_rx
2508 .borrow()
2509 .text()
2510 .unwrap_or_else(|| self.text().into())
2511 }
2512
2513 pub fn is_generating_detailed_summary(&self) -> bool {
2514 matches!(
2515 &*self.detailed_summary_rx.borrow(),
2516 DetailedSummaryState::Generating { .. }
2517 )
2518 }
2519
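    /// Starts every idle pending tool use against the given model and returns
    /// the tool uses that were pending. Tools that need confirmation are queued
    /// for user approval instead of running immediately.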
2520 pub fn use_pending_tools(
2521 &mut self,
2522 window: Option<AnyWindowHandle>,
2523 model: Arc<dyn LanguageModel>,
2524 cx: &mut Context<Self>,
2525 ) -> Vec<PendingToolUse> {
2526 self.auto_capture_telemetry(cx);
2527 let request =
2528 Arc::new(self.to_completion_request(model.clone(), CompletionIntent::ToolResults, cx));
2529 let pending_tool_uses = self
2530 .tool_use
2531 .pending_tool_uses()
2532 .into_iter()
2533 .filter(|tool_use| tool_use.status.is_idle())
2534 .cloned()
2535 .collect::<Vec<_>>();
2536
2537 for tool_use in pending_tool_uses.iter() {
2538 self.use_pending_tool(tool_use.clone(), request.clone(), model.clone(), window, cx);
2539 }
2540
2541 pending_tool_uses
2542 }
2543
2544 fn use_pending_tool(
2545 &mut self,
2546 tool_use: PendingToolUse,
2547 request: Arc<LanguageModelRequest>,
2548 model: Arc<dyn LanguageModel>,
2549 window: Option<AnyWindowHandle>,
2550 cx: &mut Context<Self>,
2551 ) {
2552 let Some(tool) = self.tools.read(cx).tool(&tool_use.name, cx) else {
2553 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2554 };
2555
2556 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
2557 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2558 }
2559
2560 if tool.needs_confirmation(&tool_use.input, cx)
2561 && !AgentSettings::get_global(cx).always_allow_tool_actions
2562 {
2563 self.tool_use.confirm_tool_use(
2564 tool_use.id,
2565 tool_use.ui_text,
2566 tool_use.input,
2567 request,
2568 tool,
2569 );
2570 cx.emit(ThreadEvent::ToolConfirmationNeeded);
2571 } else {
2572 self.run_tool(
2573 tool_use.id,
2574 tool_use.ui_text,
2575 tool_use.input,
2576 request,
2577 tool,
2578 model,
2579 window,
2580 cx,
2581 );
2582 }
2583 }
2584
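    /// Handles the model requesting a tool that doesn't exist or isn't enabled
    /// in the current profile: records an error tool result listing the tools
    /// that actually are available, so the model can correct itself on the
    /// next turn.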
2585 pub fn handle_hallucinated_tool_use(
2586 &mut self,
2587 tool_use_id: LanguageModelToolUseId,
2588 hallucinated_tool_name: Arc<str>,
2589 window: Option<AnyWindowHandle>,
2590 cx: &mut Context<Thread>,
2591 ) {
2592 let available_tools = self.profile.enabled_tools(cx);
2593
2594 let tool_list = available_tools
2595 .iter()
2596 .map(|(name, tool)| format!("- {}: {}", name, tool.description()))
2597 .collect::<Vec<_>>()
2598 .join("\n");
2599
2600 let error_message = format!(
2601 "The tool '{}' doesn't exist or is not enabled. Available tools:\n{}",
2602 hallucinated_tool_name, tool_list
2603 );
2604
2605 let pending_tool_use = self.tool_use.insert_tool_output(
2606 tool_use_id.clone(),
2607 hallucinated_tool_name,
2608 Err(anyhow!("Missing tool call: {error_message}")),
2609 self.configured_model.as_ref(),
2610 self.completion_mode,
2611 );
2612
2613 cx.emit(ThreadEvent::MissingToolUse {
2614 tool_use_id: tool_use_id.clone(),
2615 ui_text: error_message.into(),
2616 });
2617
2618 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2619 }
2620
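    /// Handles a tool call whose input failed to parse as JSON: records the
    /// parse error as the tool's result so the model can retry with valid input.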
2621 pub fn receive_invalid_tool_json(
2622 &mut self,
2623 tool_use_id: LanguageModelToolUseId,
2624 tool_name: Arc<str>,
2625 invalid_json: Arc<str>,
2626 error: String,
2627 window: Option<AnyWindowHandle>,
2628 cx: &mut Context<Thread>,
2629 ) {
2630 log::error!("The model returned invalid input JSON: {invalid_json}");
2631
2632 let pending_tool_use = self.tool_use.insert_tool_output(
2633 tool_use_id.clone(),
2634 tool_name,
2635 Err(anyhow!("Error parsing input JSON: {error}")),
2636 self.configured_model.as_ref(),
2637 self.completion_mode,
2638 );
2639 let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
2640 pending_tool_use.ui_text.clone()
2641 } else {
2642 log::error!(
2643 "There was no pending tool use for tool use {tool_use_id}, even though it finished (with invalid input JSON)."
2644 );
2645 format!("Unknown tool {}", tool_use_id).into()
2646 };
2647
2648 cx.emit(ThreadEvent::InvalidToolInput {
2649 tool_use_id: tool_use_id.clone(),
2650 ui_text,
2651 invalid_input_json: invalid_json,
2652 });
2653
2654 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2655 }
2656
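    /// Runs the given tool with the given input and registers the resulting
    /// task with the tool-use state so its progress can be tracked.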
2657 pub fn run_tool(
2658 &mut self,
2659 tool_use_id: LanguageModelToolUseId,
2660 ui_text: impl Into<SharedString>,
2661 input: serde_json::Value,
2662 request: Arc<LanguageModelRequest>,
2663 tool: Arc<dyn Tool>,
2664 model: Arc<dyn LanguageModel>,
2665 window: Option<AnyWindowHandle>,
2666 cx: &mut Context<Thread>,
2667 ) {
2668 let task =
2669 self.spawn_tool_use(tool_use_id.clone(), request, input, tool, model, window, cx);
2670 self.tool_use
2671 .run_pending_tool(tool_use_id, ui_text.into(), task);
2672 }
2673
2674 fn spawn_tool_use(
2675 &mut self,
2676 tool_use_id: LanguageModelToolUseId,
2677 request: Arc<LanguageModelRequest>,
2678 input: serde_json::Value,
2679 tool: Arc<dyn Tool>,
2680 model: Arc<dyn LanguageModel>,
2681 window: Option<AnyWindowHandle>,
2682 cx: &mut Context<Thread>,
2683 ) -> Task<()> {
2684 let tool_name: Arc<str> = tool.name().into();
2685
2686 let tool_result = tool.run(
2687 input,
2688 request,
2689 self.project.clone(),
2690 self.action_log.clone(),
2691 model,
2692 window,
2693 cx,
2694 );
2695
2696 // Store the card separately if it exists
2697 if let Some(card) = tool_result.card.clone() {
2698 self.tool_use
2699 .insert_tool_result_card(tool_use_id.clone(), card);
2700 }
2701
2702 cx.spawn({
2703 async move |thread: WeakEntity<Thread>, cx| {
2704 let output = tool_result.output.await;
2705
2706 thread
2707 .update(cx, |thread, cx| {
2708 let pending_tool_use = thread.tool_use.insert_tool_output(
2709 tool_use_id.clone(),
2710 tool_name,
2711 output,
2712 thread.configured_model.as_ref(),
2713 thread.completion_mode,
2714 );
2715 thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2716 })
2717 .ok();
2718 }
2719 })
2720 }
2721
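    /// Called whenever a single tool use completes. Once *all* pending tools
    /// have finished (and the turn wasn't canceled), the tool results are sent
    /// back to the model to continue the turn.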
2722 fn tool_finished(
2723 &mut self,
2724 tool_use_id: LanguageModelToolUseId,
2725 pending_tool_use: Option<PendingToolUse>,
2726 canceled: bool,
2727 window: Option<AnyWindowHandle>,
2728 cx: &mut Context<Self>,
2729 ) {
2730 if self.all_tools_finished() {
2731 if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
2732 if !canceled {
2733 self.send_to_model(model.clone(), CompletionIntent::ToolResults, window, cx);
2734 }
2735 self.auto_capture_telemetry(cx);
2736 }
2737 }
2738
2739 cx.emit(ThreadEvent::ToolFinished {
2740 tool_use_id,
2741 pending_tool_use,
2742 });
2743 }
2744
    /// Cancels the last pending completion, if any, along with any pending tool
    /// uses and any scheduled retries.
    ///
    /// Returns whether anything was actually canceled.
2748 pub fn cancel_last_completion(
2749 &mut self,
2750 window: Option<AnyWindowHandle>,
2751 cx: &mut Context<Self>,
2752 ) -> bool {
2753 let mut canceled = self.pending_completions.pop().is_some() || self.retry_state.is_some();
2754
2755 self.retry_state = None;
2756
2757 for pending_tool_use in self.tool_use.cancel_pending() {
2758 canceled = true;
2759 self.tool_finished(
2760 pending_tool_use.id.clone(),
2761 Some(pending_tool_use),
2762 true,
2763 window,
2764 cx,
2765 );
2766 }
2767
2768 if canceled {
2769 cx.emit(ThreadEvent::CompletionCanceled);
2770
2771 // When canceled, we always want to insert the checkpoint.
2772 // (We skip over finalize_pending_checkpoint, because it
2773 // would conclude we didn't have anything to insert here.)
2774 if let Some(checkpoint) = self.pending_checkpoint.take() {
2775 self.insert_checkpoint(checkpoint, cx);
2776 }
2777 } else {
2778 self.finalize_pending_checkpoint(cx);
2779 }
2780
2781 canceled
2782 }
2783
2784 /// Signals that any in-progress editing should be canceled.
2785 ///
2786 /// This method is used to notify listeners (like ActiveThread) that
2787 /// they should cancel any editing operations.
2788 pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
2789 cx.emit(ThreadEvent::CancelEditing);
2790 }
2791
2792 pub fn feedback(&self) -> Option<ThreadFeedback> {
2793 self.feedback
2794 }
2795
2796 pub fn message_feedback(&self, message_id: MessageId) -> Option<ThreadFeedback> {
2797 self.message_feedback.get(&message_id).copied()
2798 }
2799
2800 pub fn report_message_feedback(
2801 &mut self,
2802 message_id: MessageId,
2803 feedback: ThreadFeedback,
2804 cx: &mut Context<Self>,
2805 ) -> Task<Result<()>> {
2806 if self.message_feedback.get(&message_id) == Some(&feedback) {
2807 return Task::ready(Ok(()));
2808 }
2809
2810 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2811 let serialized_thread = self.serialize(cx);
2812 let thread_id = self.id().clone();
2813 let client = self.project.read(cx).client();
2814
2815 let enabled_tool_names: Vec<String> = self
2816 .profile
2817 .enabled_tools(cx)
2818 .iter()
2819 .map(|(name, _)| name.clone().into())
2820 .collect();
2821
2822 self.message_feedback.insert(message_id, feedback);
2823
2824 cx.notify();
2825
2826 let message_content = self
2827 .message(message_id)
2828 .map(|msg| msg.to_string())
2829 .unwrap_or_default();
2830
2831 cx.background_spawn(async move {
2832 let final_project_snapshot = final_project_snapshot.await;
2833 let serialized_thread = serialized_thread.await?;
2834 let thread_data =
2835 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
2836
2837 let rating = match feedback {
2838 ThreadFeedback::Positive => "positive",
2839 ThreadFeedback::Negative => "negative",
2840 };
2841 telemetry::event!(
2842 "Assistant Thread Rated",
2843 rating,
2844 thread_id,
2845 enabled_tool_names,
2846 message_id = message_id.0,
2847 message_content,
2848 thread_data,
2849 final_project_snapshot
2850 );
2851 client.telemetry().flush_events().await;
2852
2853 Ok(())
2854 })
2855 }
2856
2857 pub fn report_feedback(
2858 &mut self,
2859 feedback: ThreadFeedback,
2860 cx: &mut Context<Self>,
2861 ) -> Task<Result<()>> {
2862 let last_assistant_message_id = self
2863 .messages
2864 .iter()
2865 .rev()
2866 .find(|msg| msg.role == Role::Assistant)
2867 .map(|msg| msg.id);
2868
2869 if let Some(message_id) = last_assistant_message_id {
2870 self.report_message_feedback(message_id, feedback, cx)
2871 } else {
2872 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2873 let serialized_thread = self.serialize(cx);
2874 let thread_id = self.id().clone();
2875 let client = self.project.read(cx).client();
2876 self.feedback = Some(feedback);
2877 cx.notify();
2878
2879 cx.background_spawn(async move {
2880 let final_project_snapshot = final_project_snapshot.await;
2881 let serialized_thread = serialized_thread.await?;
2882 let thread_data = serde_json::to_value(serialized_thread)
2883 .unwrap_or_else(|_| serde_json::Value::Null);
2884
2885 let rating = match feedback {
2886 ThreadFeedback::Positive => "positive",
2887 ThreadFeedback::Negative => "negative",
2888 };
2889 telemetry::event!(
2890 "Assistant Thread Rated",
2891 rating,
2892 thread_id,
2893 thread_data,
2894 final_project_snapshot
2895 );
2896 client.telemetry().flush_events().await;
2897
2898 Ok(())
2899 })
2900 }
2901 }
2902
2903 /// Create a snapshot of the current project state including git information and unsaved buffers.
2904 fn project_snapshot(
2905 project: Entity<Project>,
2906 cx: &mut Context<Self>,
2907 ) -> Task<Arc<ProjectSnapshot>> {
2908 let git_store = project.read(cx).git_store().clone();
2909 let worktree_snapshots: Vec<_> = project
2910 .read(cx)
2911 .visible_worktrees(cx)
2912 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
2913 .collect();
2914
2915 cx.spawn(async move |_, cx| {
2916 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
2917
2918 let mut unsaved_buffers = Vec::new();
2919 cx.update(|app_cx| {
2920 let buffer_store = project.read(app_cx).buffer_store();
2921 for buffer_handle in buffer_store.read(app_cx).buffers() {
2922 let buffer = buffer_handle.read(app_cx);
2923 if buffer.is_dirty() {
2924 if let Some(file) = buffer.file() {
2925 let path = file.path().to_string_lossy().to_string();
2926 unsaved_buffers.push(path);
2927 }
2928 }
2929 }
2930 })
2931 .ok();
2932
2933 Arc::new(ProjectSnapshot {
2934 worktree_snapshots,
2935 unsaved_buffer_paths: unsaved_buffers,
2936 timestamp: Utc::now(),
2937 })
2938 })
2939 }
2940
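    /// Captures the worktree's absolute path and, when it belongs to a local
    /// git repository, its remote URL, HEAD SHA, current branch, and diff
    /// against the worktree.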
2941 fn worktree_snapshot(
2942 worktree: Entity<project::Worktree>,
2943 git_store: Entity<GitStore>,
2944 cx: &App,
2945 ) -> Task<WorktreeSnapshot> {
2946 cx.spawn(async move |cx| {
2947 // Get worktree path and snapshot
2948 let worktree_info = cx.update(|app_cx| {
2949 let worktree = worktree.read(app_cx);
2950 let path = worktree.abs_path().to_string_lossy().to_string();
2951 let snapshot = worktree.snapshot();
2952 (path, snapshot)
2953 });
2954
2955 let Ok((worktree_path, _snapshot)) = worktree_info else {
2956 return WorktreeSnapshot {
2957 worktree_path: String::new(),
2958 git_state: None,
2959 };
2960 };
2961
2962 let git_state = git_store
2963 .update(cx, |git_store, cx| {
2964 git_store
2965 .repositories()
2966 .values()
2967 .find(|repo| {
2968 repo.read(cx)
2969 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
2970 .is_some()
2971 })
2972 .cloned()
2973 })
2974 .ok()
2975 .flatten()
2976 .map(|repo| {
2977 repo.update(cx, |repo, _| {
2978 let current_branch =
2979 repo.branch.as_ref().map(|branch| branch.name().to_owned());
2980 repo.send_job(None, |state, _| async move {
2981 let RepositoryState::Local { backend, .. } = state else {
2982 return GitState {
2983 remote_url: None,
2984 head_sha: None,
2985 current_branch,
2986 diff: None,
2987 };
2988 };
2989
2990 let remote_url = backend.remote_url("origin");
2991 let head_sha = backend.head_sha().await;
2992 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
2993
2994 GitState {
2995 remote_url,
2996 head_sha,
2997 current_branch,
2998 diff,
2999 }
3000 })
3001 })
3002 });
3003
            let git_state = match git_state.and_then(|state| state.ok()) {
                Some(job) => job.await.ok(),
                None => None,
            };
3011
3012 WorktreeSnapshot {
3013 worktree_path,
3014 git_state,
3015 }
3016 })
3017 }
3018
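    /// Renders the entire thread (summary, messages, attached context, tool
    /// uses, and tool results) as a single markdown document.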
3019 pub fn to_markdown(&self, cx: &App) -> Result<String> {
3020 let mut markdown = Vec::new();
3021
3022 let summary = self.summary().or_default();
3023 writeln!(markdown, "# {summary}\n")?;
3024
3025 for message in self.messages() {
3026 writeln!(
3027 markdown,
3028 "## {role}\n",
3029 role = match message.role {
3030 Role::User => "User",
3031 Role::Assistant => "Agent",
3032 Role::System => "System",
3033 }
3034 )?;
3035
3036 if !message.loaded_context.text.is_empty() {
3037 writeln!(markdown, "{}", message.loaded_context.text)?;
3038 }
3039
3040 if !message.loaded_context.images.is_empty() {
3041 writeln!(
3042 markdown,
3043 "\n{} images attached as context.\n",
3044 message.loaded_context.images.len()
3045 )?;
3046 }
3047
3048 for segment in &message.segments {
3049 match segment {
3050 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
3051 MessageSegment::Thinking { text, .. } => {
3052 writeln!(markdown, "<think>\n{}\n</think>\n", text)?
3053 }
3054 MessageSegment::RedactedThinking(_) => {}
3055 }
3056 }
3057
3058 for tool_use in self.tool_uses_for_message(message.id, cx) {
3059 writeln!(
3060 markdown,
3061 "**Use Tool: {} ({})**",
3062 tool_use.name, tool_use.id
3063 )?;
3064 writeln!(markdown, "```json")?;
3065 writeln!(
3066 markdown,
3067 "{}",
3068 serde_json::to_string_pretty(&tool_use.input)?
3069 )?;
3070 writeln!(markdown, "```")?;
3071 }
3072
3073 for tool_result in self.tool_results_for_message(message.id) {
3074 write!(markdown, "\n**Tool Results: {}", tool_result.tool_use_id)?;
3075 if tool_result.is_error {
3076 write!(markdown, " (Error)")?;
3077 }
3078
3079 writeln!(markdown, "**\n")?;
3080 match &tool_result.content {
3081 LanguageModelToolResultContent::Text(text) => {
3082 writeln!(markdown, "{text}")?;
3083 }
                    LanguageModelToolResultContent::Image(image) => {
                        // Embed the base64-encoded image data as an inline markdown image.
                        writeln!(markdown, "![Image](data:image/png;base64,{})", image.source)?;
                    }
3087 }
3088
3089 if let Some(output) = tool_result.output.as_ref() {
3090 writeln!(
3091 markdown,
3092 "\n\nDebug Output:\n\n```json\n{}\n```\n",
3093 serde_json::to_string_pretty(output)?
3094 )?;
3095 }
3096 }
3097 }
3098
3099 Ok(String::from_utf8_lossy(&markdown).to_string())
3100 }
3101
3102 pub fn keep_edits_in_range(
3103 &mut self,
3104 buffer: Entity<language::Buffer>,
3105 buffer_range: Range<language::Anchor>,
3106 cx: &mut Context<Self>,
3107 ) {
3108 self.action_log.update(cx, |action_log, cx| {
3109 action_log.keep_edits_in_range(buffer, buffer_range, cx)
3110 });
3111 }
3112
3113 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
3114 self.action_log
3115 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
3116 }
3117
3118 pub fn reject_edits_in_ranges(
3119 &mut self,
3120 buffer: Entity<language::Buffer>,
3121 buffer_ranges: Vec<Range<language::Anchor>>,
3122 cx: &mut Context<Self>,
3123 ) -> Task<Result<()>> {
3124 self.action_log.update(cx, |action_log, cx| {
3125 action_log.reject_edits_in_ranges(buffer, buffer_ranges, cx)
3126 })
3127 }
3128
3129 pub fn action_log(&self) -> &Entity<ActionLog> {
3130 &self.action_log
3131 }
3132
3133 pub fn project(&self) -> &Entity<Project> {
3134 &self.project
3135 }
3136
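    /// Captures a serialized snapshot of the thread for telemetry, throttled to
    /// at most once every 10 seconds and gated behind a feature flag.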
3137 pub fn auto_capture_telemetry(&mut self, cx: &mut Context<Self>) {
3138 if !cx.has_flag::<feature_flags::ThreadAutoCaptureFeatureFlag>() {
3139 return;
3140 }
3141
3142 let now = Instant::now();
3143 if let Some(last) = self.last_auto_capture_at {
3144 if now.duration_since(last).as_secs() < 10 {
3145 return;
3146 }
3147 }
3148
3149 self.last_auto_capture_at = Some(now);
3150
3151 let thread_id = self.id().clone();
3152 let github_login = self
3153 .project
3154 .read(cx)
3155 .user_store()
3156 .read(cx)
3157 .current_user()
3158 .map(|user| user.github_login.clone());
3159 let client = self.project.read(cx).client();
3160 let serialize_task = self.serialize(cx);
3161
3162 cx.background_executor()
3163 .spawn(async move {
3164 if let Ok(serialized_thread) = serialize_task.await {
3165 if let Ok(thread_data) = serde_json::to_value(serialized_thread) {
3166 telemetry::event!(
3167 "Agent Thread Auto-Captured",
3168 thread_id = thread_id.to_string(),
3169 thread_data = thread_data,
3170 auto_capture_reason = "tracked_user",
3171 github_login = github_login
3172 );
3173
3174 client.telemetry().flush_events().await;
3175 }
3176 }
3177 })
3178 .detach();
3179 }
3180
3181 pub fn cumulative_token_usage(&self) -> TokenUsage {
3182 self.cumulative_token_usage
3183 }
3184
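    /// Returns the token usage recorded just before the given message, along
    /// with the model's maximum token count for the current completion mode.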
3185 pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
3186 let Some(model) = self.configured_model.as_ref() else {
3187 return TotalTokenUsage::default();
3188 };
3189
3190 let max = model
3191 .model
3192 .max_token_count_for_mode(self.completion_mode().into());
3193
3194 let index = self
3195 .messages
3196 .iter()
3197 .position(|msg| msg.id == message_id)
3198 .unwrap_or(0);
3199
3200 if index == 0 {
3201 return TotalTokenUsage { total: 0, max };
3202 }
3203
3204 let token_usage = &self
3205 .request_token_usage
3206 .get(index - 1)
3207 .cloned()
3208 .unwrap_or_default();
3209
3210 TotalTokenUsage {
3211 total: token_usage.total_tokens(),
3212 max,
3213 }
3214 }
3215
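    /// Returns the thread's total token usage against the configured model's
    /// context window, preferring the exact count reported when the window was
    /// exceeded over our running estimate.
    ///
    /// A sketch of how a caller might compute context-window pressure
    /// (illustrative only):
    /// ```ignore
    /// if let Some(usage) = thread.total_token_usage() {
    ///     let ratio = usage.total as f64 / usage.max as f64;
    /// }
    /// ```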
3216 pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
3217 let model = self.configured_model.as_ref()?;
3218
3219 let max = model
3220 .model
3221 .max_token_count_for_mode(self.completion_mode().into());
3222
3223 if let Some(exceeded_error) = &self.exceeded_window_error {
3224 if model.model.id() == exceeded_error.model_id {
3225 return Some(TotalTokenUsage {
3226 total: exceeded_error.token_count,
3227 max,
3228 });
3229 }
3230 }
3231
3232 let total = self
3233 .token_usage_at_last_message()
3234 .unwrap_or_default()
3235 .total_tokens();
3236
3237 Some(TotalTokenUsage { total, max })
3238 }
3239
3240 fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
3241 self.request_token_usage
3242 .get(self.messages.len().saturating_sub(1))
3243 .or_else(|| self.request_token_usage.last())
3244 .cloned()
3245 }
3246
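    /// Records the latest request's token usage against the last message,
    /// padding the per-message usage history with the previous value if new
    /// messages arrived since the last update.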
3247 fn update_token_usage_at_last_message(&mut self, token_usage: TokenUsage) {
3248 let placeholder = self.token_usage_at_last_message().unwrap_or_default();
3249 self.request_token_usage
3250 .resize(self.messages.len(), placeholder);
3251
3252 if let Some(last) = self.request_token_usage.last_mut() {
3253 *last = token_usage;
3254 }
3255 }
3256
3257 fn update_model_request_usage(&self, amount: u32, limit: UsageLimit, cx: &mut Context<Self>) {
3258 self.project.update(cx, |project, cx| {
3259 project.user_store().update(cx, |user_store, cx| {
3260 user_store.update_model_request_usage(
3261 ModelRequestUsage(RequestUsage {
3262 amount: amount as i32,
3263 limit,
3264 }),
3265 cx,
3266 )
3267 })
3268 });
3269 }
3270
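    /// Records a user-denied tool use as an error result and finishes the tool
    /// as canceled, so no follow-up completion is sent for it.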
3271 pub fn deny_tool_use(
3272 &mut self,
3273 tool_use_id: LanguageModelToolUseId,
3274 tool_name: Arc<str>,
3275 window: Option<AnyWindowHandle>,
3276 cx: &mut Context<Self>,
3277 ) {
3278 let err = Err(anyhow::anyhow!(
3279 "Permission to run tool action denied by user"
3280 ));
3281
3282 self.tool_use.insert_tool_output(
3283 tool_use_id.clone(),
3284 tool_name,
3285 err,
3286 self.configured_model.as_ref(),
3287 self.completion_mode,
3288 );
3289 self.tool_finished(tool_use_id.clone(), None, true, window, cx);
3290 }
3291}
3292
3293#[derive(Debug, Clone, Error)]
3294pub enum ThreadError {
3295 #[error("Payment required")]
3296 PaymentRequired,
3297 #[error("Model request limit reached")]
3298 ModelRequestLimitReached { plan: Plan },
3299 #[error("Message {header}: {message}")]
3300 Message {
3301 header: SharedString,
3302 message: SharedString,
3303 },
3304 #[error("Retryable error: {message}")]
3305 RetryableError {
3306 message: SharedString,
3307 can_enable_burn_mode: bool,
3308 },
3309}
3310
3311#[derive(Debug, Clone)]
3312pub enum ThreadEvent {
3313 ShowError(ThreadError),
3314 StreamedCompletion,
3315 ReceivedTextChunk,
3316 NewRequest,
3317 StreamedAssistantText(MessageId, String),
3318 StreamedAssistantThinking(MessageId, String),
3319 StreamedToolUse {
3320 tool_use_id: LanguageModelToolUseId,
3321 ui_text: Arc<str>,
3322 input: serde_json::Value,
3323 },
3324 MissingToolUse {
3325 tool_use_id: LanguageModelToolUseId,
3326 ui_text: Arc<str>,
3327 },
3328 InvalidToolInput {
3329 tool_use_id: LanguageModelToolUseId,
3330 ui_text: Arc<str>,
3331 invalid_input_json: Arc<str>,
3332 },
3333 Stopped(Result<StopReason, Arc<anyhow::Error>>),
3334 MessageAdded(MessageId),
3335 MessageEdited(MessageId),
3336 MessageDeleted(MessageId),
3337 SummaryGenerated,
3338 SummaryChanged,
3339 UsePendingTools {
3340 tool_uses: Vec<PendingToolUse>,
3341 },
3342 ToolFinished {
3343 #[allow(unused)]
3344 tool_use_id: LanguageModelToolUseId,
3345 /// The pending tool use that corresponds to this tool.
3346 pending_tool_use: Option<PendingToolUse>,
3347 },
3348 CheckpointChanged,
3349 ToolConfirmationNeeded,
3350 ToolUseLimitReached,
3351 CancelEditing,
3352 CompletionCanceled,
3353 ProfileChanged,
3354}
3355
3356impl EventEmitter<ThreadEvent> for Thread {}
3357
3358struct PendingCompletion {
3359 id: usize,
3360 queue_state: QueueState,
3361 _task: Task<()>,
3362}
3363
3364#[cfg(test)]
3365mod tests {
3366 use super::*;
3367 use crate::{
3368 context::load_context, context_store::ContextStore, thread_store, thread_store::ThreadStore,
3369 };

    use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
3374 use assistant_tool::ToolRegistry;
3375 use assistant_tools;
3376 use futures::StreamExt;
3377 use futures::future::BoxFuture;
3378 use futures::stream::BoxStream;
3379 use gpui::TestAppContext;
3380 use http_client;
3381 use language_model::fake_provider::{FakeLanguageModel, FakeLanguageModelProvider};
3382 use language_model::{
3383 LanguageModelCompletionError, LanguageModelName, LanguageModelProviderId,
3384 LanguageModelProviderName, LanguageModelToolChoice,
3385 };
3386 use parking_lot::Mutex;
3387 use project::{FakeFs, Project};
3388 use prompt_store::PromptBuilder;
3389 use serde_json::json;
3390 use settings::{Settings, SettingsStore};
3391 use std::sync::Arc;
3392 use std::time::Duration;
3393 use theme::ThemeSettings;
3394 use util::path;
3395 use workspace::Workspace;

    // Test-specific constants
    const TEST_RATE_LIMIT_RETRY_SECS: u64 = 30;
3396
3397 #[gpui::test]
3398 async fn test_message_with_context(cx: &mut TestAppContext) {
3399 init_test_settings(cx);
3400
3401 let project = create_test_project(
3402 cx,
3403 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3404 )
3405 .await;
3406
3407 let (_workspace, _thread_store, thread, context_store, model) =
3408 setup_test_environment(cx, project.clone()).await;
3409
3410 add_file_to_context(&project, &context_store, "test/code.rs", cx)
3411 .await
3412 .unwrap();
3413
3414 let context =
3415 context_store.read_with(cx, |store, _| store.context().next().cloned().unwrap());
3416 let loaded_context = cx
3417 .update(|cx| load_context(vec![context], &project, &None, cx))
3418 .await;
3419
3420 // Insert user message with context
3421 let message_id = thread.update(cx, |thread, cx| {
3422 thread.insert_user_message(
3423 "Please explain this code",
3424 loaded_context,
3425 None,
3426 Vec::new(),
3427 cx,
3428 )
3429 });
3430
3431 // Check content and context in message object
3432 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3433
3434 // Use different path format strings based on platform for the test
3435 #[cfg(windows)]
3436 let path_part = r"test\code.rs";
3437 #[cfg(not(windows))]
3438 let path_part = "test/code.rs";
3439
3440 let expected_context = format!(
3441 r#"
3442<context>
3443The following items were attached by the user. They are up-to-date and don't need to be re-read.
3444
3445<files>
3446```rs {path_part}
3447fn main() {{
3448 println!("Hello, world!");
3449}}
3450```
3451</files>
3452</context>
3453"#
3454 );
3455
3456 assert_eq!(message.role, Role::User);
3457 assert_eq!(message.segments.len(), 1);
3458 assert_eq!(
3459 message.segments[0],
3460 MessageSegment::Text("Please explain this code".to_string())
3461 );
3462 assert_eq!(message.loaded_context.text, expected_context);
3463
3464 // Check message in request
3465 let request = thread.update(cx, |thread, cx| {
3466 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3467 });
3468
3469 assert_eq!(request.messages.len(), 2);
3470 let expected_full_message = format!("{}Please explain this code", expected_context);
3471 assert_eq!(request.messages[1].string_contents(), expected_full_message);
3472 }
3473
3474 #[gpui::test]
3475 async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
3476 init_test_settings(cx);
3477
3478 let project = create_test_project(
3479 cx,
3480 json!({
3481 "file1.rs": "fn function1() {}\n",
3482 "file2.rs": "fn function2() {}\n",
3483 "file3.rs": "fn function3() {}\n",
3484 "file4.rs": "fn function4() {}\n",
3485 }),
3486 )
3487 .await;
3488
3489 let (_, _thread_store, thread, context_store, model) =
3490 setup_test_environment(cx, project.clone()).await;
3491
3492 // First message with context 1
3493 add_file_to_context(&project, &context_store, "test/file1.rs", cx)
3494 .await
3495 .unwrap();
3496 let new_contexts = context_store.update(cx, |store, cx| {
3497 store.new_context_for_thread(thread.read(cx), None)
3498 });
3499 assert_eq!(new_contexts.len(), 1);
3500 let loaded_context = cx
3501 .update(|cx| load_context(new_contexts, &project, &None, cx))
3502 .await;
3503 let message1_id = thread.update(cx, |thread, cx| {
3504 thread.insert_user_message("Message 1", loaded_context, None, Vec::new(), cx)
3505 });
3506
3507 // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
3508 add_file_to_context(&project, &context_store, "test/file2.rs", cx)
3509 .await
3510 .unwrap();
3511 let new_contexts = context_store.update(cx, |store, cx| {
3512 store.new_context_for_thread(thread.read(cx), None)
3513 });
3514 assert_eq!(new_contexts.len(), 1);
3515 let loaded_context = cx
3516 .update(|cx| load_context(new_contexts, &project, &None, cx))
3517 .await;
3518 let message2_id = thread.update(cx, |thread, cx| {
3519 thread.insert_user_message("Message 2", loaded_context, None, Vec::new(), cx)
3520 });
3521
3522 // Third message with all three contexts (contexts 1 and 2 should be skipped)
3524 add_file_to_context(&project, &context_store, "test/file3.rs", cx)
3525 .await
3526 .unwrap();
3527 let new_contexts = context_store.update(cx, |store, cx| {
3528 store.new_context_for_thread(thread.read(cx), None)
3529 });
3530 assert_eq!(new_contexts.len(), 1);
3531 let loaded_context = cx
3532 .update(|cx| load_context(new_contexts, &project, &None, cx))
3533 .await;
3534 let message3_id = thread.update(cx, |thread, cx| {
3535 thread.insert_user_message("Message 3", loaded_context, None, Vec::new(), cx)
3536 });
3537
3538 // Check what contexts are included in each message
3539 let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
3540 (
3541 thread.message(message1_id).unwrap().clone(),
3542 thread.message(message2_id).unwrap().clone(),
3543 thread.message(message3_id).unwrap().clone(),
3544 )
3545 });
3546
3547 // First message should include context 1
3548 assert!(message1.loaded_context.text.contains("file1.rs"));
3549
3550 // Second message should include only context 2 (not 1)
3551 assert!(!message2.loaded_context.text.contains("file1.rs"));
3552 assert!(message2.loaded_context.text.contains("file2.rs"));
3553
3554 // Third message should include only context 3 (not 1 or 2)
3555 assert!(!message3.loaded_context.text.contains("file1.rs"));
3556 assert!(!message3.loaded_context.text.contains("file2.rs"));
3557 assert!(message3.loaded_context.text.contains("file3.rs"));
3558
3559 // Check entire request to make sure all contexts are properly included
3560 let request = thread.update(cx, |thread, cx| {
3561 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3562 });
3563
3564 // The request should contain all 3 messages
3565 assert_eq!(request.messages.len(), 4);
3566
3567 // Check that the contexts are properly formatted in each message
3568 assert!(request.messages[1].string_contents().contains("file1.rs"));
3569 assert!(!request.messages[1].string_contents().contains("file2.rs"));
3570 assert!(!request.messages[1].string_contents().contains("file3.rs"));
3571
3572 assert!(!request.messages[2].string_contents().contains("file1.rs"));
3573 assert!(request.messages[2].string_contents().contains("file2.rs"));
3574 assert!(!request.messages[2].string_contents().contains("file3.rs"));
3575
3576 assert!(!request.messages[3].string_contents().contains("file1.rs"));
3577 assert!(!request.messages[3].string_contents().contains("file2.rs"));
3578 assert!(request.messages[3].string_contents().contains("file3.rs"));
3579
3580 add_file_to_context(&project, &context_store, "test/file4.rs", cx)
3581 .await
3582 .unwrap();
3583 let new_contexts = context_store.update(cx, |store, cx| {
3584 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3585 });
3586 assert_eq!(new_contexts.len(), 3);
3587 let loaded_context = cx
3588 .update(|cx| load_context(new_contexts, &project, &None, cx))
3589 .await
3590 .loaded_context;
3591
3592 assert!(!loaded_context.text.contains("file1.rs"));
3593 assert!(loaded_context.text.contains("file2.rs"));
3594 assert!(loaded_context.text.contains("file3.rs"));
3595 assert!(loaded_context.text.contains("file4.rs"));
3596
3597 let new_contexts = context_store.update(cx, |store, cx| {
3598 // Remove file4.rs
3599 store.remove_context(&loaded_context.contexts[2].handle(), cx);
3600 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3601 });
3602 assert_eq!(new_contexts.len(), 2);
3603 let loaded_context = cx
3604 .update(|cx| load_context(new_contexts, &project, &None, cx))
3605 .await
3606 .loaded_context;
3607
3608 assert!(!loaded_context.text.contains("file1.rs"));
3609 assert!(loaded_context.text.contains("file2.rs"));
3610 assert!(loaded_context.text.contains("file3.rs"));
3611 assert!(!loaded_context.text.contains("file4.rs"));
3612
3613 let new_contexts = context_store.update(cx, |store, cx| {
3614 // Remove file3.rs
3615 store.remove_context(&loaded_context.contexts[1].handle(), cx);
3616 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3617 });
3618 assert_eq!(new_contexts.len(), 1);
3619 let loaded_context = cx
3620 .update(|cx| load_context(new_contexts, &project, &None, cx))
3621 .await
3622 .loaded_context;
3623
3624 assert!(!loaded_context.text.contains("file1.rs"));
3625 assert!(loaded_context.text.contains("file2.rs"));
3626 assert!(!loaded_context.text.contains("file3.rs"));
3627 assert!(!loaded_context.text.contains("file4.rs"));
3628 }
3629
3630 #[gpui::test]
3631 async fn test_message_without_files(cx: &mut TestAppContext) {
3632 init_test_settings(cx);
3633
3634 let project = create_test_project(
3635 cx,
3636 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3637 )
3638 .await;
3639
3640 let (_, _thread_store, thread, _context_store, model) =
3641 setup_test_environment(cx, project.clone()).await;
3642
3643 // Insert user message without any context (empty context vector)
3644 let message_id = thread.update(cx, |thread, cx| {
3645 thread.insert_user_message(
3646 "What is the best way to learn Rust?",
3647 ContextLoadResult::default(),
3648 None,
3649 Vec::new(),
3650 cx,
3651 )
3652 });
3653
3654 // Check content and context in message object
3655 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3656
3657 // Context should be empty when no files are included
3658 assert_eq!(message.role, Role::User);
3659 assert_eq!(message.segments.len(), 1);
3660 assert_eq!(
3661 message.segments[0],
3662 MessageSegment::Text("What is the best way to learn Rust?".to_string())
3663 );
3664 assert_eq!(message.loaded_context.text, "");
3665
3666 // Check message in request
3667 let request = thread.update(cx, |thread, cx| {
3668 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3669 });
3670
3671 assert_eq!(request.messages.len(), 2);
3672 assert_eq!(
3673 request.messages[1].string_contents(),
3674 "What is the best way to learn Rust?"
3675 );
3676
3677 // Add second message, also without context
3678 let message2_id = thread.update(cx, |thread, cx| {
3679 thread.insert_user_message(
3680 "Are there any good books?",
3681 ContextLoadResult::default(),
3682 None,
3683 Vec::new(),
3684 cx,
3685 )
3686 });
3687
3688 let message2 =
3689 thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
3690 assert_eq!(message2.loaded_context.text, "");
3691
3692 // Check that both messages appear in the request
3693 let request = thread.update(cx, |thread, cx| {
3694 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3695 });
3696
3697 assert_eq!(request.messages.len(), 3);
3698 assert_eq!(
3699 request.messages[1].string_contents(),
3700 "What is the best way to learn Rust?"
3701 );
3702 assert_eq!(
3703 request.messages[2].string_contents(),
3704 "Are there any good books?"
3705 );
3706 }
3707
3708 #[gpui::test]
3709 #[ignore] // turn this test on when project_notifications tool is re-enabled
3710 async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
3711 init_test_settings(cx);
3712
3713 let project = create_test_project(
3714 cx,
3715 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3716 )
3717 .await;
3718
3719 let (_workspace, _thread_store, thread, context_store, model) =
3720 setup_test_environment(cx, project.clone()).await;
3721
3722 // Add a buffer to the context. This will be a tracked buffer
3723 let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
3724 .await
3725 .unwrap();
3726
3727 let context = context_store
3728 .read_with(cx, |store, _| store.context().next().cloned())
3729 .unwrap();
3730 let loaded_context = cx
3731 .update(|cx| load_context(vec![context], &project, &None, cx))
3732 .await;
3733
3734 // Insert user message and assistant response
3735 thread.update(cx, |thread, cx| {
3736 thread.insert_user_message("Explain this code", loaded_context, None, Vec::new(), cx);
3737 thread.insert_assistant_message(
3738 vec![MessageSegment::Text("This code prints 42.".into())],
3739 cx,
3740 );
3741 });
3742 cx.run_until_parked();
3743
3744 // We shouldn't have a stale buffer notification yet
3745 let notifications = thread.read_with(cx, |thread, _| {
3746 find_tool_uses(thread, "project_notifications")
3747 });
3748 assert!(
3749 notifications.is_empty(),
3750 "Should not have stale buffer notification before buffer is modified"
3751 );
3752
3753 // Modify the buffer
3754 buffer.update(cx, |buffer, cx| {
3755 buffer.edit(
3756 [(1..1, "\n println!(\"Added a new line\");\n")],
3757 None,
3758 cx,
3759 );
3760 });
3761
3762 // Insert another user message
3763 thread.update(cx, |thread, cx| {
3764 thread.insert_user_message(
3765 "What does the code do now?",
3766 ContextLoadResult::default(),
3767 None,
3768 Vec::new(),
3769 cx,
3770 )
3771 });
3772 cx.run_until_parked();
3773
3774 // Check for the stale buffer warning
3775 thread.update(cx, |thread, cx| {
3776 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3777 });
3778 cx.run_until_parked();
3779
3780 let notifications = thread.read_with(cx, |thread, _cx| {
3781 find_tool_uses(thread, "project_notifications")
3782 });
3783
3784 let [notification] = notifications.as_slice() else {
3785 panic!("Should have a `project_notifications` tool use");
3786 };
3787
3788 let Some(notification_content) = notification.content.to_str() else {
3789 panic!("`project_notifications` should return text");
3790 };
3791
3792 assert!(notification_content.contains("These files have changed since the last read:"));
3793 assert!(notification_content.contains("code.rs"));
3794
3795 // Insert another user message and flush notifications again
3796 thread.update(cx, |thread, cx| {
3797 thread.insert_user_message(
3798 "Can you tell me more?",
3799 ContextLoadResult::default(),
3800 None,
3801 Vec::new(),
3802 cx,
3803 )
3804 });
3805
3806 thread.update(cx, |thread, cx| {
3807 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3808 });
3809 cx.run_until_parked();
3810
3811 // There should be no new notifications (we already flushed one)
3812 let notifications = thread.read_with(cx, |thread, _cx| {
3813 find_tool_uses(thread, "project_notifications")
3814 });
3815
3816 assert_eq!(
3817 notifications.len(),
3818 1,
3819 "Should still have only one notification after second flush - no duplicates"
3820 );
3821 }
3822
3823 fn find_tool_uses(thread: &Thread, tool_name: &str) -> Vec<LanguageModelToolResult> {
3824 thread
3825 .messages()
3826 .flat_map(|message| {
3827 thread
3828 .tool_results_for_message(message.id)
3829 .into_iter()
3830 .filter(|result| result.tool_name == tool_name.into())
3831 .cloned()
3832 .collect::<Vec<_>>()
3833 })
3834 .collect()
3835 }
3836
3837 #[gpui::test]
3838 async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
3839 init_test_settings(cx);
3840
3841 let project = create_test_project(
3842 cx,
3843 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3844 )
3845 .await;
3846
3847 let (_workspace, thread_store, thread, _context_store, _model) =
3848 setup_test_environment(cx, project.clone()).await;
3849
3850 // Check that we are starting with the default profile
3851 let profile = cx.read(|cx| thread.read(cx).profile.clone());
3852 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3853 assert_eq!(
3854 profile,
3855 AgentProfile::new(AgentProfileId::default(), tool_set)
3856 );
3857 }
3858
3859 #[gpui::test]
3860 async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
3861 init_test_settings(cx);
3862
3863 let project = create_test_project(
3864 cx,
3865 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3866 )
3867 .await;
3868
3869 let (_workspace, thread_store, thread, _context_store, _model) =
3870 setup_test_environment(cx, project.clone()).await;
3871
3872 // Profile gets serialized with default values
3873 let serialized = thread
3874 .update(cx, |thread, cx| thread.serialize(cx))
3875 .await
3876 .unwrap();
3877
3878 assert_eq!(serialized.profile, Some(AgentProfileId::default()));
3879
3880 let deserialized = cx.update(|cx| {
3881 thread.update(cx, |thread, cx| {
3882 Thread::deserialize(
3883 thread.id.clone(),
3884 serialized,
3885 thread.project.clone(),
3886 thread.tools.clone(),
3887 thread.prompt_builder.clone(),
3888 thread.project_context.clone(),
3889 None,
3890 cx,
3891 )
3892 })
3893 });
3894 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3895
3896 assert_eq!(
3897 deserialized.profile,
3898 AgentProfile::new(AgentProfileId::default(), tool_set)
3899 );
3900 }
3901
3902 #[gpui::test]
3903 async fn test_temperature_setting(cx: &mut TestAppContext) {
3904 init_test_settings(cx);
3905
3906 let project = create_test_project(
3907 cx,
3908 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3909 )
3910 .await;
3911
3912 let (_workspace, _thread_store, thread, _context_store, model) =
3913 setup_test_environment(cx, project.clone()).await;
3914
3915 // Both model and provider
3916 cx.update(|cx| {
3917 AgentSettings::override_global(
3918 AgentSettings {
3919 model_parameters: vec![LanguageModelParameters {
3920 provider: Some(model.provider_id().0.to_string().into()),
3921 model: Some(model.id().0.clone()),
3922 temperature: Some(0.66),
3923 }],
3924 ..AgentSettings::get_global(cx).clone()
3925 },
3926 cx,
3927 );
3928 });
3929
3930 let request = thread.update(cx, |thread, cx| {
3931 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3932 });
3933 assert_eq!(request.temperature, Some(0.66));
3934
3935 // Only model
3936 cx.update(|cx| {
3937 AgentSettings::override_global(
3938 AgentSettings {
3939 model_parameters: vec![LanguageModelParameters {
3940 provider: None,
3941 model: Some(model.id().0.clone()),
3942 temperature: Some(0.66),
3943 }],
3944 ..AgentSettings::get_global(cx).clone()
3945 },
3946 cx,
3947 );
3948 });
3949
3950 let request = thread.update(cx, |thread, cx| {
3951 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3952 });
3953 assert_eq!(request.temperature, Some(0.66));
3954
3955 // Only provider
3956 cx.update(|cx| {
3957 AgentSettings::override_global(
3958 AgentSettings {
3959 model_parameters: vec![LanguageModelParameters {
3960 provider: Some(model.provider_id().0.to_string().into()),
3961 model: None,
3962 temperature: Some(0.66),
3963 }],
3964 ..AgentSettings::get_global(cx).clone()
3965 },
3966 cx,
3967 );
3968 });
3969
3970 let request = thread.update(cx, |thread, cx| {
3971 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3972 });
3973 assert_eq!(request.temperature, Some(0.66));
3974
3975 // Same model name, different provider
3976 cx.update(|cx| {
3977 AgentSettings::override_global(
3978 AgentSettings {
3979 model_parameters: vec![LanguageModelParameters {
3980 provider: Some("anthropic".into()),
3981 model: Some(model.id().0.clone()),
3982 temperature: Some(0.66),
3983 }],
3984 ..AgentSettings::get_global(cx).clone()
3985 },
3986 cx,
3987 );
3988 });
3989
3990 let request = thread.update(cx, |thread, cx| {
3991 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3992 });
3993 assert_eq!(request.temperature, None);
3994 }
3995
3996 #[gpui::test]
3997 async fn test_thread_summary(cx: &mut TestAppContext) {
3998 init_test_settings(cx);
3999
4000 let project = create_test_project(cx, json!({})).await;
4001
4002 let (_, _thread_store, thread, _context_store, model) =
4003 setup_test_environment(cx, project.clone()).await;
4004
4005 // Initial state should be pending
4006 thread.read_with(cx, |thread, _| {
4007 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4008 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4009 });
4010
4011 // Manually setting the summary should not be allowed in this state
4012 thread.update(cx, |thread, cx| {
4013 thread.set_summary("This should not work", cx);
4014 });
4015
4016 thread.read_with(cx, |thread, _| {
4017 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4018 });
4019
4020 // Send a message
4021 thread.update(cx, |thread, cx| {
4022 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
4023 thread.send_to_model(
4024 model.clone(),
4025 CompletionIntent::ThreadSummarization,
4026 None,
4027 cx,
4028 );
4029 });
4030
4031 let fake_model = model.as_fake();
4032 simulate_successful_response(&fake_model, cx);
4033
4034 // Should start generating summary when there are >= 2 messages
4035 thread.read_with(cx, |thread, _| {
4036 assert_eq!(*thread.summary(), ThreadSummary::Generating);
4037 });
4038
4039 // Should not be able to set the summary while generating
4040 thread.update(cx, |thread, cx| {
4041 thread.set_summary("This should not work either", cx);
4042 });
4043
4044 thread.read_with(cx, |thread, _| {
4045 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4046 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4047 });
4048
4049 cx.run_until_parked();
4050 fake_model.stream_last_completion_response("Brief");
4051 fake_model.stream_last_completion_response(" Introduction");
4052 fake_model.end_last_completion_stream();
4053 cx.run_until_parked();
4054
4055 // Summary should be set
4056 thread.read_with(cx, |thread, _| {
4057 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4058 assert_eq!(thread.summary().or_default(), "Brief Introduction");
4059 });
4060
4061 // Now we should be able to set a summary
4062 thread.update(cx, |thread, cx| {
4063 thread.set_summary("Brief Intro", cx);
4064 });
4065
4066 thread.read_with(cx, |thread, _| {
4067 assert_eq!(thread.summary().or_default(), "Brief Intro");
4068 });
4069
4070 // Test setting an empty summary (should default to DEFAULT)
4071 thread.update(cx, |thread, cx| {
4072 thread.set_summary("", cx);
4073 });
4074
4075 thread.read_with(cx, |thread, _| {
4076 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4077 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4078 });
4079 }
4080
4081 #[gpui::test]
4082 async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
4083 init_test_settings(cx);
4084
4085 let project = create_test_project(cx, json!({})).await;
4086
4087 let (_, _thread_store, thread, _context_store, model) =
4088 setup_test_environment(cx, project.clone()).await;
4089
4090 test_summarize_error(&model, &thread, cx);
4091
4092 // Now we should be able to set a summary
4093 thread.update(cx, |thread, cx| {
4094 thread.set_summary("Brief Intro", cx);
4095 });
4096
4097 thread.read_with(cx, |thread, _| {
4098 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4099 assert_eq!(thread.summary().or_default(), "Brief Intro");
4100 });
4101 }
4102
4103 #[gpui::test]
4104 async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
4105 init_test_settings(cx);
4106
4107 let project = create_test_project(cx, json!({})).await;
4108
4109 let (_, _thread_store, thread, _context_store, model) =
4110 setup_test_environment(cx, project.clone()).await;
4111
4112 test_summarize_error(&model, &thread, cx);
4113
4114 // Sending another message should not trigger another summarize request
4115 thread.update(cx, |thread, cx| {
4116 thread.insert_user_message(
4117 "How are you?",
4118 ContextLoadResult::default(),
4119 None,
4120 vec![],
4121 cx,
4122 );
4123 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4124 });
4125
4126 let fake_model = model.as_fake();
4127 simulate_successful_response(&fake_model, cx);
4128
4129 thread.read_with(cx, |thread, _| {
4130 // State is still Error, not Generating
4131 assert!(matches!(thread.summary(), ThreadSummary::Error));
4132 });
4133
4134 // But the summarize request can be invoked manually
4135 thread.update(cx, |thread, cx| {
4136 thread.summarize(cx);
4137 });
4138
4139 thread.read_with(cx, |thread, _| {
4140 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4141 });
4142
4143 cx.run_until_parked();
4144 fake_model.stream_last_completion_response("A successful summary");
4145 fake_model.end_last_completion_stream();
4146 cx.run_until_parked();
4147
4148 thread.read_with(cx, |thread, _| {
4149 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4150 assert_eq!(thread.summary().or_default(), "A successful summary");
4151 });
4152 }
4153
    // Helpers for building a model that always fails with a configured error
4155 enum TestError {
4156 Overloaded,
4157 InternalServerError,
4158 }
4159
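    // Wraps a FakeLanguageModel but replaces every completion stream with a single
    // injected error, so tests can exercise the retry paths deterministically.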
4160 struct ErrorInjector {
4161 inner: Arc<FakeLanguageModel>,
4162 error_type: TestError,
4163 }
4164
4165 impl ErrorInjector {
4166 fn new(error_type: TestError) -> Self {
4167 Self {
4168 inner: Arc::new(FakeLanguageModel::default()),
4169 error_type,
4170 }
4171 }
4172 }
4173
4174 impl LanguageModel for ErrorInjector {
4175 fn id(&self) -> LanguageModelId {
4176 self.inner.id()
4177 }
4178
4179 fn name(&self) -> LanguageModelName {
4180 self.inner.name()
4181 }
4182
4183 fn provider_id(&self) -> LanguageModelProviderId {
4184 self.inner.provider_id()
4185 }
4186
4187 fn provider_name(&self) -> LanguageModelProviderName {
4188 self.inner.provider_name()
4189 }
4190
4191 fn supports_tools(&self) -> bool {
4192 self.inner.supports_tools()
4193 }
4194
4195 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4196 self.inner.supports_tool_choice(choice)
4197 }
4198
4199 fn supports_images(&self) -> bool {
4200 self.inner.supports_images()
4201 }
4202
4203 fn telemetry_id(&self) -> String {
4204 self.inner.telemetry_id()
4205 }
4206
4207 fn max_token_count(&self) -> u64 {
4208 self.inner.max_token_count()
4209 }
4210
4211 fn count_tokens(
4212 &self,
4213 request: LanguageModelRequest,
4214 cx: &App,
4215 ) -> BoxFuture<'static, Result<u64>> {
4216 self.inner.count_tokens(request, cx)
4217 }
4218
4219 fn stream_completion(
4220 &self,
4221 _request: LanguageModelRequest,
4222 _cx: &AsyncApp,
4223 ) -> BoxFuture<
4224 'static,
4225 Result<
4226 BoxStream<
4227 'static,
4228 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4229 >,
4230 LanguageModelCompletionError,
4231 >,
4232 > {
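            // Build the configured error; the returned stream yields it as its only
            // event, simulating a provider that fails immediately.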
4233 let error = match self.error_type {
4234 TestError::Overloaded => LanguageModelCompletionError::ServerOverloaded {
4235 provider: self.provider_name(),
4236 retry_after: None,
4237 },
4238 TestError::InternalServerError => {
4239 LanguageModelCompletionError::ApiInternalServerError {
4240 provider: self.provider_name(),
4241 message: "I'm a teapot orbiting the sun".to_string(),
4242 }
4243 }
4244 };
4245 async move {
4246 let stream = futures::stream::once(async move { Err(error) });
4247 Ok(stream.boxed())
4248 }
4249 .boxed()
4250 }
4251
4252 fn as_fake(&self) -> &FakeLanguageModel {
4253 &self.inner
4254 }
4255 }
4256
4257 #[gpui::test]
4258 async fn test_retry_on_overloaded_error(cx: &mut TestAppContext) {
4259 init_test_settings(cx);
4260
4261 let project = create_test_project(cx, json!({})).await;
4262 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4263
4264 // Enable Burn Mode to allow retries
4265 thread.update(cx, |thread, _| {
4266 thread.set_completion_mode(CompletionMode::Burn);
4267 });
4268
4269 // Create model that returns overloaded error
4270 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4271
4272 // Insert a user message
4273 thread.update(cx, |thread, cx| {
4274 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4275 });
4276
4277 // Start completion
4278 thread.update(cx, |thread, cx| {
4279 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4280 });
4281
4282 cx.run_until_parked();
4283
4284 thread.read_with(cx, |thread, _| {
4285 assert!(thread.retry_state.is_some(), "Should have retry state");
4286 let retry_state = thread.retry_state.as_ref().unwrap();
4287 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4288 assert_eq!(
4289 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
4290 "Should retry MAX_RETRY_ATTEMPTS times for overloaded errors"
4291 );
4292 });
4293
4294 // Check that a retry message was added
4295 thread.read_with(cx, |thread, _| {
4296 let mut messages = thread.messages();
4297 assert!(
4298 messages.any(|msg| {
4299 msg.role == Role::System
4300 && msg.ui_only
4301 && msg.segments.iter().any(|seg| {
4302 if let MessageSegment::Text(text) = seg {
4303 text.contains("overloaded")
4304 && text
4305 .contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS))
4306 } else {
4307 false
4308 }
4309 })
4310 }),
4311 "Should have added a system retry message"
4312 );
4313 });
4314
4315 let retry_count = thread.update(cx, |thread, _| {
4316 thread
4317 .messages
4318 .iter()
4319 .filter(|m| {
4320 m.ui_only
4321 && m.segments.iter().any(|s| {
4322 if let MessageSegment::Text(text) = s {
4323 text.contains("Retrying") && text.contains("seconds")
4324 } else {
4325 false
4326 }
4327 })
4328 })
4329 .count()
4330 });
4331
4332 assert_eq!(retry_count, 1, "Should have one retry message");
4333 }
4334
4335 #[gpui::test]
4336 async fn test_retry_on_internal_server_error(cx: &mut TestAppContext) {
4337 init_test_settings(cx);
4338
4339 let project = create_test_project(cx, json!({})).await;
4340 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4341
4342 // Enable Burn Mode to allow retries
4343 thread.update(cx, |thread, _| {
4344 thread.set_completion_mode(CompletionMode::Burn);
4345 });
4346
4347 // Create model that returns internal server error
4348 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4349
4350 // Insert a user message
4351 thread.update(cx, |thread, cx| {
4352 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4353 });
4354
4355 // Start completion
4356 thread.update(cx, |thread, cx| {
4357 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4358 });
4359
4360 cx.run_until_parked();
4361
4362 // Check retry state on thread
4363 thread.read_with(cx, |thread, _| {
4364 assert!(thread.retry_state.is_some(), "Should have retry state");
4365 let retry_state = thread.retry_state.as_ref().unwrap();
4366 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4367 assert_eq!(
4368 retry_state.max_attempts, 3,
4369 "Should have correct max attempts"
4370 );
4371 });
4372
4373 // Check that a retry message was added with provider name
4374 thread.read_with(cx, |thread, _| {
4375 let mut messages = thread.messages();
4376 assert!(
4377 messages.any(|msg| {
4378 msg.role == Role::System
4379 && msg.ui_only
4380 && msg.segments.iter().any(|seg| {
4381 if let MessageSegment::Text(text) = seg {
4382 text.contains("internal")
4383 && text.contains("Fake")
4384 && text.contains("Retrying")
4385 && text.contains("attempt 1 of 3")
4386 && text.contains("seconds")
4387 } else {
4388 false
4389 }
4390 })
4391 }),
4392 "Should have added a system retry message with provider name"
4393 );
4394 });
4395
4396 // Count retry messages
4397 let retry_count = thread.update(cx, |thread, _| {
4398 thread
4399 .messages
4400 .iter()
4401 .filter(|m| {
4402 m.ui_only
4403 && m.segments.iter().any(|s| {
4404 if let MessageSegment::Text(text) = s {
4405 text.contains("Retrying") && text.contains("seconds")
4406 } else {
4407 false
4408 }
4409 })
4410 })
4411 .count()
4412 });
4413
4414 assert_eq!(retry_count, 1, "Should have one retry message");
4415 }
4416
4417 #[gpui::test]
4418 async fn test_exponential_backoff_on_retries(cx: &mut TestAppContext) {
4419 init_test_settings(cx);
4420
4421 let project = create_test_project(cx, json!({})).await;
4422 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4423
4424 // Enable Burn Mode to allow retries
4425 thread.update(cx, |thread, _| {
4426 thread.set_completion_mode(CompletionMode::Burn);
4427 });
4428
4429 // Create model that returns internal server error
4430 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4431
4432 // Insert a user message
4433 thread.update(cx, |thread, cx| {
4434 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4435 });
4436
        // Track how many completion requests are started (initial attempt plus retries)
4439 let completion_count = Arc::new(Mutex::new(0));
4440 let completion_count_clone = completion_count.clone();
4441
4442 let _subscription = thread.update(cx, |_, cx| {
4443 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4444 if let ThreadEvent::NewRequest = event {
4445 *completion_count_clone.lock() += 1;
4446 }
4447 })
4448 });
4449
4450 // First attempt
4451 thread.update(cx, |thread, cx| {
4452 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4453 });
4454 cx.run_until_parked();
4455
4456 // Should have scheduled first retry - count retry messages
4457 let retry_count = thread.update(cx, |thread, _| {
4458 thread
4459 .messages
4460 .iter()
4461 .filter(|m| {
4462 m.ui_only
4463 && m.segments.iter().any(|s| {
4464 if let MessageSegment::Text(text) = s {
4465 text.contains("Retrying") && text.contains("seconds")
4466 } else {
4467 false
4468 }
4469 })
4470 })
4471 .count()
4472 });
4473 assert_eq!(retry_count, 1, "Should have scheduled first retry");
4474
4475 // Check retry state
4476 thread.read_with(cx, |thread, _| {
4477 assert!(thread.retry_state.is_some(), "Should have retry state");
4478 let retry_state = thread.retry_state.as_ref().unwrap();
4479 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4480 assert_eq!(
4481 retry_state.max_attempts, 3,
4482 "Internal server errors should retry up to 3 times"
4483 );
4484 });
4485
4486 // Advance clock for first retry
4487 cx.executor().advance_clock(BASE_RETRY_DELAY);
4488 cx.run_until_parked();
4489
4490 // Advance clock for second retry
4491 cx.executor().advance_clock(BASE_RETRY_DELAY);
4492 cx.run_until_parked();
4493
4494 // Advance clock for third retry
4495 cx.executor().advance_clock(BASE_RETRY_DELAY);
4496 cx.run_until_parked();
4497
4498 // Should have completed all retries - count retry messages
4499 let retry_count = thread.update(cx, |thread, _| {
4500 thread
4501 .messages
4502 .iter()
4503 .filter(|m| {
4504 m.ui_only
4505 && m.segments.iter().any(|s| {
4506 if let MessageSegment::Text(text) = s {
4507 text.contains("Retrying") && text.contains("seconds")
4508 } else {
4509 false
4510 }
4511 })
4512 })
4513 .count()
4514 });
4515 assert_eq!(
4516 retry_count, 3,
4517 "Should have 3 retries for internal server errors"
4518 );
4519
4520 // For internal server errors, we retry 3 times and then give up
4521 // Check that retry_state is cleared after all retries
4522 thread.read_with(cx, |thread, _| {
4523 assert!(
4524 thread.retry_state.is_none(),
4525 "Retry state should be cleared after all retries"
4526 );
4527 });
4528
4529 // Verify total attempts (1 initial + 3 retries)
4530 assert_eq!(
4531 *completion_count.lock(),
4532 4,
4533 "Should have attempted once plus 3 retries"
4534 );
4535 }
4536
4537 #[gpui::test]
4538 async fn test_max_retries_exceeded(cx: &mut TestAppContext) {
4539 init_test_settings(cx);
4540
4541 let project = create_test_project(cx, json!({})).await;
4542 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4543
4544 // Enable Burn Mode to allow retries
4545 thread.update(cx, |thread, _| {
4546 thread.set_completion_mode(CompletionMode::Burn);
4547 });
4548
4549 // Create model that returns overloaded error
4550 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4551
4552 // Insert a user message
4553 thread.update(cx, |thread, cx| {
4554 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4555 });
4556
4557 // Track events
4558 let stopped_with_error = Arc::new(Mutex::new(false));
4559 let stopped_with_error_clone = stopped_with_error.clone();
4560
4561 let _subscription = thread.update(cx, |_, cx| {
4562 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4563 if let ThreadEvent::Stopped(Err(_)) = event {
4564 *stopped_with_error_clone.lock() = true;
4565 }
4566 })
4567 });
4568
4569 // Start initial completion
4570 thread.update(cx, |thread, cx| {
4571 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4572 });
4573 cx.run_until_parked();
4574
4575 // Advance through all retries
4576 for _ in 0..MAX_RETRY_ATTEMPTS {
4577 cx.executor().advance_clock(BASE_RETRY_DELAY);
4578 cx.run_until_parked();
4579 }
4580
4581 let retry_count = thread.update(cx, |thread, _| {
4582 thread
4583 .messages
4584 .iter()
4585 .filter(|m| {
4586 m.ui_only
4587 && m.segments.iter().any(|s| {
4588 if let MessageSegment::Text(text) = s {
4589 text.contains("Retrying") && text.contains("seconds")
4590 } else {
4591 false
4592 }
4593 })
4594 })
4595 .count()
4596 });
4597
4598 // After max retries, should emit Stopped(Err(...)) event
4599 assert_eq!(
4600 retry_count, MAX_RETRY_ATTEMPTS as usize,
4601 "Should have attempted MAX_RETRY_ATTEMPTS retries for overloaded errors"
4602 );
4603 assert!(
4604 *stopped_with_error.lock(),
4605 "Should emit Stopped(Err(...)) event after max retries exceeded"
4606 );
4607
4608 // Retry state should be cleared
4609 thread.read_with(cx, |thread, _| {
4610 assert!(
4611 thread.retry_state.is_none(),
4612 "Retry state should be cleared after max retries"
4613 );
4614
4615 // Verify we have the expected number of retry messages
4616 let retry_messages = thread
4617 .messages
4618 .iter()
4619 .filter(|msg| msg.ui_only && msg.role == Role::System)
4620 .count();
4621 assert_eq!(
4622 retry_messages, MAX_RETRY_ATTEMPTS as usize,
4623 "Should have MAX_RETRY_ATTEMPTS retry messages for overloaded errors"
4624 );
4625 });
4626 }
4627
4628 #[gpui::test]
4629 async fn test_retry_message_removed_on_retry(cx: &mut TestAppContext) {
4630 init_test_settings(cx);
4631
4632 let project = create_test_project(cx, json!({})).await;
4633 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4634
4635 // Enable Burn Mode to allow retries
4636 thread.update(cx, |thread, _| {
4637 thread.set_completion_mode(CompletionMode::Burn);
4638 });
4639
        // Use a wrapper model that fails the first request, then succeeds on the retry
4641 struct RetryTestModel {
4642 inner: Arc<FakeLanguageModel>,
4643 failed_once: Arc<Mutex<bool>>,
4644 }
4645
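        // Metadata calls delegate to the inner fake model; only stream_completion
        // changes behavior, failing the first request and succeeding afterwards.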
4646 impl LanguageModel for RetryTestModel {
4647 fn id(&self) -> LanguageModelId {
4648 self.inner.id()
4649 }
4650
4651 fn name(&self) -> LanguageModelName {
4652 self.inner.name()
4653 }
4654
4655 fn provider_id(&self) -> LanguageModelProviderId {
4656 self.inner.provider_id()
4657 }
4658
4659 fn provider_name(&self) -> LanguageModelProviderName {
4660 self.inner.provider_name()
4661 }
4662
4663 fn supports_tools(&self) -> bool {
4664 self.inner.supports_tools()
4665 }
4666
4667 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4668 self.inner.supports_tool_choice(choice)
4669 }
4670
4671 fn supports_images(&self) -> bool {
4672 self.inner.supports_images()
4673 }
4674
4675 fn telemetry_id(&self) -> String {
4676 self.inner.telemetry_id()
4677 }
4678
4679 fn max_token_count(&self) -> u64 {
4680 self.inner.max_token_count()
4681 }
4682
4683 fn count_tokens(
4684 &self,
4685 request: LanguageModelRequest,
4686 cx: &App,
4687 ) -> BoxFuture<'static, Result<u64>> {
4688 self.inner.count_tokens(request, cx)
4689 }
4690
4691 fn stream_completion(
4692 &self,
4693 request: LanguageModelRequest,
4694 cx: &AsyncApp,
4695 ) -> BoxFuture<
4696 'static,
4697 Result<
4698 BoxStream<
4699 'static,
4700 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4701 >,
4702 LanguageModelCompletionError,
4703 >,
4704 > {
4705 if !*self.failed_once.lock() {
4706 *self.failed_once.lock() = true;
4707 let provider = self.provider_name();
4708 // Return error on first attempt
4709 let stream = futures::stream::once(async move {
4710 Err(LanguageModelCompletionError::ServerOverloaded {
4711 provider,
4712 retry_after: None,
4713 })
4714 });
4715 async move { Ok(stream.boxed()) }.boxed()
4716 } else {
4717 // Succeed on retry
4718 self.inner.stream_completion(request, cx)
4719 }
4720 }
4721
4722 fn as_fake(&self) -> &FakeLanguageModel {
4723 &self.inner
4724 }
4725 }
4726
4727 let model = Arc::new(RetryTestModel {
4728 inner: Arc::new(FakeLanguageModel::default()),
4729 failed_once: Arc::new(Mutex::new(false)),
4730 });
4731
4732 // Insert a user message
4733 thread.update(cx, |thread, cx| {
4734 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4735 });
4736
        // Track when the retry completes successfully
4739 let retry_completed = Arc::new(Mutex::new(false));
4740 let retry_completed_clone = retry_completed.clone();
4741
4742 let _subscription = thread.update(cx, |_, cx| {
4743 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4744 if let ThreadEvent::StreamedCompletion = event {
4745 *retry_completed_clone.lock() = true;
4746 }
4747 })
4748 });
4749
4750 // Start completion
4751 thread.update(cx, |thread, cx| {
4752 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4753 });
4754 cx.run_until_parked();
4755
4756 // Get the retry message ID
4757 let retry_message_id = thread.read_with(cx, |thread, _| {
4758 thread
4759 .messages()
4760 .find(|msg| msg.role == Role::System && msg.ui_only)
4761 .map(|msg| msg.id)
4762 .expect("Should have a retry message")
4763 });
4764
4765 // Wait for retry
4766 cx.executor().advance_clock(BASE_RETRY_DELAY);
4767 cx.run_until_parked();
4768
4769 // Stream some successful content
4770 let fake_model = model.as_fake();
4771 // After the retry, there should be a new pending completion
4772 let pending = fake_model.pending_completions();
4773 assert!(
4774 !pending.is_empty(),
4775 "Should have a pending completion after retry"
4776 );
4777 fake_model.stream_completion_response(&pending[0], "Success!");
4778 fake_model.end_completion_stream(&pending[0]);
4779 cx.run_until_parked();
4780
4781 // Check that the retry completed successfully
4782 assert!(
4783 *retry_completed.lock(),
4784 "Retry should have completed successfully"
4785 );
4786
4787 // Retry message should still exist but be marked as ui_only
4788 thread.read_with(cx, |thread, _| {
4789 let retry_msg = thread
4790 .message(retry_message_id)
4791 .expect("Retry message should still exist");
4792 assert!(retry_msg.ui_only, "Retry message should be ui_only");
4793 assert_eq!(
4794 retry_msg.role,
4795 Role::System,
4796 "Retry message should have System role"
4797 );
4798 });
4799 }
4800
4801 #[gpui::test]
4802 async fn test_successful_completion_clears_retry_state(cx: &mut TestAppContext) {
4803 init_test_settings(cx);
4804
4805 let project = create_test_project(cx, json!({})).await;
4806 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4807
4808 // Enable Burn Mode to allow retries
4809 thread.update(cx, |thread, _| {
4810 thread.set_completion_mode(CompletionMode::Burn);
4811 });
4812
4813 // Create a model that fails once then succeeds
4814 struct FailOnceModel {
4815 inner: Arc<FakeLanguageModel>,
4816 failed_once: Arc<Mutex<bool>>,
4817 }
4818
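        // Same delegation pattern as RetryTestModel above: only stream_completion is
        // overridden, failing once and then deferring to the inner fake model.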
4819 impl LanguageModel for FailOnceModel {
4820 fn id(&self) -> LanguageModelId {
4821 self.inner.id()
4822 }
4823
4824 fn name(&self) -> LanguageModelName {
4825 self.inner.name()
4826 }
4827
4828 fn provider_id(&self) -> LanguageModelProviderId {
4829 self.inner.provider_id()
4830 }
4831
4832 fn provider_name(&self) -> LanguageModelProviderName {
4833 self.inner.provider_name()
4834 }
4835
4836 fn supports_tools(&self) -> bool {
4837 self.inner.supports_tools()
4838 }
4839
4840 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4841 self.inner.supports_tool_choice(choice)
4842 }
4843
4844 fn supports_images(&self) -> bool {
4845 self.inner.supports_images()
4846 }
4847
4848 fn telemetry_id(&self) -> String {
4849 self.inner.telemetry_id()
4850 }
4851
4852 fn max_token_count(&self) -> u64 {
4853 self.inner.max_token_count()
4854 }
4855
4856 fn count_tokens(
4857 &self,
4858 request: LanguageModelRequest,
4859 cx: &App,
4860 ) -> BoxFuture<'static, Result<u64>> {
4861 self.inner.count_tokens(request, cx)
4862 }
4863
4864 fn stream_completion(
4865 &self,
4866 request: LanguageModelRequest,
4867 cx: &AsyncApp,
4868 ) -> BoxFuture<
4869 'static,
4870 Result<
4871 BoxStream<
4872 'static,
4873 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4874 >,
4875 LanguageModelCompletionError,
4876 >,
4877 > {
4878 if !*self.failed_once.lock() {
4879 *self.failed_once.lock() = true;
4880 let provider = self.provider_name();
4881 // Return error on first attempt
4882 let stream = futures::stream::once(async move {
4883 Err(LanguageModelCompletionError::ServerOverloaded {
4884 provider,
4885 retry_after: None,
4886 })
4887 });
4888 async move { Ok(stream.boxed()) }.boxed()
4889 } else {
4890 // Succeed on retry
4891 self.inner.stream_completion(request, cx)
4892 }
4893 }
4894 }
4895
4896 let fail_once_model = Arc::new(FailOnceModel {
4897 inner: Arc::new(FakeLanguageModel::default()),
4898 failed_once: Arc::new(Mutex::new(false)),
4899 });
4900
4901 // Insert a user message
4902 thread.update(cx, |thread, cx| {
4903 thread.insert_user_message(
4904 "Test message",
4905 ContextLoadResult::default(),
4906 None,
4907 vec![],
4908 cx,
4909 );
4910 });
4911
4912 // Start completion with fail-once model
4913 thread.update(cx, |thread, cx| {
4914 thread.send_to_model(
4915 fail_once_model.clone(),
4916 CompletionIntent::UserPrompt,
4917 None,
4918 cx,
4919 );
4920 });
4921
4922 cx.run_until_parked();
4923
4924 // Verify retry state exists after first failure
4925 thread.read_with(cx, |thread, _| {
4926 assert!(
4927 thread.retry_state.is_some(),
4928 "Should have retry state after failure"
4929 );
4930 });
4931
4932 // Wait for retry delay
4933 cx.executor().advance_clock(BASE_RETRY_DELAY);
4934 cx.run_until_parked();
4935
        // The retry goes through FailOnceModel again, which now succeeds; we drive the
        // inner FakeLanguageModel to finish the stream ourselves.
4938 let inner_fake = fail_once_model.inner.clone();
4939
4940 // Wait a bit for the retry to start
4941 cx.run_until_parked();
4942
4943 // Check for pending completions and complete them
4944 if let Some(pending) = inner_fake.pending_completions().first() {
4945 inner_fake.stream_completion_response(pending, "Success!");
4946 inner_fake.end_completion_stream(pending);
4947 }
4948 cx.run_until_parked();
4949
4950 thread.read_with(cx, |thread, _| {
4951 assert!(
4952 thread.retry_state.is_none(),
4953 "Retry state should be cleared after successful completion"
4954 );
4955
4956 let has_assistant_message = thread
4957 .messages
4958 .iter()
4959 .any(|msg| msg.role == Role::Assistant && !msg.ui_only);
4960 assert!(
4961 has_assistant_message,
4962 "Should have an assistant message after successful retry"
4963 );
4964 });
4965 }
4966
4967 #[gpui::test]
4968 async fn test_rate_limit_retry_single_attempt(cx: &mut TestAppContext) {
4969 init_test_settings(cx);
4970
4971 let project = create_test_project(cx, json!({})).await;
4972 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4973
4974 // Enable Burn Mode to allow retries
4975 thread.update(cx, |thread, _| {
4976 thread.set_completion_mode(CompletionMode::Burn);
4977 });
4978
        // Create a model that returns a rate limit error with retry_after
4980 struct RateLimitModel {
4981 inner: Arc<FakeLanguageModel>,
4982 }
4983
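        // Delegates everything to the inner fake model except stream_completion,
        // which always returns a rate limit error.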
4984 impl LanguageModel for RateLimitModel {
4985 fn id(&self) -> LanguageModelId {
4986 self.inner.id()
4987 }
4988
4989 fn name(&self) -> LanguageModelName {
4990 self.inner.name()
4991 }
4992
4993 fn provider_id(&self) -> LanguageModelProviderId {
4994 self.inner.provider_id()
4995 }
4996
4997 fn provider_name(&self) -> LanguageModelProviderName {
4998 self.inner.provider_name()
4999 }
5000
5001 fn supports_tools(&self) -> bool {
5002 self.inner.supports_tools()
5003 }
5004
5005 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
5006 self.inner.supports_tool_choice(choice)
5007 }
5008
5009 fn supports_images(&self) -> bool {
5010 self.inner.supports_images()
5011 }
5012
5013 fn telemetry_id(&self) -> String {
5014 self.inner.telemetry_id()
5015 }
5016
5017 fn max_token_count(&self) -> u64 {
5018 self.inner.max_token_count()
5019 }
5020
5021 fn count_tokens(
5022 &self,
5023 request: LanguageModelRequest,
5024 cx: &App,
5025 ) -> BoxFuture<'static, Result<u64>> {
5026 self.inner.count_tokens(request, cx)
5027 }
5028
5029 fn stream_completion(
5030 &self,
5031 _request: LanguageModelRequest,
5032 _cx: &AsyncApp,
5033 ) -> BoxFuture<
5034 'static,
5035 Result<
5036 BoxStream<
5037 'static,
5038 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
5039 >,
5040 LanguageModelCompletionError,
5041 >,
5042 > {
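                // Always fail with a rate limit error that carries an explicit
                // retry_after hint for the thread's retry scheduling.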
5043 let provider = self.provider_name();
5044 async move {
5045 let stream = futures::stream::once(async move {
5046 Err(LanguageModelCompletionError::RateLimitExceeded {
5047 provider,
5048 retry_after: Some(Duration::from_secs(TEST_RATE_LIMIT_RETRY_SECS)),
5049 })
5050 });
5051 Ok(stream.boxed())
5052 }
5053 .boxed()
5054 }
5055
5056 fn as_fake(&self) -> &FakeLanguageModel {
5057 &self.inner
5058 }
5059 }
5060
5061 let model = Arc::new(RateLimitModel {
5062 inner: Arc::new(FakeLanguageModel::default()),
5063 });
5064
5065 // Insert a user message
5066 thread.update(cx, |thread, cx| {
5067 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5068 });
5069
5070 // Start completion
5071 thread.update(cx, |thread, cx| {
5072 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5073 });
5074
5075 cx.run_until_parked();
5076
5077 let retry_count = thread.update(cx, |thread, _| {
5078 thread
5079 .messages
5080 .iter()
5081 .filter(|m| {
5082 m.ui_only
5083 && m.segments.iter().any(|s| {
5084 if let MessageSegment::Text(text) = s {
5085 text.contains("rate limit exceeded")
5086 } else {
5087 false
5088 }
5089 })
5090 })
5091 .count()
5092 });
5093 assert_eq!(retry_count, 1, "Should have scheduled one retry");
5094
5095 thread.read_with(cx, |thread, _| {
5096 assert!(
5097 thread.retry_state.is_some(),
5098 "Rate limit errors should set retry_state"
5099 );
5100 if let Some(retry_state) = &thread.retry_state {
5101 assert_eq!(
5102 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
5103 "Rate limit errors should use MAX_RETRY_ATTEMPTS"
5104 );
5105 }
5106 });
5107
5108 // Verify we have one retry message
5109 thread.read_with(cx, |thread, _| {
5110 let retry_messages = thread
5111 .messages
5112 .iter()
5113 .filter(|msg| {
5114 msg.ui_only
5115 && msg.segments.iter().any(|seg| {
5116 if let MessageSegment::Text(text) = seg {
5117 text.contains("rate limit exceeded")
5118 } else {
5119 false
5120 }
5121 })
5122 })
5123 .count();
5124 assert_eq!(
5125 retry_messages, 1,
5126 "Should have one rate limit retry message"
5127 );
5128 });
5129
        // Check that the retry message includes the attempt count
5131 thread.read_with(cx, |thread, _| {
5132 let retry_message = thread
5133 .messages
5134 .iter()
5135 .find(|msg| msg.role == Role::System && msg.ui_only)
5136 .expect("Should have a retry message");
5137
            // The attempt count is present because rate-limit retries track retry_state
5139 if let Some(MessageSegment::Text(text)) = retry_message.segments.first() {
5140 assert!(
5141 text.contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS)),
5142 "Rate limit retry message should contain attempt count with MAX_RETRY_ATTEMPTS"
5143 );
5144 assert!(
5145 text.contains("Retrying"),
5146 "Rate limit retry message should contain retry text"
5147 );
5148 }
5149 });
5150 }
5151
5152 #[gpui::test]
5153 async fn test_ui_only_messages_not_sent_to_model(cx: &mut TestAppContext) {
5154 init_test_settings(cx);
5155
5156 let project = create_test_project(cx, json!({})).await;
5157 let (_, _, thread, _, model) = setup_test_environment(cx, project.clone()).await;
5158
5159 // Insert a regular user message
5160 thread.update(cx, |thread, cx| {
5161 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5162 });
5163
5164 // Insert a UI-only message (like our retry notifications)
5165 thread.update(cx, |thread, cx| {
5166 let id = thread.next_message_id.post_inc();
5167 thread.messages.push(Message {
5168 id,
5169 role: Role::System,
5170 segments: vec![MessageSegment::Text(
5171 "This is a UI-only message that should not be sent to the model".to_string(),
5172 )],
5173 loaded_context: LoadedContext::default(),
5174 creases: Vec::new(),
5175 is_hidden: true,
5176 ui_only: true,
5177 });
5178 cx.emit(ThreadEvent::MessageAdded(id));
5179 });
5180
5181 // Insert another regular message
5182 thread.update(cx, |thread, cx| {
5183 thread.insert_user_message(
5184 "How are you?",
5185 ContextLoadResult::default(),
5186 None,
5187 vec![],
5188 cx,
5189 );
5190 });
5191
5192 // Generate the completion request
5193 let request = thread.update(cx, |thread, cx| {
5194 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
5195 });
5196
5197 // Verify that the request only contains non-UI-only messages
5198 // Should have system prompt + 2 user messages, but not the UI-only message
5199 let user_messages: Vec<_> = request
5200 .messages
5201 .iter()
5202 .filter(|msg| msg.role == Role::User)
5203 .collect();
5204 assert_eq!(
5205 user_messages.len(),
5206 2,
5207 "Should have exactly 2 user messages"
5208 );
5209
5210 // Verify the UI-only content is not present anywhere in the request
5211 let request_text = request
5212 .messages
5213 .iter()
5214 .flat_map(|msg| &msg.content)
5215 .filter_map(|content| match content {
5216 MessageContent::Text(text) => Some(text.as_str()),
5217 _ => None,
5218 })
5219 .collect::<String>();
5220
5221 assert!(
5222 !request_text.contains("UI-only message"),
5223 "UI-only message content should not be in the request"
5224 );
5225
5226 // Verify the thread still has all 3 messages (including UI-only)
5227 thread.read_with(cx, |thread, _| {
5228 assert_eq!(
5229 thread.messages().count(),
5230 3,
5231 "Thread should have 3 messages"
5232 );
5233 assert_eq!(
5234 thread.messages().filter(|m| m.ui_only).count(),
5235 1,
5236 "Thread should have 1 UI-only message"
5237 );
5238 });
5239
5240 // Verify that UI-only messages are not serialized
5241 let serialized = thread
5242 .update(cx, |thread, cx| thread.serialize(cx))
5243 .await
5244 .unwrap();
5245 assert_eq!(
5246 serialized.messages.len(),
5247 2,
5248 "Serialized thread should only have 2 messages (no UI-only)"
5249 );
5250 }
5251
5252 #[gpui::test]
5253 async fn test_no_retry_without_burn_mode(cx: &mut TestAppContext) {
5254 init_test_settings(cx);
5255
5256 let project = create_test_project(cx, json!({})).await;
5257 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5258
5259 // Ensure we're in Normal mode (not Burn mode)
5260 thread.update(cx, |thread, _| {
5261 thread.set_completion_mode(CompletionMode::Normal);
5262 });
5263
5264 // Track error events
5265 let error_events = Arc::new(Mutex::new(Vec::new()));
5266 let error_events_clone = error_events.clone();
5267
5268 let _subscription = thread.update(cx, |_, cx| {
5269 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
5270 if let ThreadEvent::ShowError(error) = event {
5271 error_events_clone.lock().push(error.clone());
5272 }
5273 })
5274 });
5275
5276 // Create model that returns overloaded error
5277 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5278
5279 // Insert a user message
5280 thread.update(cx, |thread, cx| {
5281 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5282 });
5283
5284 // Start completion
5285 thread.update(cx, |thread, cx| {
5286 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5287 });
5288
5289 cx.run_until_parked();
5290
5291 // Verify no retry state was created
5292 thread.read_with(cx, |thread, _| {
5293 assert!(
5294 thread.retry_state.is_none(),
5295 "Should not have retry state in Normal mode"
5296 );
5297 });
5298
5299 // Check that a retryable error was reported
5300 let errors = error_events.lock();
5301 assert!(!errors.is_empty(), "Should have received an error event");
5302
5303 if let ThreadError::RetryableError {
5304 message: _,
5305 can_enable_burn_mode,
5306 } = &errors[0]
5307 {
5308 assert!(
5309 *can_enable_burn_mode,
5310 "Error should indicate burn mode can be enabled"
5311 );
5312 } else {
5313 panic!("Expected RetryableError, got {:?}", errors[0]);
5314 }
5315
5316 // Verify the thread is no longer generating
5317 thread.read_with(cx, |thread, _| {
5318 assert!(
5319 !thread.is_generating(),
5320 "Should not be generating after error without retry"
5321 );
5322 });
5323 }
5324
5325 #[gpui::test]
5326 async fn test_retry_cancelled_on_stop(cx: &mut TestAppContext) {
5327 init_test_settings(cx);
5328
5329 let project = create_test_project(cx, json!({})).await;
5330 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5331
5332 // Enable Burn Mode to allow retries
5333 thread.update(cx, |thread, _| {
5334 thread.set_completion_mode(CompletionMode::Burn);
5335 });
5336
5337 // Create model that returns overloaded error
5338 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5339
5340 // Insert a user message
5341 thread.update(cx, |thread, cx| {
5342 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5343 });
5344
5345 // Start completion
5346 thread.update(cx, |thread, cx| {
5347 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5348 });
5349
5350 cx.run_until_parked();
5351
5352 // Verify retry was scheduled by checking for retry message
5353 let has_retry_message = thread.read_with(cx, |thread, _| {
5354 thread.messages.iter().any(|m| {
5355 m.ui_only
5356 && m.segments.iter().any(|s| {
5357 if let MessageSegment::Text(text) = s {
5358 text.contains("Retrying") && text.contains("seconds")
5359 } else {
5360 false
5361 }
5362 })
5363 })
5364 });
5365 assert!(has_retry_message, "Should have scheduled a retry");
5366
5367 // Cancel the completion before the retry happens
5368 thread.update(cx, |thread, cx| {
5369 thread.cancel_last_completion(None, cx);
5370 });
5371
5372 cx.run_until_parked();
5373
5374 // The retry should not have happened - no pending completions
5375 let fake_model = model.as_fake();
5376 assert_eq!(
5377 fake_model.pending_completions().len(),
5378 0,
5379 "Should have no pending completions after cancellation"
5380 );
5381
5382 // Verify the retry was cancelled by checking retry state
5383 thread.read_with(cx, |thread, _| {
5384 if let Some(retry_state) = &thread.retry_state {
5385 panic!(
5386 "retry_state should be cleared after cancellation, but found: attempt={}, max_attempts={}, intent={:?}",
5387 retry_state.attempt, retry_state.max_attempts, retry_state.intent
5388 );
5389 }
5390 });
5391 }
5392
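    // Helper that drives a summarization request which ends without streaming any
    // text, leaving the thread summary in the Error state for callers to inspect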
5393 fn test_summarize_error(
5394 model: &Arc<dyn LanguageModel>,
5395 thread: &Entity<Thread>,
5396 cx: &mut TestAppContext,
5397 ) {
5398 thread.update(cx, |thread, cx| {
5399 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
5400 thread.send_to_model(
5401 model.clone(),
5402 CompletionIntent::ThreadSummarization,
5403 None,
5404 cx,
5405 );
5406 });
5407
5408 let fake_model = model.as_fake();
5409 simulate_successful_response(&fake_model, cx);
5410
5411 thread.read_with(cx, |thread, _| {
5412 assert!(matches!(thread.summary(), ThreadSummary::Generating));
5413 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5414 });
5415
        // Simulate the summary request ending without streaming any text
5417 cx.run_until_parked();
5418 fake_model.end_last_completion_stream();
5419 cx.run_until_parked();
5420
5421 // State is set to Error and default message
5422 thread.read_with(cx, |thread, _| {
5423 assert!(matches!(thread.summary(), ThreadSummary::Error));
5424 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5425 });
5426 }
5427
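    // Helper that streams a canned assistant reply on the most recent completion and
    // runs the executor until all resulting tasks settle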
5428 fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
5429 cx.run_until_parked();
5430 fake_model.stream_last_completion_response("Assistant response");
5431 fake_model.end_last_completion_stream();
5432 cx.run_until_parked();
5433 }
5434
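    // Registers the settings, prompt, tool, and language-model globals the tests rely on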
5435 fn init_test_settings(cx: &mut TestAppContext) {
5436 cx.update(|cx| {
5437 let settings_store = SettingsStore::test(cx);
5438 cx.set_global(settings_store);
5439 language::init(cx);
5440 Project::init_settings(cx);
5441 AgentSettings::register(cx);
5442 prompt_store::init(cx);
5443 thread_store::init(cx);
5444 workspace::init_settings(cx);
5445 language_model::init_settings(cx);
5446 ThemeSettings::register(cx);
5447 ToolRegistry::default_global(cx);
5448 assistant_tool::init(cx);
5449
5450 let http_client = Arc::new(http_client::HttpClientWithUrl::new(
5451 http_client::FakeHttpClient::with_200_response(),
5452 "http://localhost".to_string(),
5453 None,
5454 ));
5455 assistant_tools::init(http_client, cx);
5456 });
5457 }
5458
5459 // Helper to create a test project with test files
5460 async fn create_test_project(
5461 cx: &mut TestAppContext,
5462 files: serde_json::Value,
5463 ) -> Entity<Project> {
5464 let fs = FakeFs::new(cx.executor());
5465 fs.insert_tree(path!("/test"), files).await;
5466 Project::test(fs, [path!("/test").as_ref()], cx).await
5467 }
5468
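    // Builds a workspace, thread store, thread, and context store for the project, and
    // installs a fake language model as both the default and thread-summary model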
5469 async fn setup_test_environment(
5470 cx: &mut TestAppContext,
5471 project: Entity<Project>,
5472 ) -> (
5473 Entity<Workspace>,
5474 Entity<ThreadStore>,
5475 Entity<Thread>,
5476 Entity<ContextStore>,
5477 Arc<dyn LanguageModel>,
5478 ) {
5479 let (workspace, cx) =
5480 cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
5481
5482 let thread_store = cx
5483 .update(|_, cx| {
5484 ThreadStore::load(
5485 project.clone(),
5486 cx.new(|_| ToolWorkingSet::default()),
5487 None,
5488 Arc::new(PromptBuilder::new(None).unwrap()),
5489 cx,
5490 )
5491 })
5492 .await
5493 .unwrap();
5494
5495 let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
5496 let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));
5497
5498 let provider = Arc::new(FakeLanguageModelProvider::default());
5499 let model = provider.test_model();
5500 let model: Arc<dyn LanguageModel> = Arc::new(model);
5501
5502 cx.update(|_, cx| {
5503 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
5504 registry.set_default_model(
5505 Some(ConfiguredModel {
5506 provider: provider.clone(),
5507 model: model.clone(),
5508 }),
5509 cx,
5510 );
5511 registry.set_thread_summary_model(
5512 Some(ConfiguredModel {
5513 provider,
5514 model: model.clone(),
5515 }),
5516 cx,
5517 );
5518 })
5519 });
5520
5521 (workspace, thread_store, thread, context_store, model)
5522 }
5523
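    // Opens `path` as a buffer in the project, adds it to the context store, and
    // returns the buffer so tests can edit it later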
5524 async fn add_file_to_context(
5525 project: &Entity<Project>,
5526 context_store: &Entity<ContextStore>,
5527 path: &str,
5528 cx: &mut TestAppContext,
5529 ) -> Result<Entity<language::Buffer>> {
5530 let buffer_path = project
5531 .read_with(cx, |project, cx| project.find_project_path(path, cx))
5532 .unwrap();
5533
5534 let buffer = project
5535 .update(cx, |project, cx| {
5536 project.open_buffer(buffer_path.clone(), cx)
5537 })
5538 .await
5539 .unwrap();
5540
5541 context_store.update(cx, |context_store, cx| {
5542 context_store.add_file_from_buffer(&buffer_path, buffer.clone(), false, cx);
5543 });
5544
5545 Ok(buffer)
5546 }
5547}