use crate::{
    agent_profile::AgentProfile,
    context::{AgentContext, AgentContextHandle, ContextLoadResult, LoadedContext},
    thread_store::{
        SerializedCrease, SerializedLanguageModel, SerializedMessage, SerializedMessageSegment,
        SerializedThread, SerializedToolResult, SerializedToolUse, SharedProjectContext,
        ThreadStore,
    },
    tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
};
use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
use anyhow::{Result, anyhow};
use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
use chrono::{DateTime, Utc};
use client::{CloudUserStore, ModelRequestUsage, RequestUsage};
use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
use collections::HashMap;
use feature_flags::{self, FeatureFlagAppExt};
use futures::{FutureExt, StreamExt as _, future::Shared};
use git::repository::DiffType;
use gpui::{
    AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
    WeakEntity, Window,
};
use http_client::StatusCode;
use language_model::{
    ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelExt as _, LanguageModelId, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
    LanguageModelToolResultContent, LanguageModelToolUse, LanguageModelToolUseId, MessageContent,
    ModelRequestLimitReachedError, PaymentRequiredError, Role, SelectedModel, StopReason,
    TokenUsage,
};
use postage::stream::Stream as _;
use project::{
    Project,
    git_store::{GitStore, GitStoreCheckpoint, RepositoryState},
};
use prompt_store::{ModelContext, PromptBuilder};
use proto::Plan;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::Settings;
use std::{
    io::Write,
    ops::Range,
    sync::Arc,
    time::{Duration, Instant},
};
use thiserror::Error;
use util::{ResultExt as _, post_inc};
use uuid::Uuid;

const MAX_RETRY_ATTEMPTS: u8 = 4;
const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);

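/// Policy for retrying a failed completion request, chosen per error type
/// (see [`Thread::get_retry_strategy`]).
///
/// A minimal sketch of how a per-attempt delay could be derived from a
/// strategy; the authoritative computation lives with the retry-handling
/// code later in this file, and `attempt` below is a hypothetical local:
///
/// ```ignore
/// let delay = match &strategy {
///     RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
///         // Double the delay on each attempt: d, 2d, 4d, ...
///         *initial_delay * 2u32.pow(attempt.saturating_sub(1) as u32)
///     }
///     RetryStrategy::Fixed { delay, .. } => *delay,
/// };
/// ```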
#[derive(Debug, Clone)]
enum RetryStrategy {
    ExponentialBackoff {
        initial_delay: Duration,
        max_attempts: u8,
    },
    Fixed {
        delay: Duration,
        max_attempts: u8,
    },
}

#[derive(
    Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<&str> for ThreadId {
    fn from(value: &str) -> Self {
        Self(value.into())
    }
}

/// The ID of the user prompt that initiated a request.
///
/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct PromptId(Arc<str>);

impl PromptId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for PromptId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }

    pub fn as_usize(&self) -> usize {
        self.0
    }
}

/// Stored information that can be used to resurrect a context crease when creating an editor for a past message.
#[derive(Clone, Debug)]
pub struct MessageCrease {
    pub range: Range<usize>,
    pub icon_path: SharedString,
    pub label: SharedString,
    /// None for a deserialized message, Some otherwise.
    pub context: Option<AgentContextHandle>,
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub segments: Vec<MessageSegment>,
    pub loaded_context: LoadedContext,
    pub creases: Vec<MessageCrease>,
    pub is_hidden: bool,
    pub ui_only: bool,
}

impl Message {
    /// Returns whether the message contains any meaningful text that should be displayed.
    /// The model sometimes runs a tool without producing any text, or produces just a marker ([`USING_TOOL_MARKER`]).
    pub fn should_display_content(&self) -> bool {
        self.segments.iter().all(|segment| segment.should_display())
    }

    pub fn push_thinking(&mut self, text: &str, signature: Option<String>) {
        if let Some(MessageSegment::Thinking {
            text: segment,
            signature: current_signature,
        }) = self.segments.last_mut()
        {
            if let Some(signature) = signature {
                *current_signature = Some(signature);
            }
            segment.push_str(text);
        } else {
            self.segments.push(MessageSegment::Thinking {
                text: text.to_string(),
                signature,
            });
        }
    }

    pub fn push_redacted_thinking(&mut self, data: String) {
        self.segments.push(MessageSegment::RedactedThinking(data));
    }

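    /// Appends `text` to the message, coalescing with a trailing
    /// [`MessageSegment::Text`] segment instead of starting a new one.
    ///
    /// Illustrative sketch (not a doctest; `Message` values are constructed
    /// internally by [`Thread`]):
    ///
    /// ```ignore
    /// message.push_text("Hello, ");
    /// message.push_text("world!"); // extends the same Text segment
    /// assert_eq!(message.segments.len(), 1);
    /// ```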
    pub fn push_text(&mut self, text: &str) {
        if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
            segment.push_str(text);
        } else {
            self.segments.push(MessageSegment::Text(text.to_string()));
        }
    }

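    /// Renders the message as plain text: any loaded context first, then each
    /// segment in order, with thinking segments wrapped in `<think>` tags and
    /// redacted thinking omitted.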
    pub fn to_string(&self) -> String {
        let mut result = String::new();

        if !self.loaded_context.text.is_empty() {
            result.push_str(&self.loaded_context.text);
        }

        for segment in &self.segments {
            match segment {
                MessageSegment::Text(text) => result.push_str(text),
                MessageSegment::Thinking { text, .. } => {
                    result.push_str("<think>\n");
                    result.push_str(text);
                    result.push_str("\n</think>");
                }
                MessageSegment::RedactedThinking(_) => {}
            }
        }

        result
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MessageSegment {
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    RedactedThinking(String),
}

impl MessageSegment {
    pub fn should_display(&self) -> bool {
        match self {
            Self::Text(text) => !text.is_empty(),
            Self::Thinking { text, .. } => !text.is_empty(),
            Self::RedactedThinking(_) => false,
        }
    }

    pub fn text(&self) -> Option<&str> {
        match self {
            MessageSegment::Text(text) => Some(text),
            _ => None,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProjectSnapshot {
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    pub unsaved_buffer_paths: Vec<String>,
    pub timestamp: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct WorktreeSnapshot {
    pub worktree_path: String,
    pub git_state: Option<GitState>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GitState {
    pub remote_url: Option<String>,
    pub head_sha: Option<String>,
    pub current_branch: Option<String>,
    pub diff: Option<String>,
}

#[derive(Clone, Debug)]
pub struct ThreadCheckpoint {
    message_id: MessageId,
    git_checkpoint: GitStoreCheckpoint,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}

pub enum LastRestoreCheckpoint {
    Pending {
        message_id: MessageId,
    },
    Error {
        message_id: MessageId,
        error: String,
    },
}

impl LastRestoreCheckpoint {
    pub fn message_id(&self) -> MessageId {
        match self {
            LastRestoreCheckpoint::Pending { message_id } => *message_id,
            LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
        }
    }
}

#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub enum DetailedSummaryState {
    #[default]
    NotGenerated,
    Generating {
        message_id: MessageId,
    },
    Generated {
        text: SharedString,
        message_id: MessageId,
    },
}

impl DetailedSummaryState {
    fn text(&self) -> Option<SharedString> {
        if let Self::Generated { text, .. } = self {
            Some(text.clone())
        } else {
            None
        }
    }
}

#[derive(Default, Debug)]
pub struct TotalTokenUsage {
    pub total: u64,
    pub max: u64,
}

impl TotalTokenUsage {
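    /// Classifies current usage against the model's context window: at or
    /// above the window it is `Exceeded`, at or above the warning threshold
    /// (80%, overridable in debug builds via `ZED_THREAD_WARNING_THRESHOLD`)
    /// it is `Warning`, and otherwise `Normal`.
    ///
    /// Illustrative sketch (not a doctest):
    ///
    /// ```ignore
    /// let usage = TotalTokenUsage { total: 850, max: 1000 };
    /// assert_eq!(usage.ratio(), TokenUsageRatio::Warning); // 0.85 >= 0.8
    /// assert_eq!(usage.add(150).ratio(), TokenUsageRatio::Exceeded);
    /// ```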
    pub fn ratio(&self) -> TokenUsageRatio {
        #[cfg(debug_assertions)]
        let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
            .unwrap_or("0.8".to_string())
            .parse()
            .unwrap();
        #[cfg(not(debug_assertions))]
        let warning_threshold: f32 = 0.8;

        // When the maximum is unknown because there is no selected model,
        // avoid showing the token limit warning.
        if self.max == 0 {
            TokenUsageRatio::Normal
        } else if self.total >= self.max {
            TokenUsageRatio::Exceeded
        } else if self.total as f32 / self.max as f32 >= warning_threshold {
            TokenUsageRatio::Warning
        } else {
            TokenUsageRatio::Normal
        }
    }

    pub fn add(&self, tokens: u64) -> TotalTokenUsage {
        TotalTokenUsage {
            total: self.total + tokens,
            max: self.max,
        }
    }
}

#[derive(Debug, Default, PartialEq, Eq)]
pub enum TokenUsageRatio {
    #[default]
    Normal,
    Warning,
    Exceeded,
}

#[derive(Debug, Clone, Copy)]
pub enum QueueState {
    Sending,
    Queued { position: usize },
    Started,
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: ThreadSummary,
    pending_summary: Task<Option<()>>,
    detailed_summary_task: Task<Option<()>>,
    detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
    detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
    completion_mode: agent_settings::CompletionMode,
    messages: Vec<Message>,
    next_message_id: MessageId,
    last_prompt_id: PromptId,
    project_context: SharedProjectContext,
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    cloud_user_store: Entity<CloudUserStore>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Entity<ToolWorkingSet>,
    tool_use: ToolUseState,
    action_log: Entity<ActionLog>,
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    pending_checkpoint: Option<ThreadCheckpoint>,
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    request_token_usage: Vec<TokenUsage>,
    cumulative_token_usage: TokenUsage,
    exceeded_window_error: Option<ExceededWindowError>,
    tool_use_limit_reached: bool,
    feedback: Option<ThreadFeedback>,
    retry_state: Option<RetryState>,
    message_feedback: HashMap<MessageId, ThreadFeedback>,
    last_auto_capture_at: Option<Instant>,
    last_received_chunk_at: Option<Instant>,
    request_callback: Option<
        Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
    >,
    remaining_turns: u32,
    configured_model: Option<ConfiguredModel>,
    profile: AgentProfile,
    last_error_context: Option<(Arc<dyn LanguageModel>, CompletionIntent)>,
}

#[derive(Clone, Debug)]
struct RetryState {
    attempt: u8,
    max_attempts: u8,
    intent: CompletionIntent,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ThreadSummary {
    Pending,
    Generating,
    Ready(SharedString),
    Error,
}

impl ThreadSummary {
    pub const DEFAULT: SharedString = SharedString::new_static("New Thread");

    pub fn or_default(&self) -> SharedString {
        self.unwrap_or(Self::DEFAULT)
    }

    pub fn unwrap_or(&self, message: impl Into<SharedString>) -> SharedString {
        self.ready().unwrap_or_else(|| message.into())
    }

    pub fn ready(&self) -> Option<SharedString> {
        match self {
            ThreadSummary::Ready(summary) => Some(summary.clone()),
            ThreadSummary::Pending | ThreadSummary::Generating | ThreadSummary::Error => None,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ExceededWindowError {
    /// Model used when last message exceeded context window
    model_id: LanguageModelId,
    /// Token count including last message
    token_count: u64,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        cloud_user_store: Entity<CloudUserStore>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        system_prompt: SharedProjectContext,
        cx: &mut Context<Self>,
    ) -> Self {
        let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
        let configured_model = LanguageModelRegistry::read_global(cx).default_model();
        let profile_id = AgentSettings::get_global(cx).default_profile.clone();

        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: ThreadSummary::Pending,
            pending_summary: Task::ready(None),
            detailed_summary_task: Task::ready(None),
            detailed_summary_tx,
            detailed_summary_rx,
            completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
            messages: Vec::new(),
            next_message_id: MessageId(0),
            last_prompt_id: PromptId::new(),
            project_context: system_prompt,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project: project.clone(),
            cloud_user_store,
            prompt_builder,
            tools: tools.clone(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            tool_use: ToolUseState::new(tools.clone()),
            action_log: cx.new(|_| ActionLog::new(project.clone())),
            initial_project_snapshot: {
                let project_snapshot = Self::project_snapshot(project, cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            request_token_usage: Vec::new(),
            cumulative_token_usage: TokenUsage::default(),
            exceeded_window_error: None,
            tool_use_limit_reached: false,
            feedback: None,
            retry_state: None,
            message_feedback: HashMap::default(),
            last_auto_capture_at: None,
            last_error_context: None,
            last_received_chunk_at: None,
            request_callback: None,
            remaining_turns: u32::MAX,
            configured_model: configured_model.clone(),
            profile: AgentProfile::new(profile_id, tools),
        }
    }

    pub fn deserialize(
        id: ThreadId,
        serialized: SerializedThread,
        project: Entity<Project>,
        cloud_user_store: Entity<CloudUserStore>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        project_context: SharedProjectContext,
        window: Option<&mut Window>, // None in headless mode
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            serialized
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use = ToolUseState::from_serialized_messages(
            tools.clone(),
            &serialized.messages,
            project.clone(),
            window,
            cx,
        );
        let (detailed_summary_tx, detailed_summary_rx) =
            postage::watch::channel_with(serialized.detailed_summary_state);

        let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
            serialized
                .model
                .and_then(|model| {
                    let model = SelectedModel {
                        provider: model.provider.clone().into(),
                        model: model.model.clone().into(),
                    };
                    registry.select_model(&model, cx)
                })
                .or_else(|| registry.default_model())
        });

        let completion_mode = serialized
            .completion_mode
            .unwrap_or_else(|| AgentSettings::get_global(cx).preferred_completion_mode);
        let profile_id = serialized
            .profile
            .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());

        Self {
            id,
            updated_at: serialized.updated_at,
            summary: ThreadSummary::Ready(serialized.summary),
            pending_summary: Task::ready(None),
            detailed_summary_task: Task::ready(None),
            detailed_summary_tx,
            detailed_summary_rx,
            completion_mode,
            retry_state: None,
            messages: serialized
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    segments: message
                        .segments
                        .into_iter()
                        .map(|segment| match segment {
                            SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
                            SerializedMessageSegment::Thinking { text, signature } => {
                                MessageSegment::Thinking { text, signature }
                            }
                            SerializedMessageSegment::RedactedThinking { data } => {
                                MessageSegment::RedactedThinking(data)
                            }
                        })
                        .collect(),
                    loaded_context: LoadedContext {
                        contexts: Vec::new(),
                        text: message.context,
                        images: Vec::new(),
                    },
                    creases: message
                        .creases
                        .into_iter()
                        .map(|crease| MessageCrease {
                            range: crease.start..crease.end,
                            icon_path: crease.icon_path,
                            label: crease.label,
                            context: None,
                        })
                        .collect(),
                    is_hidden: message.is_hidden,
                    ui_only: false, // UI-only messages are not persisted
                })
                .collect(),
            next_message_id,
            last_prompt_id: PromptId::new(),
            project_context,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            project: project.clone(),
            cloud_user_store,
            prompt_builder,
            tools: tools.clone(),
            tool_use,
            action_log: cx.new(|_| ActionLog::new(project)),
            initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
            request_token_usage: serialized.request_token_usage,
            cumulative_token_usage: serialized.cumulative_token_usage,
            exceeded_window_error: None,
            tool_use_limit_reached: serialized.tool_use_limit_reached,
            feedback: None,
            message_feedback: HashMap::default(),
            last_auto_capture_at: None,
            last_error_context: None,
            last_received_chunk_at: None,
            request_callback: None,
            remaining_turns: u32::MAX,
            configured_model,
            profile: AgentProfile::new(profile_id, tools),
        }
    }

    pub fn set_request_callback(
        &mut self,
        callback: impl 'static
        + FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>]),
    ) {
        self.request_callback = Some(Box::new(callback));
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn profile(&self) -> &AgentProfile {
        &self.profile
    }

    pub fn set_profile(&mut self, id: AgentProfileId, cx: &mut Context<Self>) {
        if &id != self.profile.id() {
            self.profile = AgentProfile::new(id, self.tools.clone());
            cx.emit(ThreadEvent::ProfileChanged);
        }
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn advance_prompt_id(&mut self) {
        self.last_prompt_id = PromptId::new();
    }

    pub fn project_context(&self) -> SharedProjectContext {
        self.project_context.clone()
    }

    pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
        if self.configured_model.is_none() {
            self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
        }
        self.configured_model.clone()
    }

    pub fn configured_model(&self) -> Option<ConfiguredModel> {
        self.configured_model.clone()
    }

    pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
        self.configured_model = model;
        cx.notify();
    }

    pub fn summary(&self) -> &ThreadSummary {
        &self.summary
    }

    pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        let current_summary = match &self.summary {
            ThreadSummary::Pending | ThreadSummary::Generating => return,
            ThreadSummary::Ready(summary) => summary,
            ThreadSummary::Error => &ThreadSummary::DEFAULT,
        };

        let mut new_summary = new_summary.into();

        if new_summary.is_empty() {
            new_summary = ThreadSummary::DEFAULT;
        }

        if current_summary != &new_summary {
            self.summary = ThreadSummary::Ready(new_summary);
            cx.emit(ThreadEvent::SummaryChanged);
        }
    }

    pub fn completion_mode(&self) -> CompletionMode {
        self.completion_mode
    }

    pub fn set_completion_mode(&mut self, mode: CompletionMode) {
        self.completion_mode = mode;
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        let index = self
            .messages
            .binary_search_by(|message| message.id.cmp(&id))
            .ok()?;

        self.messages.get(index)
    }

    pub fn messages(&self) -> impl ExactSizeIterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_generating(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }

    /// Indicates whether streaming of language model events is stale.
    /// When `is_generating()` is false, this method returns `None`.
    pub fn is_generation_stale(&self) -> Option<bool> {
        const STALE_THRESHOLD: u128 = 250;

        self.last_received_chunk_at
            .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
    }

    fn received_chunk(&mut self) {
        self.last_received_chunk_at = Some(Instant::now());
    }

    pub fn queue_state(&self) -> Option<QueueState> {
        self.pending_completions
            .first()
            .map(|pending_completion| pending_completion.queue_state)
    }

    pub fn tools(&self) -> &Entity<ToolWorkingSet> {
        &self.tools
    }

    pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .find(|tool_use| &tool_use.id == id)
    }

    pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.needs_confirmation())
    }

    pub fn has_pending_tool_uses(&self) -> bool {
        !self.tool_use.pending_tool_uses().is_empty()
    }

    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }

    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let git_store = self.project().read(cx).git_store().clone();
        let restore = git_store.update(cx, |git_store, cx| {
            git_store.restore_checkpoint(checkpoint.git_checkpoint.clone(), cx)
        });

        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                }
                this.pending_checkpoint = None;
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }

    fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
        let pending_checkpoint = if self.is_generating() {
            return;
        } else if let Some(checkpoint) = self.pending_checkpoint.take() {
            checkpoint
        } else {
            return;
        };

        self.finalize_checkpoint(pending_checkpoint, cx);
    }

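    /// Takes a fresh checkpoint and compares it against `pending_checkpoint`;
    /// the pending checkpoint is only kept (and surfaced in the UI) when the
    /// repository actually changed, or when the comparison itself failed.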
    fn finalize_checkpoint(
        &mut self,
        pending_checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) {
        let git_store = self.project.read(cx).git_store().clone();
        let final_checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
        cx.spawn(async move |this, cx| match final_checkpoint.await {
            Ok(final_checkpoint) => {
                let equal = git_store
                    .update(cx, |store, cx| {
                        store.compare_checkpoints(
                            pending_checkpoint.git_checkpoint.clone(),
                            final_checkpoint.clone(),
                            cx,
                        )
                    })?
                    .await
                    .unwrap_or(false);

                if !equal {
                    this.update(cx, |this, cx| {
                        this.insert_checkpoint(pending_checkpoint, cx)
                    })?;
                }

                Ok(())
            }
            Err(_) => this.update(cx, |this, cx| {
                this.insert_checkpoint(pending_checkpoint, cx)
            }),
        })
        .detach();
    }

    fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
        self.checkpoints_by_message
            .insert(checkpoint.message_id, checkpoint);
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();
    }

    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }

    pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
        let Some(message_ix) = self
            .messages
            .iter()
            .rposition(|message| message.id == message_id)
        else {
            return;
        };
        for deleted_message in self.messages.drain(message_ix..) {
            self.checkpoints_by_message.remove(&deleted_message.id);
        }
        cx.notify();
    }

    pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AgentContext> {
        self.messages
            .iter()
            .find(|message| message.id == id)
            .into_iter()
            .flat_map(|message| message.loaded_context.contexts.iter())
    }

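    /// Returns whether the message at index `ix` ends a turn: either it is
    /// the final message while nothing is generating, or it is an assistant
    /// message directly followed by a visible user message.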
    pub fn is_turn_end(&self, ix: usize) -> bool {
        if self.messages.is_empty() {
            return false;
        }

        if !self.is_generating() && ix == self.messages.len() - 1 {
            return true;
        }

        let Some(message) = self.messages.get(ix) else {
            return false;
        };

        if message.role != Role::Assistant {
            return false;
        }

        self.messages
            .get(ix + 1)
            .map(|next_message| next_message.role == Role::User && !next_message.is_hidden)
            .unwrap_or(false)
    }

    pub fn tool_use_limit_reached(&self) -> bool {
        self.tool_use_limit_reached
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        self.tool_use
            .pending_tool_uses()
            .iter()
            .all(|pending_tool_use| pending_tool_use.status.is_error())
    }

    /// Returns whether any pending tool uses may perform edits
    pub fn has_pending_edit_tool_uses(&self) -> bool {
        self.tool_use
            .pending_tool_uses()
            .iter()
            .filter(|pending_tool_use| !pending_tool_use.status.is_error())
            .any(|pending_tool_use| pending_tool_use.may_perform_edits)
    }

    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, &self.project, cx)
    }

    pub fn tool_results_for_message(
        &self,
        assistant_message_id: MessageId,
    ) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(assistant_message_id)
    }

    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }

    pub fn output_for_tool(&self, id: &LanguageModelToolUseId) -> Option<&Arc<str>> {
        match &self.tool_use.tool_result(id)?.content {
            LanguageModelToolResultContent::Text(text) => Some(text),
            LanguageModelToolResultContent::Image(_) => {
                // TODO: We should display the image
                None
            }
        }
    }

    pub fn card_for_tool(&self, id: &LanguageModelToolUseId) -> Option<AnyToolCard> {
        self.tool_use.tool_result_card(id).cloned()
    }

    /// Returns tools that are both enabled and supported by the model.
    pub fn available_tools(
        &self,
        cx: &App,
        model: Arc<dyn LanguageModel>,
    ) -> Vec<LanguageModelRequestTool> {
        if model.supports_tools() {
            self.profile
                .enabled_tools(cx)
                .into_iter()
                .filter_map(|(name, tool)| {
                    // Skip tools that cannot be supported
                    let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
                    Some(LanguageModelRequestTool {
                        name: name.into(),
                        description: tool.description(),
                        input_schema,
                    })
                })
                .collect()
        } else {
            Vec::default()
        }
    }

    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        loaded_context: ContextLoadResult,
        git_checkpoint: Option<GitStoreCheckpoint>,
        creases: Vec<MessageCrease>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        if !loaded_context.referenced_buffers.is_empty() {
            self.action_log.update(cx, |log, cx| {
                for buffer in loaded_context.referenced_buffers {
                    log.buffer_read(buffer, cx);
                }
            });
        }

        let message_id = self.insert_message(
            Role::User,
            vec![MessageSegment::Text(text.into())],
            loaded_context.loaded_context,
            creases,
            false,
            cx,
        );

        if let Some(git_checkpoint) = git_checkpoint {
            self.pending_checkpoint = Some(ThreadCheckpoint {
                message_id,
                git_checkpoint,
            });
        }

        self.auto_capture_telemetry(cx);

        message_id
    }

    pub fn insert_invisible_continue_message(&mut self, cx: &mut Context<Self>) -> MessageId {
        let id = self.insert_message(
            Role::User,
            vec![MessageSegment::Text("Continue where you left off".into())],
            LoadedContext::default(),
            vec![],
            true,
            cx,
        );
        self.pending_checkpoint = None;

        id
    }

    pub fn insert_assistant_message(
        &mut self,
        segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        self.insert_message(
            Role::Assistant,
            segments,
            LoadedContext::default(),
            Vec::new(),
            false,
            cx,
        )
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        segments: Vec<MessageSegment>,
        loaded_context: LoadedContext,
        creases: Vec<MessageCrease>,
        is_hidden: bool,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            segments,
            loaded_context,
            creases,
            is_hidden,
            ui_only: false,
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    pub fn edit_message(
        &mut self,
        id: MessageId,
        new_role: Role,
        new_segments: Vec<MessageSegment>,
        creases: Vec<MessageCrease>,
        loaded_context: Option<LoadedContext>,
        checkpoint: Option<GitStoreCheckpoint>,
        cx: &mut Context<Self>,
    ) -> bool {
        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
            return false;
        };
        message.role = new_role;
        message.segments = new_segments;
        message.creases = creases;
        if let Some(context) = loaded_context {
            message.loaded_context = context;
        }
        if let Some(git_checkpoint) = checkpoint {
            self.checkpoints_by_message.insert(
                id,
                ThreadCheckpoint {
                    message_id: id,
                    git_checkpoint,
                },
            );
        }
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageEdited(id));
        true
    }

    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
            return false;
        };
        self.messages.remove(index);
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageDeleted(id));
        true
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Agent:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(content) => text.push_str(content),
                    MessageSegment::Thinking { text: content, .. } => {
                        text.push_str(&format!("<think>{}</think>", content))
                    }
                    MessageSegment::RedactedThinking(_) => {}
                }
            }
            text.push('\n');
        }

        text
    }

    /// Serializes this thread into a format for storage or telemetry.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary().or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .filter(|message| !message.ui_only)
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking { text, signature } => {
                                    SerializedMessageSegment::Thinking {
                                        text: text.clone(),
                                        signature: signature.clone(),
                                    }
                                }
                                MessageSegment::RedactedThinking(data) => {
                                    SerializedMessageSegment::RedactedThinking {
                                        data: data.clone(),
                                    }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                                output: tool_result.output.clone(),
                            })
                            .collect(),
                        context: message.loaded_context.text.clone(),
                        creases: message
                            .creases
                            .iter()
                            .map(|crease| SerializedCrease {
                                start: crease.range.start,
                                end: crease.range.end,
                                icon_path: crease.icon_path.clone(),
                                label: crease.label.clone(),
                            })
                            .collect(),
                        is_hidden: message.is_hidden,
                    })
                    .collect(),
                initial_project_snapshot,
                cumulative_token_usage: this.cumulative_token_usage,
                request_token_usage: this.request_token_usage.clone(),
                detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
                exceeded_window_error: this.exceeded_window_error.clone(),
                model: this
                    .configured_model
                    .as_ref()
                    .map(|model| SerializedLanguageModel {
                        provider: model.provider.id().0.to_string(),
                        model: model.model.id().0.to_string(),
                    }),
                completion_mode: Some(this.completion_mode),
                tool_use_limit_reached: this.tool_use_limit_reached,
                profile: Some(this.profile.id().clone()),
            })
        })
    }

    pub fn remaining_turns(&self) -> u32 {
        self.remaining_turns
    }

    pub fn set_remaining_turns(&mut self, remaining_turns: u32) {
        self.remaining_turns = remaining_turns;
    }

    pub fn send_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        if self.remaining_turns == 0 {
            return;
        }

        self.remaining_turns -= 1;

        self.flush_notifications(model.clone(), intent, cx);

        self.finalize_pending_checkpoint(cx);
        self.stream_completion(
            self.to_completion_request(model.clone(), intent, cx),
            model,
            intent,
            window,
            cx,
        );
    }

    pub fn retry_last_completion(
        &mut self,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        // Clear any existing error state
        self.retry_state = None;

        // Use the last error context if available, otherwise fall back to the
        // configured model (initializing it from the registry if necessary).
        let (model, intent) = if let Some((model, intent)) = self.last_error_context.take() {
            (model, intent)
        } else if let Some(configured_model) = self.get_or_init_configured_model(cx) {
            let model = configured_model.model.clone();
            let intent = if self.has_pending_tool_uses() {
                CompletionIntent::ToolResults
            } else {
                CompletionIntent::UserPrompt
            };
            (model, intent)
        } else {
            return;
        };

        self.send_to_model(model, intent, window, cx);
    }

    pub fn enable_burn_mode_and_retry(
        &mut self,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        self.completion_mode = CompletionMode::Burn;
        cx.emit(ThreadEvent::ProfileChanged);
        self.retry_last_completion(window, cx);
    }

    pub fn used_tools_since_last_user_message(&self) -> bool {
        for message in self.messages.iter().rev() {
            if self.tool_use.message_has_tool_results(message.id) {
                return true;
            } else if message.role == Role::User {
                return false;
            }
        }

        false
    }

    pub fn to_completion_request(
        &self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        cx: &mut Context<Self>,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            thread_id: Some(self.id.to_string()),
            prompt_id: Some(self.last_prompt_id.to_string()),
            intent: Some(intent),
            mode: None,
            messages: vec![],
            tools: Vec::new(),
            tool_choice: None,
            stop: Vec::new(),
            temperature: AgentSettings::temperature_for_model(&model, cx),
            thinking_allowed: true,
        };

        let available_tools = self.available_tools(cx, model.clone());
        let available_tool_names = available_tools
            .iter()
            .map(|tool| tool.name.clone())
            .collect();

        let model_context = &ModelContext {
            available_tools: available_tool_names,
        };

        if let Some(project_context) = self.project_context.borrow().as_ref() {
            match self
                .prompt_builder
                .generate_assistant_system_prompt(project_context, model_context)
            {
                Err(err) => {
                    let message = format!("{err:?}").into();
                    log::error!("{message}");
                    cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                        header: "Error generating system prompt".into(),
                        message,
                    }));
                }
                Ok(system_prompt) => {
                    request.messages.push(LanguageModelRequestMessage {
                        role: Role::System,
                        content: vec![MessageContent::Text(system_prompt)],
                        cache: true,
                    });
                }
            }
        } else {
            let message = "Context for system prompt unexpectedly not ready.".into();
            log::error!("{message}");
            cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                header: "Error generating system prompt".into(),
                message,
            }));
        }

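        // Track the most recent request message whose tool uses are all
        // resolved; it becomes the cache breakpoint marked further below (see
        // the Anthropic prompt-caching link), so the stable prefix of the
        // conversation can be reused across requests.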
        let mut message_ix_to_cache = None;
        for message in &self.messages {
            // ui_only messages are for the UI only, not for the model
            if message.ui_only {
                continue;
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            message
                .loaded_context
                .add_to_request_message(&mut request_message);

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => {
                        let text = text.trim_end();
                        if !text.is_empty() {
                            request_message
                                .content
                                .push(MessageContent::Text(text.into()));
                        }
                    }
                    MessageSegment::Thinking { text, signature } => {
                        if !text.is_empty() {
                            request_message.content.push(MessageContent::Thinking {
                                text: text.into(),
                                signature: signature.clone(),
                            });
                        }
                    }
                    MessageSegment::RedactedThinking(data) => {
                        request_message
                            .content
                            .push(MessageContent::RedactedThinking(data.clone()));
                    }
                };
            }

            let mut cache_message = true;
            let mut tool_results_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };
            for (tool_use, tool_result) in self.tool_use.tool_results(message.id) {
                if let Some(tool_result) = tool_result {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                    tool_results_message
                        .content
                        .push(MessageContent::ToolResult(LanguageModelToolResult {
                            tool_use_id: tool_use.id.clone(),
                            tool_name: tool_result.tool_name.clone(),
                            is_error: tool_result.is_error,
                            content: if tool_result.content.is_empty() {
                                // Surprisingly, the API fails if we return an empty string here.
                                // It thinks we are sending a tool use without a tool result.
                                "<Tool returned an empty string>".into()
                            } else {
                                tool_result.content.clone()
                            },
                            output: None,
                        }));
                } else {
                    cache_message = false;
                    log::debug!(
                        "skipped tool use {:?} because it is still pending",
                        tool_use
                    );
                }
            }

            if cache_message {
                message_ix_to_cache = Some(request.messages.len());
            }
            request.messages.push(request_message);

            if !tool_results_message.content.is_empty() {
                if cache_message {
                    message_ix_to_cache = Some(request.messages.len());
                }
                request.messages.push(tool_results_message);
            }
        }

        // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
        if let Some(message_ix_to_cache) = message_ix_to_cache {
            request.messages[message_ix_to_cache].cache = true;
        }

        request.tools = available_tools;
        request.mode = if model.supports_burn_mode() {
            Some(self.completion_mode.into())
        } else {
            Some(CompletionMode::Normal.into())
        };

        request
    }

    fn to_summarize_request(
        &self,
        model: &Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        added_user_message: String,
        cx: &App,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: Some(intent),
            mode: None,
            messages: vec![],
            tools: Vec::new(),
            tool_choice: None,
            stop: Vec::new(),
            temperature: AgentSettings::temperature_for_model(model, cx),
            thinking_allowed: false,
        };

        for message in &self.messages {
            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => request_message
                        .content
                        .push(MessageContent::Text(text.clone())),
                    MessageSegment::Thinking { .. } => {}
                    MessageSegment::RedactedThinking(_) => {}
                }
            }

            if request_message.content.is_empty() {
                continue;
            }

            request.messages.push(request_message);
        }

        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![MessageContent::Text(added_user_message)],
            cache: false,
        });

        request
    }

    /// Inserts auto-generated notifications (if any) into the thread.
    fn flush_notifications(
        &mut self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        cx: &mut Context<Self>,
    ) {
        match intent {
            CompletionIntent::UserPrompt | CompletionIntent::ToolResults => {
                if let Some(pending_tool_use) = self.attach_tracked_files_state(model, cx) {
                    cx.emit(ThreadEvent::ToolFinished {
                        tool_use_id: pending_tool_use.id.clone(),
                        pending_tool_use: Some(pending_tool_use),
                    });
                }
            }
            CompletionIntent::ThreadSummarization
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => {}
        };
    }

    fn attach_tracked_files_state(
        &mut self,
        model: Arc<dyn LanguageModel>,
        cx: &mut App,
    ) -> Option<PendingToolUse> {
        // Represent the notification as a simulated `project_notifications` tool call
        let tool_name = Arc::from("project_notifications");
        let tool = self.tools.read(cx).tool(&tool_name, cx)?;

        if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
            return None;
        }

        if self
            .action_log
            .update(cx, |log, cx| log.unnotified_user_edits(cx).is_none())
        {
            return None;
        }

        let input = serde_json::json!({});
        let request = Arc::new(LanguageModelRequest::default()); // unused
        let window = None;
        let tool_result = tool.run(
            input,
            request,
            self.project.clone(),
            self.action_log.clone(),
            model.clone(),
            window,
            cx,
        );

        let tool_use_id =
            LanguageModelToolUseId::from(format!("project_notifications_{}", self.messages.len()));

        let tool_use = LanguageModelToolUse {
            id: tool_use_id.clone(),
            name: tool_name.clone(),
            raw_input: "{}".to_string(),
            input: serde_json::json!({}),
            is_input_complete: true,
        };

        let tool_output = cx.background_executor().block(tool_result.output);

        // Attach a project_notification tool call to the latest existing
        // Assistant message. We cannot create a new Assistant message
        // because thinking models require a `thinking` block that we
        // cannot mock. We cannot send a notification as a normal
        // (non-tool-use) User message because this distracts the agent
        // too much.
        let tool_message_id = self
            .messages
            .iter()
            .rfind(|message| message.role == Role::Assistant)
            .map(|message| message.id)?;

        let tool_use_metadata = ToolUseMetadata {
            model: model.clone(),
            thread_id: self.id.clone(),
            prompt_id: self.last_prompt_id.clone(),
        };

        self.tool_use
            .request_tool_use(tool_message_id, tool_use, tool_use_metadata.clone(), cx);

        let pending_tool_use = self.tool_use.insert_tool_output(
            tool_use_id.clone(),
            tool_name,
            tool_output,
            self.configured_model.as_ref(),
            self.completion_mode,
        );

        pending_tool_use
    }

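    /// Streams a completion for `request` and applies the resulting events to
    /// the thread: text/thinking chunks extend the last assistant message,
    /// tool-use events are registered with [`ToolUseState`], and errors are
    /// either surfaced via [`ThreadEvent::ShowError`] or scheduled for retry.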
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        window: Option<AnyWindowHandle>,
        cx: &mut Context<Self>,
    ) {
        self.tool_use_limit_reached = false;

        let pending_completion_id = post_inc(&mut self.completion_count);
        let mut request_callback_parameters = if self.request_callback.is_some() {
            Some((request.clone(), Vec::new()))
        } else {
            None
        };
        let prompt_id = self.last_prompt_id.clone();
        let tool_use_metadata = ToolUseMetadata {
            model: model.clone(),
            thread_id: self.id.clone(),
            prompt_id: prompt_id.clone(),
        };

        let completion_mode = request
            .mode
            .unwrap_or(cloud_llm_client::CompletionMode::Normal);

        self.last_received_chunk_at = Some(Instant::now());

        let task = cx.spawn(async move |thread, cx| {
            let stream_completion_future = model.stream_completion(request, &cx);
            let initial_token_usage =
                thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage);
            let stream_completion = async {
                let mut events = stream_completion_future.await?;

                let mut stop_reason = StopReason::EndTurn;
                let mut current_token_usage = TokenUsage::default();

                thread
                    .update(cx, |_thread, cx| {
                        cx.emit(ThreadEvent::NewRequest);
                    })
                    .ok();

                let mut request_assistant_message_id = None;

                while let Some(event) = events.next().await {
                    if let Some((_, response_events)) = request_callback_parameters.as_mut() {
                        response_events
                            .push(event.as_ref().map_err(|error| error.to_string()).cloned());
                    }

                    thread.update(cx, |thread, cx| {
                        match event? {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                request_assistant_message_id =
                                    Some(thread.insert_assistant_message(
                                        vec![MessageSegment::Text(String::new())],
                                        cx,
                                    ));
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
                                thread.update_token_usage_at_last_message(token_usage);
                                thread.cumulative_token_usage = thread.cumulative_token_usage
                                    + token_usage
                                    - current_token_usage;
                                current_token_usage = token_usage;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                thread.received_chunk();

                                cx.emit(ThreadEvent::ReceivedTextChunk);
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant
                                        && !thread.tool_use.has_tool_results(last_message.id)
                                    {
                                        last_message.push_text(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        request_assistant_message_id =
                                            Some(thread.insert_assistant_message(
                                                vec![MessageSegment::Text(chunk.to_string())],
                                                cx,
                                            ));
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::Thinking {
                                text: chunk,
                                signature,
                            } => {
                                thread.received_chunk();

                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant
                                        && !thread.tool_use.has_tool_results(last_message.id)
                                    {
                                        last_message.push_thinking(&chunk, signature);
                                        cx.emit(ThreadEvent::StreamedAssistantThinking(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        request_assistant_message_id =
                                            Some(thread.insert_assistant_message(
                                                vec![MessageSegment::Thinking {
                                                    text: chunk.to_string(),
                                                    signature,
                                                }],
                                                cx,
                                            ));
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::RedactedThinking { data } => {
                                thread.received_chunk();

                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant
                                        && !thread.tool_use.has_tool_results(last_message.id)
                                    {
                                        last_message.push_redacted_thinking(data);
                                    } else {
                                        request_assistant_message_id =
                                            Some(thread.insert_assistant_message(
                                                vec![MessageSegment::RedactedThinking(data)],
                                                cx,
                                            ));
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                let last_assistant_message_id = request_assistant_message_id
                                    .unwrap_or_else(|| {
                                        let new_assistant_message_id =
                                            thread.insert_assistant_message(vec![], cx);
                                        request_assistant_message_id =
                                            Some(new_assistant_message_id);
                                        new_assistant_message_id
                                    });

                                let tool_use_id = tool_use.id.clone();
                                let streamed_input = if tool_use.is_input_complete {
                                    None
                                } else {
                                    Some(tool_use.input.clone())
                                };
1828
1829 let ui_text = thread.tool_use.request_tool_use(
1830 last_assistant_message_id,
1831 tool_use,
1832 tool_use_metadata.clone(),
1833 cx,
1834 );
1835
1836 if let Some(input) = streamed_input {
1837 cx.emit(ThreadEvent::StreamedToolUse {
1838 tool_use_id,
1839 ui_text,
1840 input,
1841 });
1842 }
1843 }
1844 LanguageModelCompletionEvent::ToolUseJsonParseError {
1845 id,
1846 tool_name,
1847 raw_input: invalid_input_json,
1848 json_parse_error,
1849 } => {
1850 thread.receive_invalid_tool_json(
1851 id,
1852 tool_name,
1853 invalid_input_json,
1854 json_parse_error,
1855 window,
1856 cx,
1857 );
1858 }
1859 LanguageModelCompletionEvent::StatusUpdate(status_update) => {
1860 if let Some(completion) = thread
1861 .pending_completions
1862 .iter_mut()
1863 .find(|completion| completion.id == pending_completion_id)
1864 {
1865 match status_update {
1866 CompletionRequestStatus::Queued { position } => {
1867 completion.queue_state =
1868 QueueState::Queued { position };
1869 }
1870 CompletionRequestStatus::Started => {
1871 completion.queue_state = QueueState::Started;
1872 }
1873 CompletionRequestStatus::Failed {
1874 code,
1875 message,
1876 request_id: _,
1877 retry_after,
1878 } => {
1879 return Err(
1880 LanguageModelCompletionError::from_cloud_failure(
1881 model.upstream_provider_name(),
1882 code,
1883 message,
1884 retry_after.map(Duration::from_secs_f64),
1885 ),
1886 );
1887 }
1888 CompletionRequestStatus::UsageUpdated { amount, limit } => {
1889 thread.update_model_request_usage(
1890 amount as u32,
1891 limit,
1892 cx,
1893 );
1894 }
1895 CompletionRequestStatus::ToolUseLimitReached => {
1896 thread.tool_use_limit_reached = true;
1897 cx.emit(ThreadEvent::ToolUseLimitReached);
1898 }
1899 }
1900 }
1901 }
1902 }
1903
1904 thread.touch_updated_at();
1905 cx.emit(ThreadEvent::StreamedCompletion);
1906 cx.notify();
1907
1908 thread.auto_capture_telemetry(cx);
1909 Ok(())
1910 })??;
1911
1912 smol::future::yield_now().await;
1913 }
1914
1915 thread.update(cx, |thread, cx| {
1916 thread.last_received_chunk_at = None;
1917 thread
1918 .pending_completions
1919 .retain(|completion| completion.id != pending_completion_id);
1920
1921 // If there is a response without tool use, summarize the message. Otherwise,
1922 // allow two tool uses before summarizing.
1923 if matches!(thread.summary, ThreadSummary::Pending)
1924 && thread.messages.len() >= 2
1925 && (!thread.has_pending_tool_uses() || thread.messages.len() >= 6)
1926 {
1927 thread.summarize(cx);
1928 }
1929 })?;
1930
1931 anyhow::Ok(stop_reason)
1932 };
1933
1934 let result = stream_completion.await;
1935 let mut retry_scheduled = false;
1936
1937 thread
1938 .update(cx, |thread, cx| {
1939 thread.finalize_pending_checkpoint(cx);
1940 match result.as_ref() {
1941 Ok(stop_reason) => {
1942 match stop_reason {
1943 StopReason::ToolUse => {
1944 let tool_uses =
1945 thread.use_pending_tools(window, model.clone(), cx);
1946 cx.emit(ThreadEvent::UsePendingTools { tool_uses });
1947 }
1948 StopReason::EndTurn | StopReason::MaxTokens => {
1949 thread.project.update(cx, |project, cx| {
1950 project.set_agent_location(None, cx);
1951 });
1952 }
1953 StopReason::Refusal => {
1954 thread.project.update(cx, |project, cx| {
1955 project.set_agent_location(None, cx);
1956 });
1957
1958 // Remove the turn that was refused.
1959 //
1960 // https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/handle-streaming-refusals#reset-context-after-refusal
1961 {
1962 let mut messages_to_remove = Vec::new();
1963
1964 for (ix, message) in
1965 thread.messages.iter().enumerate().rev()
1966 {
1967 messages_to_remove.push(message.id);
1968
1969 if message.role == Role::User {
1970 if ix == 0 {
1971 break;
1972 }
1973
1974 if let Some(prev_message) =
1975 thread.messages.get(ix - 1)
1976 {
1977 if prev_message.role == Role::Assistant {
1978 break;
1979 }
1980 }
1981 }
1982 }
1983
1984 for message_id in messages_to_remove {
1985 thread.delete_message(message_id, cx);
1986 }
1987 }
1988
1989 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1990 header: "Language model refusal".into(),
1991 message:
1992 "Model refused to generate content for safety reasons."
1993 .into(),
1994 }));
1995 }
1996 }
1997
1998 // We successfully completed, so cancel any remaining retries.
1999 thread.retry_state = None;
2000 }
2001 Err(error) => {
2002 thread.project.update(cx, |project, cx| {
2003 project.set_agent_location(None, cx);
2004 });
2005
2006 if error.is::<PaymentRequiredError>() {
2007 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
2008 } else if let Some(error) =
2009 error.downcast_ref::<ModelRequestLimitReachedError>()
2010 {
2011 cx.emit(ThreadEvent::ShowError(
2012 ThreadError::ModelRequestLimitReached { plan: error.plan },
2013 ));
2014 } else if let Some(completion_error) =
2015 error.downcast_ref::<LanguageModelCompletionError>()
2016 {
2017 match &completion_error {
2018 LanguageModelCompletionError::PromptTooLarge {
2019 tokens, ..
2020 } => {
2021 let tokens = tokens.unwrap_or_else(|| {
2022 // We didn't get an exact token count from the API, so fall back on our estimate.
2023 thread
2024 .total_token_usage()
2025 .map(|usage| usage.total)
2026 .unwrap_or(0)
2027 // We know the context window was exceeded in practice, so if our estimate was
2028 // lower than max tokens, the estimate was wrong; return that we exceeded by 1.
2029 .max(
2030 model
2031 .max_token_count_for_mode(completion_mode)
2032 .saturating_add(1),
2033 )
2034 });
2035 thread.exceeded_window_error = Some(ExceededWindowError {
2036 model_id: model.id(),
2037 token_count: tokens,
2038 });
2039 cx.notify();
2040 }
2041 _ => {
2042 if let Some(retry_strategy) =
2043 Thread::get_retry_strategy(completion_error)
2044 {
2045 log::info!(
2046 "Retrying with {:?} for language model completion error {:?}",
2047 retry_strategy,
2048 completion_error
2049 );
2050
2051 retry_scheduled = thread
2052 .handle_retryable_error_with_delay(
2053 &completion_error,
2054 Some(retry_strategy),
2055 model.clone(),
2056 intent,
2057 window,
2058 cx,
2059 );
2060 }
2061 }
2062 }
2063 }
2064
2065 if !retry_scheduled {
2066 thread.cancel_last_completion(window, cx);
2067 }
2068 }
2069 }
2070
2071 if !retry_scheduled {
2072 cx.emit(ThreadEvent::Stopped(result.map_err(Arc::new)));
2073 }
2074
2075 if let Some((request_callback, (request, response_events))) = thread
2076 .request_callback
2077 .as_mut()
2078 .zip(request_callback_parameters.as_ref())
2079 {
2080 request_callback(request, response_events);
2081 }
2082
2083 thread.auto_capture_telemetry(cx);
2084
2085 if let Ok(initial_usage) = initial_token_usage {
2086 let usage = thread.cumulative_token_usage - initial_usage;
2087
2088 telemetry::event!(
2089 "Assistant Thread Completion",
2090 thread_id = thread.id().to_string(),
2091 prompt_id = prompt_id,
2092 model = model.telemetry_id(),
2093 model_provider = model.provider_id().to_string(),
2094 input_tokens = usage.input_tokens,
2095 output_tokens = usage.output_tokens,
2096 cache_creation_input_tokens = usage.cache_creation_input_tokens,
2097 cache_read_input_tokens = usage.cache_read_input_tokens,
2098 );
2099 }
2100 })
2101 .ok();
2102 });
2103
2104 self.pending_completions.push(PendingCompletion {
2105 id: pending_completion_id,
2106 queue_state: QueueState::Sending,
2107 _task: task,
2108 });
2109 }
2110
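    /// Kicks off a background task that asks the configured thread-summary model
    /// for a one-line title; only the first line the model streams back is kept.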
2111 pub fn summarize(&mut self, cx: &mut Context<Self>) {
2112 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
            log::warn!("No thread summary model");
2114 return;
2115 };
2116
2117 if !model.provider.is_authenticated(cx) {
2118 return;
2119 }
2120
2121 let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
2122
2123 let request = self.to_summarize_request(
2124 &model.model,
2125 CompletionIntent::ThreadSummarization,
2126 added_user_message.into(),
2127 cx,
2128 );
2129
2130 self.summary = ThreadSummary::Generating;
2131
2132 self.pending_summary = cx.spawn(async move |this, cx| {
2133 let result = async {
2134 let mut messages = model.model.stream_completion(request, &cx).await?;
2135
2136 let mut new_summary = String::new();
2137 while let Some(event) = messages.next().await {
2138 let Ok(event) = event else {
2139 continue;
2140 };
2141 let text = match event {
2142 LanguageModelCompletionEvent::Text(text) => text,
2143 LanguageModelCompletionEvent::StatusUpdate(
2144 CompletionRequestStatus::UsageUpdated { amount, limit },
2145 ) => {
2146 this.update(cx, |thread, cx| {
2147 thread.update_model_request_usage(amount as u32, limit, cx);
2148 })?;
2149 continue;
2150 }
2151 _ => continue,
2152 };
2153
2154 let mut lines = text.lines();
2155 new_summary.extend(lines.next());
2156
2157 // Stop if the LLM generated multiple lines.
2158 if lines.next().is_some() {
2159 break;
2160 }
2161 }
2162
2163 anyhow::Ok(new_summary)
2164 }
2165 .await;
2166
2167 this.update(cx, |this, cx| {
2168 match result {
2169 Ok(new_summary) => {
2170 if new_summary.is_empty() {
2171 this.summary = ThreadSummary::Error;
2172 } else {
2173 this.summary = ThreadSummary::Ready(new_summary.into());
2174 }
2175 }
2176 Err(err) => {
2177 this.summary = ThreadSummary::Error;
2178 log::error!("Failed to generate thread summary: {}", err);
2179 }
2180 }
2181 cx.emit(ThreadEvent::SummaryGenerated);
2182 })
2183 .log_err()?;
2184
2185 Some(())
2186 });
2187 }
2188
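    /// Maps a completion error to how (and whether) it should be retried.
    ///
    /// A rough sketch of the resulting delays, derived from the constants above and
    /// assuming the backoff computation in `handle_retryable_error_with_delay`:
    ///
    /// ```ignore
    /// // ExponentialBackoff with BASE_RETRY_DELAY = 5s doubles per attempt:
    /// let delay_for = |attempt: u32| Duration::from_secs(5) * 2u32.pow(attempt - 1);
    /// assert_eq!(delay_for(1), Duration::from_secs(5));
    /// assert_eq!(delay_for(4), Duration::from_secs(40)); // MAX_RETRY_ATTEMPTS = 4
    /// // Fixed strategies wait the same `delay` before every attempt.
    /// ```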
2189 fn get_retry_strategy(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2190 use LanguageModelCompletionError::*;
2191
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue, retry up to 4 times: plain HTTP 429s get exponential backoff,
        //   while server-overloaded and rate-limit errors retry at a fixed interval, honoring any
        //   server-provided retry-after.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2196 match error {
2197 HttpResponseError {
2198 status_code: StatusCode::TOO_MANY_REQUESTS,
2199 ..
2200 } => Some(RetryStrategy::ExponentialBackoff {
2201 initial_delay: BASE_RETRY_DELAY,
2202 max_attempts: MAX_RETRY_ATTEMPTS,
2203 }),
2204 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2205 Some(RetryStrategy::Fixed {
2206 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2207 max_attempts: MAX_RETRY_ATTEMPTS,
2208 })
2209 }
2210 UpstreamProviderError {
2211 status,
2212 retry_after,
2213 ..
2214 } => match *status {
2215 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2216 Some(RetryStrategy::Fixed {
2217 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2218 max_attempts: MAX_RETRY_ATTEMPTS,
2219 })
2220 }
2221 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2222 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2223 // Internal Server Error could be anything, retry up to 3 times.
2224 max_attempts: 3,
2225 }),
2226 status => {
2227 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2228 // but we frequently get them in practice. See https://http.dev/529
2229 if status.as_u16() == 529 {
2230 Some(RetryStrategy::Fixed {
2231 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2232 max_attempts: MAX_RETRY_ATTEMPTS,
2233 })
2234 } else {
2235 Some(RetryStrategy::Fixed {
2236 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2237 max_attempts: 2,
2238 })
2239 }
2240 }
2241 },
2242 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2243 delay: BASE_RETRY_DELAY,
2244 max_attempts: 3,
2245 }),
2246 ApiReadResponseError { .. }
2247 | HttpSend { .. }
2248 | DeserializeResponse { .. }
2249 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2250 delay: BASE_RETRY_DELAY,
2251 max_attempts: 3,
2252 }),
2253 // Retrying these errors definitely shouldn't help.
2254 HttpResponseError {
2255 status_code:
2256 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2257 ..
2258 }
2259 | AuthenticationError { .. }
2260 | PermissionError { .. }
2261 | NoApiKey { .. }
2262 | ApiEndpointNotFound { .. }
2263 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2265 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2266 delay: BASE_RETRY_DELAY,
2267 max_attempts: 1,
2268 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2270 HttpResponseError { status_code, .. }
2271 if status_code.is_client_error() || status_code.is_server_error() =>
2272 {
2273 Some(RetryStrategy::Fixed {
2274 delay: BASE_RETRY_DELAY,
2275 max_attempts: 3,
2276 })
2277 }
2278 // Conservatively assume that any other errors are non-retryable
2279 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2280 delay: BASE_RETRY_DELAY,
2281 max_attempts: 2,
2282 }),
2283 }
2284 }
2285
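    /// Attempts to schedule an automatic retry for `error`, returning `true` if one
    /// was scheduled. Auto-retry only happens in Burn Mode; otherwise the error is
    /// surfaced with a Retry button instead.
    ///
    /// Hypothetical call site (a sketch; names as used elsewhere in this file):
    ///
    /// ```ignore
    /// let scheduled = thread.handle_retryable_error_with_delay(
    ///     &completion_error,
    ///     None, // fall back to `get_retry_strategy`
    ///     model.clone(),
    ///     CompletionIntent::UserPrompt,
    ///     window,
    ///     cx,
    /// );
    /// ```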
2286 fn handle_retryable_error_with_delay(
2287 &mut self,
2288 error: &LanguageModelCompletionError,
2289 strategy: Option<RetryStrategy>,
2290 model: Arc<dyn LanguageModel>,
2291 intent: CompletionIntent,
2292 window: Option<AnyWindowHandle>,
2293 cx: &mut Context<Self>,
2294 ) -> bool {
2295 // Store context for the Retry button
2296 self.last_error_context = Some((model.clone(), intent));
2297
2298 // Only auto-retry if Burn Mode is enabled
2299 if self.completion_mode != CompletionMode::Burn {
2300 // Show error with retry options
2301 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2302 message: format!(
2303 "{}\n\nTo automatically retry when similar errors happen, enable Burn Mode.",
2304 error
2305 )
2306 .into(),
2307 can_enable_burn_mode: true,
2308 }));
2309 return false;
2310 }
2311
2312 let Some(strategy) = strategy.or_else(|| Self::get_retry_strategy(error)) else {
2313 return false;
2314 };
2315
2316 let max_attempts = match &strategy {
2317 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
2318 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
2319 };
2320
2321 let retry_state = self.retry_state.get_or_insert(RetryState {
2322 attempt: 0,
2323 max_attempts,
2324 intent,
2325 });
2326
2327 retry_state.attempt += 1;
2328 let attempt = retry_state.attempt;
2329 let max_attempts = retry_state.max_attempts;
2330 let intent = retry_state.intent;
2331
2332 if attempt <= max_attempts {
2333 let delay = match &strategy {
2334 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
2335 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
2336 Duration::from_secs(delay_secs)
2337 }
2338 RetryStrategy::Fixed { delay, .. } => *delay,
2339 };
2340
2341 // Add a transient message to inform the user
2342 let delay_secs = delay.as_secs();
2343 let retry_message = if max_attempts == 1 {
2344 format!("{error}. Retrying in {delay_secs} seconds...")
2345 } else {
2346 format!(
2347 "{error}. Retrying (attempt {attempt} of {max_attempts}) \
2348 in {delay_secs} seconds..."
2349 )
2350 };
2351 log::warn!(
2352 "Retrying completion request (attempt {attempt} of {max_attempts}) \
2353 in {delay_secs} seconds: {error:?}",
2354 );
2355
2356 // Add a UI-only message instead of a regular message
2357 let id = self.next_message_id.post_inc();
2358 self.messages.push(Message {
2359 id,
2360 role: Role::System,
2361 segments: vec![MessageSegment::Text(retry_message)],
2362 loaded_context: LoadedContext::default(),
2363 creases: Vec::new(),
2364 is_hidden: false,
2365 ui_only: true,
2366 });
2367 cx.emit(ThreadEvent::MessageAdded(id));
2368
            // Schedule the retry, using the weak handle the spawn closure already receives.
            cx.spawn(async move |thread, cx| {
                cx.background_executor().timer(delay).await;

                thread
                    .update(cx, |thread, cx| {
                        // Retry the completion
                        thread.send_to_model(model, intent, window, cx);
                    })
                    .log_err();
            })
            .detach();
2383
2384 true
2385 } else {
2386 // Max retries exceeded
2387 self.retry_state = None;
2388
2389 // Stop generating since we're giving up on retrying.
2390 self.pending_completions.clear();
2391
2392 // Show error alongside a Retry button, but no
2393 // Enable Burn Mode button (since it's already enabled)
2394 cx.emit(ThreadEvent::ShowError(ThreadError::RetryableError {
2395 message: format!("Failed after retrying: {}", error).into(),
2396 can_enable_burn_mode: false,
2397 }));
2398
2399 false
2400 }
2401 }
2402
2403 pub fn start_generating_detailed_summary_if_needed(
2404 &mut self,
2405 thread_store: WeakEntity<ThreadStore>,
2406 cx: &mut Context<Self>,
2407 ) {
2408 let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
2409 return;
2410 };
2411
2412 match &*self.detailed_summary_rx.borrow() {
2413 DetailedSummaryState::Generating { message_id, .. }
2414 | DetailedSummaryState::Generated { message_id, .. }
2415 if *message_id == last_message_id =>
2416 {
2417 // Already up-to-date
2418 return;
2419 }
2420 _ => {}
2421 }
2422
2423 let Some(ConfiguredModel { model, provider }) =
2424 LanguageModelRegistry::read_global(cx).thread_summary_model()
2425 else {
2426 return;
2427 };
2428
2429 if !provider.is_authenticated(cx) {
2430 return;
2431 }
2432
2433 let added_user_message = include_str!("./prompts/summarize_thread_detailed_prompt.txt");
2434
2435 let request = self.to_summarize_request(
2436 &model,
2437 CompletionIntent::ThreadContextSummarization,
2438 added_user_message.into(),
2439 cx,
2440 );
2441
2442 *self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
2443 message_id: last_message_id,
2444 };
2445
        // Replace the detailed summarization task if there is one, canceling it. It would
        // probably be better to allow the old task to complete, but this would require logic
        // for choosing which result to prefer (the old task could complete after the new one,
        // resulting in a stale summary).
2450 self.detailed_summary_task = cx.spawn(async move |thread, cx| {
2451 let stream = model.stream_completion_text(request, &cx);
2452 let Some(mut messages) = stream.await.log_err() else {
2453 thread
2454 .update(cx, |thread, _cx| {
2455 *thread.detailed_summary_tx.borrow_mut() =
2456 DetailedSummaryState::NotGenerated;
2457 })
2458 .ok()?;
2459 return None;
2460 };
2461
2462 let mut new_detailed_summary = String::new();
2463
2464 while let Some(chunk) = messages.stream.next().await {
2465 if let Some(chunk) = chunk.log_err() {
2466 new_detailed_summary.push_str(&chunk);
2467 }
2468 }
2469
2470 thread
2471 .update(cx, |thread, _cx| {
2472 *thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
2473 text: new_detailed_summary.into(),
2474 message_id: last_message_id,
2475 };
2476 })
2477 .ok()?;
2478
2479 // Save thread so its summary can be reused later
2480 if let Some(thread) = thread.upgrade() {
2481 if let Ok(Ok(save_task)) = cx.update(|cx| {
2482 thread_store
2483 .update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
2484 }) {
2485 save_task.await.log_err();
2486 }
2487 }
2488
2489 Some(())
2490 });
2491 }
2492
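    /// Waits until detailed-summary generation settles, then returns the generated
    /// summary, falling back to the plain thread text if none was generated.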
2493 pub async fn wait_for_detailed_summary_or_text(
2494 this: &Entity<Self>,
2495 cx: &mut AsyncApp,
2496 ) -> Option<SharedString> {
2497 let mut detailed_summary_rx = this
2498 .read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
2499 .ok()?;
2500 loop {
2501 match detailed_summary_rx.recv().await? {
2502 DetailedSummaryState::Generating { .. } => {}
2503 DetailedSummaryState::NotGenerated => {
2504 return this.read_with(cx, |this, _cx| this.text().into()).ok();
2505 }
2506 DetailedSummaryState::Generated { text, .. } => return Some(text),
2507 }
2508 }
2509 }
2510
2511 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
2512 self.detailed_summary_rx
2513 .borrow()
2514 .text()
2515 .unwrap_or_else(|| self.text().into())
2516 }
2517
2518 pub fn is_generating_detailed_summary(&self) -> bool {
2519 matches!(
2520 &*self.detailed_summary_rx.borrow(),
2521 DetailedSummaryState::Generating { .. }
2522 )
2523 }
2524
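    /// Runs every idle pending tool use against a fresh completion request and
    /// returns them so callers (e.g. the UI) can track their progress.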
2525 pub fn use_pending_tools(
2526 &mut self,
2527 window: Option<AnyWindowHandle>,
2528 model: Arc<dyn LanguageModel>,
2529 cx: &mut Context<Self>,
2530 ) -> Vec<PendingToolUse> {
2531 self.auto_capture_telemetry(cx);
2532 let request =
2533 Arc::new(self.to_completion_request(model.clone(), CompletionIntent::ToolResults, cx));
2534 let pending_tool_uses = self
2535 .tool_use
2536 .pending_tool_uses()
2537 .into_iter()
2538 .filter(|tool_use| tool_use.status.is_idle())
2539 .cloned()
2540 .collect::<Vec<_>>();
2541
2542 for tool_use in pending_tool_uses.iter() {
2543 self.use_pending_tool(tool_use.clone(), request.clone(), model.clone(), window, cx);
2544 }
2545
2546 pending_tool_uses
2547 }
2548
2549 fn use_pending_tool(
2550 &mut self,
2551 tool_use: PendingToolUse,
2552 request: Arc<LanguageModelRequest>,
2553 model: Arc<dyn LanguageModel>,
2554 window: Option<AnyWindowHandle>,
2555 cx: &mut Context<Self>,
2556 ) {
2557 let Some(tool) = self.tools.read(cx).tool(&tool_use.name, cx) else {
2558 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2559 };
2560
2561 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
2562 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2563 }
2564
2565 if tool.needs_confirmation(&tool_use.input, &self.project, cx)
2566 && !AgentSettings::get_global(cx).always_allow_tool_actions
2567 {
2568 self.tool_use.confirm_tool_use(
2569 tool_use.id,
2570 tool_use.ui_text,
2571 tool_use.input,
2572 request,
2573 tool,
2574 );
2575 cx.emit(ThreadEvent::ToolConfirmationNeeded);
2576 } else {
2577 self.run_tool(
2578 tool_use.id,
2579 tool_use.ui_text,
2580 tool_use.input,
2581 request,
2582 tool,
2583 model,
2584 window,
2585 cx,
2586 );
2587 }
2588 }
2589
2590 pub fn handle_hallucinated_tool_use(
2591 &mut self,
2592 tool_use_id: LanguageModelToolUseId,
2593 hallucinated_tool_name: Arc<str>,
2594 window: Option<AnyWindowHandle>,
2595 cx: &mut Context<Thread>,
2596 ) {
2597 let available_tools = self.profile.enabled_tools(cx);
2598
2599 let tool_list = available_tools
2600 .iter()
2601 .map(|(name, tool)| format!("- {}: {}", name, tool.description()))
2602 .collect::<Vec<_>>()
2603 .join("\n");
2604
2605 let error_message = format!(
2606 "The tool '{}' doesn't exist or is not enabled. Available tools:\n{}",
2607 hallucinated_tool_name, tool_list
2608 );
2609
2610 let pending_tool_use = self.tool_use.insert_tool_output(
2611 tool_use_id.clone(),
2612 hallucinated_tool_name,
2613 Err(anyhow!("Missing tool call: {error_message}")),
2614 self.configured_model.as_ref(),
2615 self.completion_mode,
2616 );
2617
2618 cx.emit(ThreadEvent::MissingToolUse {
2619 tool_use_id: tool_use_id.clone(),
2620 ui_text: error_message.into(),
2621 });
2622
2623 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2624 }
2625
2626 pub fn receive_invalid_tool_json(
2627 &mut self,
2628 tool_use_id: LanguageModelToolUseId,
2629 tool_name: Arc<str>,
2630 invalid_json: Arc<str>,
2631 error: String,
2632 window: Option<AnyWindowHandle>,
2633 cx: &mut Context<Thread>,
2634 ) {
2635 log::error!("The model returned invalid input JSON: {invalid_json}");
2636
2637 let pending_tool_use = self.tool_use.insert_tool_output(
2638 tool_use_id.clone(),
2639 tool_name,
2640 Err(anyhow!("Error parsing input JSON: {error}")),
2641 self.configured_model.as_ref(),
2642 self.completion_mode,
2643 );
2644 let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
2645 pending_tool_use.ui_text.clone()
2646 } else {
2647 log::error!(
2648 "There was no pending tool use for tool use {tool_use_id}, even though it finished (with invalid input JSON)."
2649 );
2650 format!("Unknown tool {}", tool_use_id).into()
2651 };
2652
2653 cx.emit(ThreadEvent::InvalidToolInput {
2654 tool_use_id: tool_use_id.clone(),
2655 ui_text,
2656 invalid_input_json: invalid_json,
2657 });
2658
2659 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2660 }
2661
2662 pub fn run_tool(
2663 &mut self,
2664 tool_use_id: LanguageModelToolUseId,
2665 ui_text: impl Into<SharedString>,
2666 input: serde_json::Value,
2667 request: Arc<LanguageModelRequest>,
2668 tool: Arc<dyn Tool>,
2669 model: Arc<dyn LanguageModel>,
2670 window: Option<AnyWindowHandle>,
2671 cx: &mut Context<Thread>,
2672 ) {
2673 let task =
2674 self.spawn_tool_use(tool_use_id.clone(), request, input, tool, model, window, cx);
2675 self.tool_use
2676 .run_pending_tool(tool_use_id, ui_text.into(), task);
2677 }
2678
2679 fn spawn_tool_use(
2680 &mut self,
2681 tool_use_id: LanguageModelToolUseId,
2682 request: Arc<LanguageModelRequest>,
2683 input: serde_json::Value,
2684 tool: Arc<dyn Tool>,
2685 model: Arc<dyn LanguageModel>,
2686 window: Option<AnyWindowHandle>,
2687 cx: &mut Context<Thread>,
2688 ) -> Task<()> {
2689 let tool_name: Arc<str> = tool.name().into();
2690
2691 let tool_result = tool.run(
2692 input,
2693 request,
2694 self.project.clone(),
2695 self.action_log.clone(),
2696 model,
2697 window,
2698 cx,
2699 );
2700
2701 // Store the card separately if it exists
2702 if let Some(card) = tool_result.card.clone() {
2703 self.tool_use
2704 .insert_tool_result_card(tool_use_id.clone(), card);
2705 }
2706
2707 cx.spawn({
2708 async move |thread: WeakEntity<Thread>, cx| {
2709 let output = tool_result.output.await;
2710
2711 thread
2712 .update(cx, |thread, cx| {
2713 let pending_tool_use = thread.tool_use.insert_tool_output(
2714 tool_use_id.clone(),
2715 tool_name,
2716 output,
2717 thread.configured_model.as_ref(),
2718 thread.completion_mode,
2719 );
2720 thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2721 })
2722 .ok();
2723 }
2724 })
2725 }
2726
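    /// Called whenever a tool use completes. Once *all* pending tools have finished
    /// (and the run wasn't canceled), the accumulated tool results are automatically
    /// sent back to the model.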
2727 fn tool_finished(
2728 &mut self,
2729 tool_use_id: LanguageModelToolUseId,
2730 pending_tool_use: Option<PendingToolUse>,
2731 canceled: bool,
2732 window: Option<AnyWindowHandle>,
2733 cx: &mut Context<Self>,
2734 ) {
2735 if self.all_tools_finished() {
2736 if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
2737 if !canceled {
2738 self.send_to_model(model.clone(), CompletionIntent::ToolResults, window, cx);
2739 }
2740 self.auto_capture_telemetry(cx);
2741 }
2742 }
2743
2744 cx.emit(ThreadEvent::ToolFinished {
2745 tool_use_id,
2746 pending_tool_use,
2747 });
2748 }
2749
2750 /// Cancels the last pending completion, if there are any pending.
2751 ///
2752 /// Returns whether a completion was canceled.
2753 pub fn cancel_last_completion(
2754 &mut self,
2755 window: Option<AnyWindowHandle>,
2756 cx: &mut Context<Self>,
2757 ) -> bool {
2758 let mut canceled = self.pending_completions.pop().is_some() || self.retry_state.is_some();
2759
2760 self.retry_state = None;
2761
2762 for pending_tool_use in self.tool_use.cancel_pending() {
2763 canceled = true;
2764 self.tool_finished(
2765 pending_tool_use.id.clone(),
2766 Some(pending_tool_use),
2767 true,
2768 window,
2769 cx,
2770 );
2771 }
2772
2773 if canceled {
2774 cx.emit(ThreadEvent::CompletionCanceled);
2775
2776 // When canceled, we always want to insert the checkpoint.
2777 // (We skip over finalize_pending_checkpoint, because it
2778 // would conclude we didn't have anything to insert here.)
2779 if let Some(checkpoint) = self.pending_checkpoint.take() {
2780 self.insert_checkpoint(checkpoint, cx);
2781 }
2782 } else {
2783 self.finalize_pending_checkpoint(cx);
2784 }
2785
2786 canceled
2787 }
2788
2789 /// Signals that any in-progress editing should be canceled.
2790 ///
2791 /// This method is used to notify listeners (like ActiveThread) that
2792 /// they should cancel any editing operations.
2793 pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
2794 cx.emit(ThreadEvent::CancelEditing);
2795 }
2796
2797 pub fn feedback(&self) -> Option<ThreadFeedback> {
2798 self.feedback
2799 }
2800
2801 pub fn message_feedback(&self, message_id: MessageId) -> Option<ThreadFeedback> {
2802 self.message_feedback.get(&message_id).copied()
2803 }
2804
2805 pub fn report_message_feedback(
2806 &mut self,
2807 message_id: MessageId,
2808 feedback: ThreadFeedback,
2809 cx: &mut Context<Self>,
2810 ) -> Task<Result<()>> {
2811 if self.message_feedback.get(&message_id) == Some(&feedback) {
2812 return Task::ready(Ok(()));
2813 }
2814
2815 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2816 let serialized_thread = self.serialize(cx);
2817 let thread_id = self.id().clone();
2818 let client = self.project.read(cx).client();
2819
2820 let enabled_tool_names: Vec<String> = self
2821 .profile
2822 .enabled_tools(cx)
2823 .iter()
2824 .map(|(name, _)| name.clone().into())
2825 .collect();
2826
2827 self.message_feedback.insert(message_id, feedback);
2828
2829 cx.notify();
2830
2831 let message_content = self
2832 .message(message_id)
2833 .map(|msg| msg.to_string())
2834 .unwrap_or_default();
2835
2836 cx.background_spawn(async move {
2837 let final_project_snapshot = final_project_snapshot.await;
2838 let serialized_thread = serialized_thread.await?;
2839 let thread_data =
2840 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
2841
2842 let rating = match feedback {
2843 ThreadFeedback::Positive => "positive",
2844 ThreadFeedback::Negative => "negative",
2845 };
2846 telemetry::event!(
2847 "Assistant Thread Rated",
2848 rating,
2849 thread_id,
2850 enabled_tool_names,
2851 message_id = message_id.0,
2852 message_content,
2853 thread_data,
2854 final_project_snapshot
2855 );
2856 client.telemetry().flush_events().await;
2857
2858 Ok(())
2859 })
2860 }
2861
2862 pub fn report_feedback(
2863 &mut self,
2864 feedback: ThreadFeedback,
2865 cx: &mut Context<Self>,
2866 ) -> Task<Result<()>> {
2867 let last_assistant_message_id = self
2868 .messages
2869 .iter()
2870 .rev()
2871 .find(|msg| msg.role == Role::Assistant)
2872 .map(|msg| msg.id);
2873
2874 if let Some(message_id) = last_assistant_message_id {
2875 self.report_message_feedback(message_id, feedback, cx)
2876 } else {
2877 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2878 let serialized_thread = self.serialize(cx);
2879 let thread_id = self.id().clone();
2880 let client = self.project.read(cx).client();
2881 self.feedback = Some(feedback);
2882 cx.notify();
2883
2884 cx.background_spawn(async move {
2885 let final_project_snapshot = final_project_snapshot.await;
2886 let serialized_thread = serialized_thread.await?;
2887 let thread_data = serde_json::to_value(serialized_thread)
2888 .unwrap_or_else(|_| serde_json::Value::Null);
2889
2890 let rating = match feedback {
2891 ThreadFeedback::Positive => "positive",
2892 ThreadFeedback::Negative => "negative",
2893 };
2894 telemetry::event!(
2895 "Assistant Thread Rated",
2896 rating,
2897 thread_id,
2898 thread_data,
2899 final_project_snapshot
2900 );
2901 client.telemetry().flush_events().await;
2902
2903 Ok(())
2904 })
2905 }
2906 }
2907
2908 /// Create a snapshot of the current project state including git information and unsaved buffers.
2909 fn project_snapshot(
2910 project: Entity<Project>,
2911 cx: &mut Context<Self>,
2912 ) -> Task<Arc<ProjectSnapshot>> {
2913 let git_store = project.read(cx).git_store().clone();
2914 let worktree_snapshots: Vec<_> = project
2915 .read(cx)
2916 .visible_worktrees(cx)
2917 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
2918 .collect();
2919
2920 cx.spawn(async move |_, cx| {
2921 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
2922
2923 let mut unsaved_buffers = Vec::new();
2924 cx.update(|app_cx| {
2925 let buffer_store = project.read(app_cx).buffer_store();
2926 for buffer_handle in buffer_store.read(app_cx).buffers() {
2927 let buffer = buffer_handle.read(app_cx);
2928 if buffer.is_dirty() {
2929 if let Some(file) = buffer.file() {
2930 let path = file.path().to_string_lossy().to_string();
2931 unsaved_buffers.push(path);
2932 }
2933 }
2934 }
2935 })
2936 .ok();
2937
2938 Arc::new(ProjectSnapshot {
2939 worktree_snapshots,
2940 unsaved_buffer_paths: unsaved_buffers,
2941 timestamp: Utc::now(),
2942 })
2943 })
2944 }
2945
2946 fn worktree_snapshot(
2947 worktree: Entity<project::Worktree>,
2948 git_store: Entity<GitStore>,
2949 cx: &App,
2950 ) -> Task<WorktreeSnapshot> {
2951 cx.spawn(async move |cx| {
2952 // Get worktree path and snapshot
2953 let worktree_info = cx.update(|app_cx| {
2954 let worktree = worktree.read(app_cx);
2955 let path = worktree.abs_path().to_string_lossy().to_string();
2956 let snapshot = worktree.snapshot();
2957 (path, snapshot)
2958 });
2959
2960 let Ok((worktree_path, _snapshot)) = worktree_info else {
2961 return WorktreeSnapshot {
2962 worktree_path: String::new(),
2963 git_state: None,
2964 };
2965 };
2966
2967 let git_state = git_store
2968 .update(cx, |git_store, cx| {
2969 git_store
2970 .repositories()
2971 .values()
2972 .find(|repo| {
2973 repo.read(cx)
2974 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
2975 .is_some()
2976 })
2977 .cloned()
2978 })
2979 .ok()
2980 .flatten()
2981 .map(|repo| {
2982 repo.update(cx, |repo, _| {
2983 let current_branch =
2984 repo.branch.as_ref().map(|branch| branch.name().to_owned());
2985 repo.send_job(None, |state, _| async move {
2986 let RepositoryState::Local { backend, .. } = state else {
2987 return GitState {
2988 remote_url: None,
2989 head_sha: None,
2990 current_branch,
2991 diff: None,
2992 };
2993 };
2994
2995 let remote_url = backend.remote_url("origin");
2996 let head_sha = backend.head_sha().await;
2997 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
2998
2999 GitState {
3000 remote_url,
3001 head_sha,
3002 current_branch,
3003 diff,
3004 }
3005 })
3006 })
3007 });
3008
            let git_state = match git_state.and_then(|git_state| git_state.ok()) {
                Some(git_state) => git_state.await.ok(),
                None => None,
            };
3016
3017 WorktreeSnapshot {
3018 worktree_path,
3019 git_state,
3020 }
3021 })
3022 }
3023
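    /// Renders the thread as Markdown: the summary as the title, one `## Role`
    /// section per message, and fenced JSON blocks for tool inputs and results.
    ///
    /// Illustrative output shape (hypothetical thread):
    ///
    /// ```text
    /// # Explain this code
    ///
    /// ## User
    /// ...
    ///
    /// ## Agent
    /// **Use Tool: read_file (tool_use_1)**
    /// ```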
3024 pub fn to_markdown(&self, cx: &App) -> Result<String> {
3025 let mut markdown = Vec::new();
3026
3027 let summary = self.summary().or_default();
3028 writeln!(markdown, "# {summary}\n")?;
3029
3030 for message in self.messages() {
3031 writeln!(
3032 markdown,
3033 "## {role}\n",
3034 role = match message.role {
3035 Role::User => "User",
3036 Role::Assistant => "Agent",
3037 Role::System => "System",
3038 }
3039 )?;
3040
3041 if !message.loaded_context.text.is_empty() {
3042 writeln!(markdown, "{}", message.loaded_context.text)?;
3043 }
3044
3045 if !message.loaded_context.images.is_empty() {
3046 writeln!(
3047 markdown,
3048 "\n{} images attached as context.\n",
3049 message.loaded_context.images.len()
3050 )?;
3051 }
3052
3053 for segment in &message.segments {
3054 match segment {
3055 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
3056 MessageSegment::Thinking { text, .. } => {
3057 writeln!(markdown, "<think>\n{}\n</think>\n", text)?
3058 }
3059 MessageSegment::RedactedThinking(_) => {}
3060 }
3061 }
3062
3063 for tool_use in self.tool_uses_for_message(message.id, cx) {
3064 writeln!(
3065 markdown,
3066 "**Use Tool: {} ({})**",
3067 tool_use.name, tool_use.id
3068 )?;
3069 writeln!(markdown, "```json")?;
3070 writeln!(
3071 markdown,
3072 "{}",
3073 serde_json::to_string_pretty(&tool_use.input)?
3074 )?;
3075 writeln!(markdown, "```")?;
3076 }
3077
3078 for tool_result in self.tool_results_for_message(message.id) {
3079 write!(markdown, "\n**Tool Results: {}", tool_result.tool_use_id)?;
3080 if tool_result.is_error {
3081 write!(markdown, " (Error)")?;
3082 }
3083
3084 writeln!(markdown, "**\n")?;
3085 match &tool_result.content {
3086 LanguageModelToolResultContent::Text(text) => {
3087 writeln!(markdown, "{text}")?;
3088 }
3089 LanguageModelToolResultContent::Image(image) => {
                        writeln!(markdown, "![Image]({})", image.source)?;
3091 }
3092 }
3093
3094 if let Some(output) = tool_result.output.as_ref() {
3095 writeln!(
3096 markdown,
3097 "\n\nDebug Output:\n\n```json\n{}\n```\n",
3098 serde_json::to_string_pretty(output)?
3099 )?;
3100 }
3101 }
3102 }
3103
3104 Ok(String::from_utf8_lossy(&markdown).to_string())
3105 }
3106
3107 pub fn keep_edits_in_range(
3108 &mut self,
3109 buffer: Entity<language::Buffer>,
3110 buffer_range: Range<language::Anchor>,
3111 cx: &mut Context<Self>,
3112 ) {
3113 self.action_log.update(cx, |action_log, cx| {
3114 action_log.keep_edits_in_range(buffer, buffer_range, cx)
3115 });
3116 }
3117
3118 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
3119 self.action_log
3120 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
3121 }
3122
3123 pub fn reject_edits_in_ranges(
3124 &mut self,
3125 buffer: Entity<language::Buffer>,
3126 buffer_ranges: Vec<Range<language::Anchor>>,
3127 cx: &mut Context<Self>,
3128 ) -> Task<Result<()>> {
3129 self.action_log.update(cx, |action_log, cx| {
3130 action_log.reject_edits_in_ranges(buffer, buffer_ranges, cx)
3131 })
3132 }
3133
3134 pub fn action_log(&self) -> &Entity<ActionLog> {
3135 &self.action_log
3136 }
3137
3138 pub fn project(&self) -> &Entity<Project> {
3139 &self.project
3140 }
3141
3142 pub fn auto_capture_telemetry(&mut self, cx: &mut Context<Self>) {
3143 if !cx.has_flag::<feature_flags::ThreadAutoCaptureFeatureFlag>() {
3144 return;
3145 }
3146
3147 let now = Instant::now();
3148 if let Some(last) = self.last_auto_capture_at {
3149 if now.duration_since(last).as_secs() < 10 {
3150 return;
3151 }
3152 }
3153
3154 self.last_auto_capture_at = Some(now);
3155
3156 let thread_id = self.id().clone();
3157 let github_login = self
3158 .project
3159 .read(cx)
3160 .user_store()
3161 .read(cx)
3162 .current_user()
3163 .map(|user| user.github_login.clone());
3164 let client = self.project.read(cx).client();
3165 let serialize_task = self.serialize(cx);
3166
3167 cx.background_executor()
3168 .spawn(async move {
3169 if let Ok(serialized_thread) = serialize_task.await {
3170 if let Ok(thread_data) = serde_json::to_value(serialized_thread) {
3171 telemetry::event!(
3172 "Agent Thread Auto-Captured",
3173 thread_id = thread_id.to_string(),
3174 thread_data = thread_data,
3175 auto_capture_reason = "tracked_user",
3176 github_login = github_login
3177 );
3178
3179 client.telemetry().flush_events().await;
3180 }
3181 }
3182 })
3183 .detach();
3184 }
3185
3186 pub fn cumulative_token_usage(&self) -> TokenUsage {
3187 self.cumulative_token_usage
3188 }
3189
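    /// Returns the cumulative token usage recorded for the request that immediately
    /// preceded `message_id`, plus the model's context-window maximum for the current
    /// completion mode.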
3190 pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
3191 let Some(model) = self.configured_model.as_ref() else {
3192 return TotalTokenUsage::default();
3193 };
3194
3195 let max = model
3196 .model
3197 .max_token_count_for_mode(self.completion_mode().into());
3198
3199 let index = self
3200 .messages
3201 .iter()
3202 .position(|msg| msg.id == message_id)
3203 .unwrap_or(0);
3204
3205 if index == 0 {
3206 return TotalTokenUsage { total: 0, max };
3207 }
3208
3209 let token_usage = &self
3210 .request_token_usage
3211 .get(index - 1)
3212 .cloned()
3213 .unwrap_or_default();
3214
3215 TotalTokenUsage {
3216 total: token_usage.total_tokens(),
3217 max,
3218 }
3219 }
3220
3221 pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
3222 let model = self.configured_model.as_ref()?;
3223
3224 let max = model
3225 .model
3226 .max_token_count_for_mode(self.completion_mode().into());
3227
3228 if let Some(exceeded_error) = &self.exceeded_window_error {
3229 if model.model.id() == exceeded_error.model_id {
3230 return Some(TotalTokenUsage {
3231 total: exceeded_error.token_count,
3232 max,
3233 });
3234 }
3235 }
3236
3237 let total = self
3238 .token_usage_at_last_message()
3239 .unwrap_or_default()
3240 .total_tokens();
3241
3242 Some(TotalTokenUsage { total, max })
3243 }
3244
3245 fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
3246 self.request_token_usage
3247 .get(self.messages.len().saturating_sub(1))
3248 .or_else(|| self.request_token_usage.last())
3249 .cloned()
3250 }
3251
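    /// Records `token_usage` for the most recent message, padding `request_token_usage`
    /// with the previous reading so its length stays in step with `messages`.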
3252 fn update_token_usage_at_last_message(&mut self, token_usage: TokenUsage) {
3253 let placeholder = self.token_usage_at_last_message().unwrap_or_default();
3254 self.request_token_usage
3255 .resize(self.messages.len(), placeholder);
3256
3257 if let Some(last) = self.request_token_usage.last_mut() {
3258 *last = token_usage;
3259 }
3260 }
3261
3262 fn update_model_request_usage(&self, amount: u32, limit: UsageLimit, cx: &mut Context<Self>) {
3263 self.cloud_user_store.update(cx, |cloud_user_store, cx| {
3264 cloud_user_store.update_model_request_usage(
3265 ModelRequestUsage(RequestUsage {
3266 amount: amount as i32,
3267 limit,
3268 }),
3269 cx,
3270 )
3271 });
3272 }
3273
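    /// Records a user-denied tool use as an error result so the model sees the
    /// refusal on its next turn.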
3274 pub fn deny_tool_use(
3275 &mut self,
3276 tool_use_id: LanguageModelToolUseId,
3277 tool_name: Arc<str>,
3278 window: Option<AnyWindowHandle>,
3279 cx: &mut Context<Self>,
3280 ) {
3281 let err = Err(anyhow::anyhow!(
3282 "Permission to run tool action denied by user"
3283 ));
3284
3285 self.tool_use.insert_tool_output(
3286 tool_use_id.clone(),
3287 tool_name,
3288 err,
3289 self.configured_model.as_ref(),
3290 self.completion_mode,
3291 );
3292 self.tool_finished(tool_use_id.clone(), None, true, window, cx);
3293 }
3294}
3295
3296#[derive(Debug, Clone, Error)]
3297pub enum ThreadError {
3298 #[error("Payment required")]
3299 PaymentRequired,
3300 #[error("Model request limit reached")]
3301 ModelRequestLimitReached { plan: Plan },
3302 #[error("Message {header}: {message}")]
3303 Message {
3304 header: SharedString,
3305 message: SharedString,
3306 },
3307 #[error("Retryable error: {message}")]
3308 RetryableError {
3309 message: SharedString,
3310 can_enable_burn_mode: bool,
3311 },
3312}
3313
3314#[derive(Debug, Clone)]
3315pub enum ThreadEvent {
3316 ShowError(ThreadError),
3317 StreamedCompletion,
3318 ReceivedTextChunk,
3319 NewRequest,
3320 StreamedAssistantText(MessageId, String),
3321 StreamedAssistantThinking(MessageId, String),
3322 StreamedToolUse {
3323 tool_use_id: LanguageModelToolUseId,
3324 ui_text: Arc<str>,
3325 input: serde_json::Value,
3326 },
3327 MissingToolUse {
3328 tool_use_id: LanguageModelToolUseId,
3329 ui_text: Arc<str>,
3330 },
3331 InvalidToolInput {
3332 tool_use_id: LanguageModelToolUseId,
3333 ui_text: Arc<str>,
3334 invalid_input_json: Arc<str>,
3335 },
3336 Stopped(Result<StopReason, Arc<anyhow::Error>>),
3337 MessageAdded(MessageId),
3338 MessageEdited(MessageId),
3339 MessageDeleted(MessageId),
3340 SummaryGenerated,
3341 SummaryChanged,
3342 UsePendingTools {
3343 tool_uses: Vec<PendingToolUse>,
3344 },
3345 ToolFinished {
3346 #[allow(unused)]
3347 tool_use_id: LanguageModelToolUseId,
3348 /// The pending tool use that corresponds to this tool.
3349 pending_tool_use: Option<PendingToolUse>,
3350 },
3351 CheckpointChanged,
3352 ToolConfirmationNeeded,
3353 ToolUseLimitReached,
3354 CancelEditing,
3355 CompletionCanceled,
3356 ProfileChanged,
3357}
3358
3359impl EventEmitter<ThreadEvent> for Thread {}
3360
3361struct PendingCompletion {
3362 id: usize,
3363 queue_state: QueueState,
3364 _task: Task<()>,
3365}
3366
3367#[cfg(test)]
3368mod tests {
3369 use super::*;
3370 use crate::{
3371 context::load_context, context_store::ContextStore, thread_store, thread_store::ThreadStore,
3372 };
3373
3374 // Test-specific constants
3375 const TEST_RATE_LIMIT_RETRY_SECS: u64 = 30;
3376 use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
3377 use assistant_tool::ToolRegistry;
3378 use assistant_tools;
3379 use futures::StreamExt;
3380 use futures::future::BoxFuture;
3381 use futures::stream::BoxStream;
3382 use gpui::TestAppContext;
3383 use http_client;
3384 use language_model::fake_provider::{FakeLanguageModel, FakeLanguageModelProvider};
3385 use language_model::{
3386 LanguageModelCompletionError, LanguageModelName, LanguageModelProviderId,
3387 LanguageModelProviderName, LanguageModelToolChoice,
3388 };
3389 use parking_lot::Mutex;
3390 use project::{FakeFs, Project};
3391 use prompt_store::PromptBuilder;
3392 use serde_json::json;
3393 use settings::{Settings, SettingsStore};
3394 use std::sync::Arc;
3395 use std::time::Duration;
3396 use theme::ThemeSettings;
3397 use util::path;
3398 use workspace::Workspace;
3399
3400 #[gpui::test]
3401 async fn test_message_with_context(cx: &mut TestAppContext) {
3402 init_test_settings(cx);
3403
3404 let project = create_test_project(
3405 cx,
3406 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3407 )
3408 .await;
3409
3410 let (_workspace, _thread_store, thread, context_store, model) =
3411 setup_test_environment(cx, project.clone()).await;
3412
3413 add_file_to_context(&project, &context_store, "test/code.rs", cx)
3414 .await
3415 .unwrap();
3416
3417 let context =
3418 context_store.read_with(cx, |store, _| store.context().next().cloned().unwrap());
3419 let loaded_context = cx
3420 .update(|cx| load_context(vec![context], &project, &None, cx))
3421 .await;
3422
3423 // Insert user message with context
3424 let message_id = thread.update(cx, |thread, cx| {
3425 thread.insert_user_message(
3426 "Please explain this code",
3427 loaded_context,
3428 None,
3429 Vec::new(),
3430 cx,
3431 )
3432 });
3433
3434 // Check content and context in message object
3435 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3436
3437 // Use different path format strings based on platform for the test
3438 #[cfg(windows)]
3439 let path_part = r"test\code.rs";
3440 #[cfg(not(windows))]
3441 let path_part = "test/code.rs";
3442
3443 let expected_context = format!(
3444 r#"
3445<context>
3446The following items were attached by the user. They are up-to-date and don't need to be re-read.
3447
3448<files>
3449```rs {path_part}
3450fn main() {{
3451 println!("Hello, world!");
3452}}
3453```
3454</files>
3455</context>
3456"#
3457 );
3458
3459 assert_eq!(message.role, Role::User);
3460 assert_eq!(message.segments.len(), 1);
3461 assert_eq!(
3462 message.segments[0],
3463 MessageSegment::Text("Please explain this code".to_string())
3464 );
3465 assert_eq!(message.loaded_context.text, expected_context);
3466
3467 // Check message in request
3468 let request = thread.update(cx, |thread, cx| {
3469 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3470 });
3471
3472 assert_eq!(request.messages.len(), 2);
3473 let expected_full_message = format!("{}Please explain this code", expected_context);
3474 assert_eq!(request.messages[1].string_contents(), expected_full_message);
3475 }
3476
3477 #[gpui::test]
3478 async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
3479 init_test_settings(cx);
3480
3481 let project = create_test_project(
3482 cx,
3483 json!({
3484 "file1.rs": "fn function1() {}\n",
3485 "file2.rs": "fn function2() {}\n",
3486 "file3.rs": "fn function3() {}\n",
3487 "file4.rs": "fn function4() {}\n",
3488 }),
3489 )
3490 .await;
3491
3492 let (_, _thread_store, thread, context_store, model) =
3493 setup_test_environment(cx, project.clone()).await;
3494
3495 // First message with context 1
3496 add_file_to_context(&project, &context_store, "test/file1.rs", cx)
3497 .await
3498 .unwrap();
3499 let new_contexts = context_store.update(cx, |store, cx| {
3500 store.new_context_for_thread(thread.read(cx), None)
3501 });
3502 assert_eq!(new_contexts.len(), 1);
3503 let loaded_context = cx
3504 .update(|cx| load_context(new_contexts, &project, &None, cx))
3505 .await;
3506 let message1_id = thread.update(cx, |thread, cx| {
3507 thread.insert_user_message("Message 1", loaded_context, None, Vec::new(), cx)
3508 });
3509
3510 // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
3511 add_file_to_context(&project, &context_store, "test/file2.rs", cx)
3512 .await
3513 .unwrap();
3514 let new_contexts = context_store.update(cx, |store, cx| {
3515 store.new_context_for_thread(thread.read(cx), None)
3516 });
3517 assert_eq!(new_contexts.len(), 1);
3518 let loaded_context = cx
3519 .update(|cx| load_context(new_contexts, &project, &None, cx))
3520 .await;
3521 let message2_id = thread.update(cx, |thread, cx| {
3522 thread.insert_user_message("Message 2", loaded_context, None, Vec::new(), cx)
3523 });
3524
3525 // Third message with all three contexts (contexts 1 and 2 should be skipped)
3527 add_file_to_context(&project, &context_store, "test/file3.rs", cx)
3528 .await
3529 .unwrap();
3530 let new_contexts = context_store.update(cx, |store, cx| {
3531 store.new_context_for_thread(thread.read(cx), None)
3532 });
3533 assert_eq!(new_contexts.len(), 1);
3534 let loaded_context = cx
3535 .update(|cx| load_context(new_contexts, &project, &None, cx))
3536 .await;
3537 let message3_id = thread.update(cx, |thread, cx| {
3538 thread.insert_user_message("Message 3", loaded_context, None, Vec::new(), cx)
3539 });
3540
3541 // Check what contexts are included in each message
3542 let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
3543 (
3544 thread.message(message1_id).unwrap().clone(),
3545 thread.message(message2_id).unwrap().clone(),
3546 thread.message(message3_id).unwrap().clone(),
3547 )
3548 });
3549
3550 // First message should include context 1
3551 assert!(message1.loaded_context.text.contains("file1.rs"));
3552
3553 // Second message should include only context 2 (not 1)
3554 assert!(!message2.loaded_context.text.contains("file1.rs"));
3555 assert!(message2.loaded_context.text.contains("file2.rs"));
3556
3557 // Third message should include only context 3 (not 1 or 2)
3558 assert!(!message3.loaded_context.text.contains("file1.rs"));
3559 assert!(!message3.loaded_context.text.contains("file2.rs"));
3560 assert!(message3.loaded_context.text.contains("file3.rs"));
3561
3562 // Check entire request to make sure all contexts are properly included
3563 let request = thread.update(cx, |thread, cx| {
3564 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3565 });
3566
        // The request should contain the system prompt plus all 3 user messages
3568 assert_eq!(request.messages.len(), 4);
3569
3570 // Check that the contexts are properly formatted in each message
3571 assert!(request.messages[1].string_contents().contains("file1.rs"));
3572 assert!(!request.messages[1].string_contents().contains("file2.rs"));
3573 assert!(!request.messages[1].string_contents().contains("file3.rs"));
3574
3575 assert!(!request.messages[2].string_contents().contains("file1.rs"));
3576 assert!(request.messages[2].string_contents().contains("file2.rs"));
3577 assert!(!request.messages[2].string_contents().contains("file3.rs"));
3578
3579 assert!(!request.messages[3].string_contents().contains("file1.rs"));
3580 assert!(!request.messages[3].string_contents().contains("file2.rs"));
3581 assert!(request.messages[3].string_contents().contains("file3.rs"));
3582
3583 add_file_to_context(&project, &context_store, "test/file4.rs", cx)
3584 .await
3585 .unwrap();
3586 let new_contexts = context_store.update(cx, |store, cx| {
3587 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3588 });
3589 assert_eq!(new_contexts.len(), 3);
3590 let loaded_context = cx
3591 .update(|cx| load_context(new_contexts, &project, &None, cx))
3592 .await
3593 .loaded_context;
3594
3595 assert!(!loaded_context.text.contains("file1.rs"));
3596 assert!(loaded_context.text.contains("file2.rs"));
3597 assert!(loaded_context.text.contains("file3.rs"));
3598 assert!(loaded_context.text.contains("file4.rs"));
3599
3600 let new_contexts = context_store.update(cx, |store, cx| {
3601 // Remove file4.rs
3602 store.remove_context(&loaded_context.contexts[2].handle(), cx);
3603 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3604 });
3605 assert_eq!(new_contexts.len(), 2);
3606 let loaded_context = cx
3607 .update(|cx| load_context(new_contexts, &project, &None, cx))
3608 .await
3609 .loaded_context;
3610
3611 assert!(!loaded_context.text.contains("file1.rs"));
3612 assert!(loaded_context.text.contains("file2.rs"));
3613 assert!(loaded_context.text.contains("file3.rs"));
3614 assert!(!loaded_context.text.contains("file4.rs"));
3615
3616 let new_contexts = context_store.update(cx, |store, cx| {
3617 // Remove file3.rs
3618 store.remove_context(&loaded_context.contexts[1].handle(), cx);
3619 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3620 });
3621 assert_eq!(new_contexts.len(), 1);
3622 let loaded_context = cx
3623 .update(|cx| load_context(new_contexts, &project, &None, cx))
3624 .await
3625 .loaded_context;
3626
3627 assert!(!loaded_context.text.contains("file1.rs"));
3628 assert!(loaded_context.text.contains("file2.rs"));
3629 assert!(!loaded_context.text.contains("file3.rs"));
3630 assert!(!loaded_context.text.contains("file4.rs"));
3631 }
3632
3633 #[gpui::test]
3634 async fn test_message_without_files(cx: &mut TestAppContext) {
3635 init_test_settings(cx);
3636
3637 let project = create_test_project(
3638 cx,
3639 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3640 )
3641 .await;
3642
3643 let (_, _thread_store, thread, _context_store, model) =
3644 setup_test_environment(cx, project.clone()).await;
3645
3646 // Insert user message without any context (empty context vector)
3647 let message_id = thread.update(cx, |thread, cx| {
3648 thread.insert_user_message(
3649 "What is the best way to learn Rust?",
3650 ContextLoadResult::default(),
3651 None,
3652 Vec::new(),
3653 cx,
3654 )
3655 });
3656
3657 // Check content and context in message object
3658 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3659
3660 // Context should be empty when no files are included
3661 assert_eq!(message.role, Role::User);
3662 assert_eq!(message.segments.len(), 1);
3663 assert_eq!(
3664 message.segments[0],
3665 MessageSegment::Text("What is the best way to learn Rust?".to_string())
3666 );
3667 assert_eq!(message.loaded_context.text, "");
3668
3669 // Check message in request
3670 let request = thread.update(cx, |thread, cx| {
3671 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3672 });
3673
3674 assert_eq!(request.messages.len(), 2);
3675 assert_eq!(
3676 request.messages[1].string_contents(),
3677 "What is the best way to learn Rust?"
3678 );
3679
3680 // Add second message, also without context
3681 let message2_id = thread.update(cx, |thread, cx| {
3682 thread.insert_user_message(
3683 "Are there any good books?",
3684 ContextLoadResult::default(),
3685 None,
3686 Vec::new(),
3687 cx,
3688 )
3689 });
3690
3691 let message2 =
3692 thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
3693 assert_eq!(message2.loaded_context.text, "");
3694
3695 // Check that both messages appear in the request
3696 let request = thread.update(cx, |thread, cx| {
3697 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3698 });
3699
3700 assert_eq!(request.messages.len(), 3);
3701 assert_eq!(
3702 request.messages[1].string_contents(),
3703 "What is the best way to learn Rust?"
3704 );
3705 assert_eq!(
3706 request.messages[2].string_contents(),
3707 "Are there any good books?"
3708 );
3709 }
3710
3711 #[gpui::test]
3712 #[ignore] // turn this test on when project_notifications tool is re-enabled
3713 async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
3714 init_test_settings(cx);
3715
3716 let project = create_test_project(
3717 cx,
3718 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3719 )
3720 .await;
3721
3722 let (_workspace, _thread_store, thread, context_store, model) =
3723 setup_test_environment(cx, project.clone()).await;
3724
3725 // Add a buffer to the context. This will be a tracked buffer
3726 let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
3727 .await
3728 .unwrap();
3729
3730 let context = context_store
3731 .read_with(cx, |store, _| store.context().next().cloned())
3732 .unwrap();
3733 let loaded_context = cx
3734 .update(|cx| load_context(vec![context], &project, &None, cx))
3735 .await;
3736
3737 // Insert user message and assistant response
3738 thread.update(cx, |thread, cx| {
3739 thread.insert_user_message("Explain this code", loaded_context, None, Vec::new(), cx);
3740 thread.insert_assistant_message(
3741 vec![MessageSegment::Text("This code prints 42.".into())],
3742 cx,
3743 );
3744 });
3745 cx.run_until_parked();
3746
3747 // We shouldn't have a stale buffer notification yet
3748 let notifications = thread.read_with(cx, |thread, _| {
3749 find_tool_uses(thread, "project_notifications")
3750 });
3751 assert!(
3752 notifications.is_empty(),
3753 "Should not have stale buffer notification before buffer is modified"
3754 );
3755
3756 // Modify the buffer
3757 buffer.update(cx, |buffer, cx| {
3758 buffer.edit(
3759 [(1..1, "\n println!(\"Added a new line\");\n")],
3760 None,
3761 cx,
3762 );
3763 });
3764
3765 // Insert another user message
3766 thread.update(cx, |thread, cx| {
3767 thread.insert_user_message(
3768 "What does the code do now?",
3769 ContextLoadResult::default(),
3770 None,
3771 Vec::new(),
3772 cx,
3773 )
3774 });
3775 cx.run_until_parked();
3776
3777 // Check for the stale buffer warning
3778 thread.update(cx, |thread, cx| {
3779 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3780 });
3781 cx.run_until_parked();
3782
3783 let notifications = thread.read_with(cx, |thread, _cx| {
3784 find_tool_uses(thread, "project_notifications")
3785 });
3786
3787 let [notification] = notifications.as_slice() else {
3788 panic!("Should have a `project_notifications` tool use");
3789 };
3790
3791 let Some(notification_content) = notification.content.to_str() else {
3792 panic!("`project_notifications` should return text");
3793 };
3794
3795 assert!(notification_content.contains("These files have changed since the last read:"));
3796 assert!(notification_content.contains("code.rs"));
3797
3798 // Insert another user message and flush notifications again
3799 thread.update(cx, |thread, cx| {
3800 thread.insert_user_message(
3801 "Can you tell me more?",
3802 ContextLoadResult::default(),
3803 None,
3804 Vec::new(),
3805 cx,
3806 )
3807 });
3808
3809 thread.update(cx, |thread, cx| {
3810 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3811 });
3812 cx.run_until_parked();
3813
3814 // There should be no new notifications (we already flushed one)
3815 let notifications = thread.read_with(cx, |thread, _cx| {
3816 find_tool_uses(thread, "project_notifications")
3817 });
3818
3819 assert_eq!(
3820 notifications.len(),
3821 1,
3822 "Should still have only one notification after second flush - no duplicates"
3823 );
3824 }
3825
3826 fn find_tool_uses(thread: &Thread, tool_name: &str) -> Vec<LanguageModelToolResult> {
3827 thread
3828 .messages()
3829 .flat_map(|message| {
3830 thread
3831 .tool_results_for_message(message.id)
3832 .into_iter()
                    .filter(|result| result.tool_name.as_ref() == tool_name)
3834 .cloned()
3835 .collect::<Vec<_>>()
3836 })
3837 .collect()
3838 }
3839
3840 #[gpui::test]
3841 async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
3842 init_test_settings(cx);
3843
3844 let project = create_test_project(
3845 cx,
3846 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3847 )
3848 .await;
3849
3850 let (_workspace, thread_store, thread, _context_store, _model) =
3851 setup_test_environment(cx, project.clone()).await;
3852
3853 // Check that we are starting with the default profile
3854 let profile = cx.read(|cx| thread.read(cx).profile.clone());
3855 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3856 assert_eq!(
3857 profile,
3858 AgentProfile::new(AgentProfileId::default(), tool_set)
3859 );
3860 }
3861
3862 #[gpui::test]
3863 async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
3864 init_test_settings(cx);
3865
3866 let project = create_test_project(
3867 cx,
3868 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3869 )
3870 .await;
3871
3872 let (_workspace, thread_store, thread, _context_store, _model) =
3873 setup_test_environment(cx, project.clone()).await;
3874
3875 // Profile gets serialized with default values
3876 let serialized = thread
3877 .update(cx, |thread, cx| thread.serialize(cx))
3878 .await
3879 .unwrap();
3880
3881 assert_eq!(serialized.profile, Some(AgentProfileId::default()));
3882
3883 let deserialized = cx.update(|cx| {
3884 thread.update(cx, |thread, cx| {
3885 Thread::deserialize(
3886 thread.id.clone(),
3887 serialized,
3888 thread.project.clone(),
3889 thread.cloud_user_store.clone(),
3890 thread.tools.clone(),
3891 thread.prompt_builder.clone(),
3892 thread.project_context.clone(),
3893 None,
3894 cx,
3895 )
3896 })
3897 });
3898 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3899
3900 assert_eq!(
3901 deserialized.profile,
3902 AgentProfile::new(AgentProfileId::default(), tool_set)
3903 );
3904 }
3905
3906 #[gpui::test]
3907 async fn test_temperature_setting(cx: &mut TestAppContext) {
3908 init_test_settings(cx);
3909
3910 let project = create_test_project(
3911 cx,
3912 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3913 )
3914 .await;
3915
3916 let (_workspace, _thread_store, thread, _context_store, model) =
3917 setup_test_environment(cx, project.clone()).await;
3918
3919 // Both model and provider
3920 cx.update(|cx| {
3921 AgentSettings::override_global(
3922 AgentSettings {
3923 model_parameters: vec![LanguageModelParameters {
3924 provider: Some(model.provider_id().0.to_string().into()),
3925 model: Some(model.id().0.clone()),
3926 temperature: Some(0.66),
3927 }],
3928 ..AgentSettings::get_global(cx).clone()
3929 },
3930 cx,
3931 );
3932 });
3933
3934 let request = thread.update(cx, |thread, cx| {
3935 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3936 });
3937 assert_eq!(request.temperature, Some(0.66));
3938
3939 // Only model
3940 cx.update(|cx| {
3941 AgentSettings::override_global(
3942 AgentSettings {
3943 model_parameters: vec![LanguageModelParameters {
3944 provider: None,
3945 model: Some(model.id().0.clone()),
3946 temperature: Some(0.66),
3947 }],
3948 ..AgentSettings::get_global(cx).clone()
3949 },
3950 cx,
3951 );
3952 });
3953
3954 let request = thread.update(cx, |thread, cx| {
3955 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3956 });
3957 assert_eq!(request.temperature, Some(0.66));
3958
3959 // Only provider
3960 cx.update(|cx| {
3961 AgentSettings::override_global(
3962 AgentSettings {
3963 model_parameters: vec![LanguageModelParameters {
3964 provider: Some(model.provider_id().0.to_string().into()),
3965 model: None,
3966 temperature: Some(0.66),
3967 }],
3968 ..AgentSettings::get_global(cx).clone()
3969 },
3970 cx,
3971 );
3972 });
3973
3974 let request = thread.update(cx, |thread, cx| {
3975 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3976 });
3977 assert_eq!(request.temperature, Some(0.66));
3978
3979 // Same model name, different provider
3980 cx.update(|cx| {
3981 AgentSettings::override_global(
3982 AgentSettings {
3983 model_parameters: vec![LanguageModelParameters {
3984 provider: Some("anthropic".into()),
3985 model: Some(model.id().0.clone()),
3986 temperature: Some(0.66),
3987 }],
3988 ..AgentSettings::get_global(cx).clone()
3989 },
3990 cx,
3991 );
3992 });
3993
3994 let request = thread.update(cx, |thread, cx| {
3995 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3996 });
3997 assert_eq!(request.temperature, None);
3998 }
3999
4000 #[gpui::test]
4001 async fn test_thread_summary(cx: &mut TestAppContext) {
4002 init_test_settings(cx);
4003
4004 let project = create_test_project(cx, json!({})).await;
4005
4006 let (_, _thread_store, thread, _context_store, model) =
4007 setup_test_environment(cx, project.clone()).await;
4008
4009 // Initial state should be pending
4010 thread.read_with(cx, |thread, _| {
4011 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4012 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4013 });
4014
4015 // Manually setting the summary should not be allowed in this state
4016 thread.update(cx, |thread, cx| {
4017 thread.set_summary("This should not work", cx);
4018 });
4019
4020 thread.read_with(cx, |thread, _| {
4021 assert!(matches!(thread.summary(), ThreadSummary::Pending));
4022 });
4023
4024 // Send a message
4025 thread.update(cx, |thread, cx| {
4026 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
4027 thread.send_to_model(
4028 model.clone(),
4029 CompletionIntent::ThreadSummarization,
4030 None,
4031 cx,
4032 );
4033 });
4034
4035 let fake_model = model.as_fake();
4036 simulate_successful_response(&fake_model, cx);
4037
4038 // Should start generating summary when there are >= 2 messages
4039 thread.read_with(cx, |thread, _| {
4040 assert_eq!(*thread.summary(), ThreadSummary::Generating);
4041 });
4042
4043 // Should not be able to set the summary while generating
4044 thread.update(cx, |thread, cx| {
4045 thread.set_summary("This should not work either", cx);
4046 });
4047
4048 thread.read_with(cx, |thread, _| {
4049 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4050 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4051 });
4052
4053 cx.run_until_parked();
4054 fake_model.stream_last_completion_response("Brief");
4055 fake_model.stream_last_completion_response(" Introduction");
4056 fake_model.end_last_completion_stream();
4057 cx.run_until_parked();
4058
4059 // Summary should be set
4060 thread.read_with(cx, |thread, _| {
4061 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4062 assert_eq!(thread.summary().or_default(), "Brief Introduction");
4063 });
4064
4065 // Now we should be able to set a summary
4066 thread.update(cx, |thread, cx| {
4067 thread.set_summary("Brief Intro", cx);
4068 });
4069
4070 thread.read_with(cx, |thread, _| {
4071 assert_eq!(thread.summary().or_default(), "Brief Intro");
4072 });
4073
4074 // Test setting an empty summary (should default to DEFAULT)
4075 thread.update(cx, |thread, cx| {
4076 thread.set_summary("", cx);
4077 });
4078
4079 thread.read_with(cx, |thread, _| {
4080 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4081 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
4082 });
4083 }
4084
4085 #[gpui::test]
4086 async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
4087 init_test_settings(cx);
4088
4089 let project = create_test_project(cx, json!({})).await;
4090
4091 let (_, _thread_store, thread, _context_store, model) =
4092 setup_test_environment(cx, project.clone()).await;
4093
4094 test_summarize_error(&model, &thread, cx);
4095
4096 // Now we should be able to set a summary
4097 thread.update(cx, |thread, cx| {
4098 thread.set_summary("Brief Intro", cx);
4099 });
4100
4101 thread.read_with(cx, |thread, _| {
4102 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4103 assert_eq!(thread.summary().or_default(), "Brief Intro");
4104 });
4105 }
4106
4107 #[gpui::test]
4108 async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
4109 init_test_settings(cx);
4110
4111 let project = create_test_project(cx, json!({})).await;
4112
4113 let (_, _thread_store, thread, _context_store, model) =
4114 setup_test_environment(cx, project.clone()).await;
4115
4116 test_summarize_error(&model, &thread, cx);
4117
4118 // Sending another message should not trigger another summarize request
4119 thread.update(cx, |thread, cx| {
4120 thread.insert_user_message(
4121 "How are you?",
4122 ContextLoadResult::default(),
4123 None,
4124 vec![],
4125 cx,
4126 );
4127 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4128 });
4129
4130 let fake_model = model.as_fake();
4131 simulate_successful_response(&fake_model, cx);
4132
4133 thread.read_with(cx, |thread, _| {
4134 // State is still Error, not Generating
4135 assert!(matches!(thread.summary(), ThreadSummary::Error));
4136 });
4137
4138 // But the summarize request can be invoked manually
4139 thread.update(cx, |thread, cx| {
4140 thread.summarize(cx);
4141 });
4142
4143 thread.read_with(cx, |thread, _| {
4144 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4145 });
4146
4147 cx.run_until_parked();
4148 fake_model.stream_last_completion_response("A successful summary");
4149 fake_model.end_last_completion_stream();
4150 cx.run_until_parked();
4151
4152 thread.read_with(cx, |thread, _| {
4153 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4154 assert_eq!(thread.summary().or_default(), "A successful summary");
4155 });
4156 }
4157
    // Failure modes injected by `ErrorInjector` below
4159 enum TestError {
4160 Overloaded,
4161 InternalServerError,
4162 }
4163
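    /// A test model that reports the same metadata as the wrapped
    /// `FakeLanguageModel` but fails every `stream_completion` call with the
    /// configured `TestError`.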
4164 struct ErrorInjector {
4165 inner: Arc<FakeLanguageModel>,
4166 error_type: TestError,
4167 }
4168
4169 impl ErrorInjector {
4170 fn new(error_type: TestError) -> Self {
4171 Self {
4172 inner: Arc::new(FakeLanguageModel::default()),
4173 error_type,
4174 }
4175 }
4176 }
4177
4178 impl LanguageModel for ErrorInjector {
4179 fn id(&self) -> LanguageModelId {
4180 self.inner.id()
4181 }
4182
4183 fn name(&self) -> LanguageModelName {
4184 self.inner.name()
4185 }
4186
4187 fn provider_id(&self) -> LanguageModelProviderId {
4188 self.inner.provider_id()
4189 }
4190
4191 fn provider_name(&self) -> LanguageModelProviderName {
4192 self.inner.provider_name()
4193 }
4194
4195 fn supports_tools(&self) -> bool {
4196 self.inner.supports_tools()
4197 }
4198
4199 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4200 self.inner.supports_tool_choice(choice)
4201 }
4202
4203 fn supports_images(&self) -> bool {
4204 self.inner.supports_images()
4205 }
4206
4207 fn telemetry_id(&self) -> String {
4208 self.inner.telemetry_id()
4209 }
4210
4211 fn max_token_count(&self) -> u64 {
4212 self.inner.max_token_count()
4213 }
4214
4215 fn count_tokens(
4216 &self,
4217 request: LanguageModelRequest,
4218 cx: &App,
4219 ) -> BoxFuture<'static, Result<u64>> {
4220 self.inner.count_tokens(request, cx)
4221 }
4222
4223 fn stream_completion(
4224 &self,
4225 _request: LanguageModelRequest,
4226 _cx: &AsyncApp,
4227 ) -> BoxFuture<
4228 'static,
4229 Result<
4230 BoxStream<
4231 'static,
4232 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4233 >,
4234 LanguageModelCompletionError,
4235 >,
4236 > {
4237 let error = match self.error_type {
4238 TestError::Overloaded => LanguageModelCompletionError::ServerOverloaded {
4239 provider: self.provider_name(),
4240 retry_after: None,
4241 },
4242 TestError::InternalServerError => {
4243 LanguageModelCompletionError::ApiInternalServerError {
4244 provider: self.provider_name(),
4245 message: "I'm a teapot orbiting the sun".to_string(),
4246 }
4247 }
4248 };
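            // Yield a single error and then end the stream.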
4249 async move {
4250 let stream = futures::stream::once(async move { Err(error) });
4251 Ok(stream.boxed())
4252 }
4253 .boxed()
4254 }
4255
4256 fn as_fake(&self) -> &FakeLanguageModel {
4257 &self.inner
4258 }
4259 }
4260
4261 #[gpui::test]
4262 async fn test_retry_on_overloaded_error(cx: &mut TestAppContext) {
4263 init_test_settings(cx);
4264
4265 let project = create_test_project(cx, json!({})).await;
4266 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4267
4268 // Enable Burn Mode to allow retries
4269 thread.update(cx, |thread, _| {
4270 thread.set_completion_mode(CompletionMode::Burn);
4271 });
4272
4273 // Create model that returns overloaded error
4274 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4275
4276 // Insert a user message
4277 thread.update(cx, |thread, cx| {
4278 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4279 });
4280
4281 // Start completion
4282 thread.update(cx, |thread, cx| {
4283 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4284 });
4285
4286 cx.run_until_parked();
4287
4288 thread.read_with(cx, |thread, _| {
4289 assert!(thread.retry_state.is_some(), "Should have retry state");
4290 let retry_state = thread.retry_state.as_ref().unwrap();
4291 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4292 assert_eq!(
4293 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
4294 "Should retry MAX_RETRY_ATTEMPTS times for overloaded errors"
4295 );
4296 });
4297
4298 // Check that a retry message was added
4299 thread.read_with(cx, |thread, _| {
4300 let mut messages = thread.messages();
4301 assert!(
4302 messages.any(|msg| {
4303 msg.role == Role::System
4304 && msg.ui_only
4305 && msg.segments.iter().any(|seg| {
4306 if let MessageSegment::Text(text) = seg {
4307 text.contains("overloaded")
4308 && text
4309 .contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS))
4310 } else {
4311 false
4312 }
4313 })
4314 }),
4315 "Should have added a system retry message"
4316 );
4317 });
4318
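        // Count the ui_only retry notifications added to the thread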
4319 let retry_count = thread.update(cx, |thread, _| {
4320 thread
4321 .messages
4322 .iter()
4323 .filter(|m| {
4324 m.ui_only
4325 && m.segments.iter().any(|s| {
4326 if let MessageSegment::Text(text) = s {
4327 text.contains("Retrying") && text.contains("seconds")
4328 } else {
4329 false
4330 }
4331 })
4332 })
4333 .count()
4334 });
4335
4336 assert_eq!(retry_count, 1, "Should have one retry message");
4337 }
4338
4339 #[gpui::test]
4340 async fn test_retry_on_internal_server_error(cx: &mut TestAppContext) {
4341 init_test_settings(cx);
4342
4343 let project = create_test_project(cx, json!({})).await;
4344 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4345
4346 // Enable Burn Mode to allow retries
4347 thread.update(cx, |thread, _| {
4348 thread.set_completion_mode(CompletionMode::Burn);
4349 });
4350
4351 // Create model that returns internal server error
4352 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4353
4354 // Insert a user message
4355 thread.update(cx, |thread, cx| {
4356 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4357 });
4358
4359 // Start completion
4360 thread.update(cx, |thread, cx| {
4361 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4362 });
4363
4364 cx.run_until_parked();
4365
4366 // Check retry state on thread
4367 thread.read_with(cx, |thread, _| {
4368 assert!(thread.retry_state.is_some(), "Should have retry state");
4369 let retry_state = thread.retry_state.as_ref().unwrap();
4370 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4371 assert_eq!(
4372 retry_state.max_attempts, 3,
4373 "Should have correct max attempts"
4374 );
4375 });
4376
4377 // Check that a retry message was added with provider name
4378 thread.read_with(cx, |thread, _| {
4379 let mut messages = thread.messages();
4380 assert!(
4381 messages.any(|msg| {
4382 msg.role == Role::System
4383 && msg.ui_only
4384 && msg.segments.iter().any(|seg| {
4385 if let MessageSegment::Text(text) = seg {
4386 text.contains("internal")
4387 && text.contains("Fake")
4388 && text.contains("Retrying")
4389 && text.contains("attempt 1 of 3")
4390 && text.contains("seconds")
4391 } else {
4392 false
4393 }
4394 })
4395 }),
4396 "Should have added a system retry message with provider name"
4397 );
4398 });
4399
4400 // Count retry messages
4401 let retry_count = thread.update(cx, |thread, _| {
4402 thread
4403 .messages
4404 .iter()
4405 .filter(|m| {
4406 m.ui_only
4407 && m.segments.iter().any(|s| {
4408 if let MessageSegment::Text(text) = s {
4409 text.contains("Retrying") && text.contains("seconds")
4410 } else {
4411 false
4412 }
4413 })
4414 })
4415 .count()
4416 });
4417
4418 assert_eq!(retry_count, 1, "Should have one retry message");
4419 }
4420
4421 #[gpui::test]
4422 async fn test_exponential_backoff_on_retries(cx: &mut TestAppContext) {
4423 init_test_settings(cx);
4424
4425 let project = create_test_project(cx, json!({})).await;
4426 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4427
4428 // Enable Burn Mode to allow retries
4429 thread.update(cx, |thread, _| {
4430 thread.set_completion_mode(CompletionMode::Burn);
4431 });
4432
4433 // Create model that returns internal server error
4434 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4435
4436 // Insert a user message
4437 thread.update(cx, |thread, cx| {
4438 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4439 });
4440
        // Track how many completion requests are issued (initial attempt + retries)
4443 let completion_count = Arc::new(Mutex::new(0));
4444 let completion_count_clone = completion_count.clone();
4445
4446 let _subscription = thread.update(cx, |_, cx| {
4447 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4448 if let ThreadEvent::NewRequest = event {
4449 *completion_count_clone.lock() += 1;
4450 }
4451 })
4452 });
4453
4454 // First attempt
4455 thread.update(cx, |thread, cx| {
4456 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4457 });
4458 cx.run_until_parked();
4459
4460 // Should have scheduled first retry - count retry messages
4461 let retry_count = thread.update(cx, |thread, _| {
4462 thread
4463 .messages
4464 .iter()
4465 .filter(|m| {
4466 m.ui_only
4467 && m.segments.iter().any(|s| {
4468 if let MessageSegment::Text(text) = s {
4469 text.contains("Retrying") && text.contains("seconds")
4470 } else {
4471 false
4472 }
4473 })
4474 })
4475 .count()
4476 });
4477 assert_eq!(retry_count, 1, "Should have scheduled first retry");
4478
4479 // Check retry state
4480 thread.read_with(cx, |thread, _| {
4481 assert!(thread.retry_state.is_some(), "Should have retry state");
4482 let retry_state = thread.retry_state.as_ref().unwrap();
4483 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4484 assert_eq!(
4485 retry_state.max_attempts, 3,
4486 "Internal server errors should retry up to 3 times"
4487 );
4488 });
4489
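        // Each clock advance lets the pending retry timer fire; the injector
        // fails again, scheduling the next retry until max_attempts is used up.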
4490 // Advance clock for first retry
4491 cx.executor().advance_clock(BASE_RETRY_DELAY);
4492 cx.run_until_parked();
4493
4494 // Advance clock for second retry
4495 cx.executor().advance_clock(BASE_RETRY_DELAY);
4496 cx.run_until_parked();
4497
4498 // Advance clock for third retry
4499 cx.executor().advance_clock(BASE_RETRY_DELAY);
4500 cx.run_until_parked();
4501
4502 // Should have completed all retries - count retry messages
4503 let retry_count = thread.update(cx, |thread, _| {
4504 thread
4505 .messages
4506 .iter()
4507 .filter(|m| {
4508 m.ui_only
4509 && m.segments.iter().any(|s| {
4510 if let MessageSegment::Text(text) = s {
4511 text.contains("Retrying") && text.contains("seconds")
4512 } else {
4513 false
4514 }
4515 })
4516 })
4517 .count()
4518 });
4519 assert_eq!(
4520 retry_count, 3,
4521 "Should have 3 retries for internal server errors"
4522 );
4523
4524 // For internal server errors, we retry 3 times and then give up
4525 // Check that retry_state is cleared after all retries
4526 thread.read_with(cx, |thread, _| {
4527 assert!(
4528 thread.retry_state.is_none(),
4529 "Retry state should be cleared after all retries"
4530 );
4531 });
4532
4533 // Verify total attempts (1 initial + 3 retries)
4534 assert_eq!(
4535 *completion_count.lock(),
4536 4,
4537 "Should have attempted once plus 3 retries"
4538 );
4539 }
4540
4541 #[gpui::test]
4542 async fn test_max_retries_exceeded(cx: &mut TestAppContext) {
4543 init_test_settings(cx);
4544
4545 let project = create_test_project(cx, json!({})).await;
4546 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4547
4548 // Enable Burn Mode to allow retries
4549 thread.update(cx, |thread, _| {
4550 thread.set_completion_mode(CompletionMode::Burn);
4551 });
4552
4553 // Create model that returns overloaded error
4554 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4555
4556 // Insert a user message
4557 thread.update(cx, |thread, cx| {
4558 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4559 });
4560
4561 // Track events
4562 let stopped_with_error = Arc::new(Mutex::new(false));
4563 let stopped_with_error_clone = stopped_with_error.clone();
4564
4565 let _subscription = thread.update(cx, |_, cx| {
4566 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4567 if let ThreadEvent::Stopped(Err(_)) = event {
4568 *stopped_with_error_clone.lock() = true;
4569 }
4570 })
4571 });
4572
4573 // Start initial completion
4574 thread.update(cx, |thread, cx| {
4575 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4576 });
4577 cx.run_until_parked();
4578
4579 // Advance through all retries
4580 for _ in 0..MAX_RETRY_ATTEMPTS {
4581 cx.executor().advance_clock(BASE_RETRY_DELAY);
4582 cx.run_until_parked();
4583 }
4584
4585 let retry_count = thread.update(cx, |thread, _| {
4586 thread
4587 .messages
4588 .iter()
4589 .filter(|m| {
4590 m.ui_only
4591 && m.segments.iter().any(|s| {
4592 if let MessageSegment::Text(text) = s {
4593 text.contains("Retrying") && text.contains("seconds")
4594 } else {
4595 false
4596 }
4597 })
4598 })
4599 .count()
4600 });
4601
        // After max retries, all attempts should have been made and a
        // Stopped(Err(...)) event emitted
4603 assert_eq!(
4604 retry_count, MAX_RETRY_ATTEMPTS as usize,
4605 "Should have attempted MAX_RETRY_ATTEMPTS retries for overloaded errors"
4606 );
4607 assert!(
4608 *stopped_with_error.lock(),
4609 "Should emit Stopped(Err(...)) event after max retries exceeded"
4610 );
4611
4612 // Retry state should be cleared
4613 thread.read_with(cx, |thread, _| {
4614 assert!(
4615 thread.retry_state.is_none(),
4616 "Retry state should be cleared after max retries"
4617 );
4618
4619 // Verify we have the expected number of retry messages
4620 let retry_messages = thread
4621 .messages
4622 .iter()
4623 .filter(|msg| msg.ui_only && msg.role == Role::System)
4624 .count();
4625 assert_eq!(
4626 retry_messages, MAX_RETRY_ATTEMPTS as usize,
4627 "Should have MAX_RETRY_ATTEMPTS retry messages for overloaded errors"
4628 );
4629 });
4630 }
4631
4632 #[gpui::test]
4633 async fn test_retry_message_removed_on_retry(cx: &mut TestAppContext) {
4634 init_test_settings(cx);
4635
4636 let project = create_test_project(cx, json!({})).await;
4637 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4638
4639 // Enable Burn Mode to allow retries
4640 thread.update(cx, |thread, _| {
4641 thread.set_completion_mode(CompletionMode::Burn);
4642 });
4643
4644 // We'll use a wrapper to switch behavior after first failure
4645 struct RetryTestModel {
4646 inner: Arc<FakeLanguageModel>,
4647 failed_once: Arc<Mutex<bool>>,
4648 }
4649
4650 impl LanguageModel for RetryTestModel {
4651 fn id(&self) -> LanguageModelId {
4652 self.inner.id()
4653 }
4654
4655 fn name(&self) -> LanguageModelName {
4656 self.inner.name()
4657 }
4658
4659 fn provider_id(&self) -> LanguageModelProviderId {
4660 self.inner.provider_id()
4661 }
4662
4663 fn provider_name(&self) -> LanguageModelProviderName {
4664 self.inner.provider_name()
4665 }
4666
4667 fn supports_tools(&self) -> bool {
4668 self.inner.supports_tools()
4669 }
4670
4671 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4672 self.inner.supports_tool_choice(choice)
4673 }
4674
4675 fn supports_images(&self) -> bool {
4676 self.inner.supports_images()
4677 }
4678
4679 fn telemetry_id(&self) -> String {
4680 self.inner.telemetry_id()
4681 }
4682
4683 fn max_token_count(&self) -> u64 {
4684 self.inner.max_token_count()
4685 }
4686
4687 fn count_tokens(
4688 &self,
4689 request: LanguageModelRequest,
4690 cx: &App,
4691 ) -> BoxFuture<'static, Result<u64>> {
4692 self.inner.count_tokens(request, cx)
4693 }
4694
4695 fn stream_completion(
4696 &self,
4697 request: LanguageModelRequest,
4698 cx: &AsyncApp,
4699 ) -> BoxFuture<
4700 'static,
4701 Result<
4702 BoxStream<
4703 'static,
4704 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4705 >,
4706 LanguageModelCompletionError,
4707 >,
4708 > {
4709 if !*self.failed_once.lock() {
4710 *self.failed_once.lock() = true;
4711 let provider = self.provider_name();
4712 // Return error on first attempt
4713 let stream = futures::stream::once(async move {
4714 Err(LanguageModelCompletionError::ServerOverloaded {
4715 provider,
4716 retry_after: None,
4717 })
4718 });
4719 async move { Ok(stream.boxed()) }.boxed()
4720 } else {
4721 // Succeed on retry
4722 self.inner.stream_completion(request, cx)
4723 }
4724 }
4725
4726 fn as_fake(&self) -> &FakeLanguageModel {
4727 &self.inner
4728 }
4729 }
4730
4731 let model = Arc::new(RetryTestModel {
4732 inner: Arc::new(FakeLanguageModel::default()),
4733 failed_once: Arc::new(Mutex::new(false)),
4734 });
4735
4736 // Insert a user message
4737 thread.update(cx, |thread, cx| {
4738 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4739 });
4740
        // Track when the retry completes successfully
4743 let retry_completed = Arc::new(Mutex::new(false));
4744 let retry_completed_clone = retry_completed.clone();
4745
4746 let _subscription = thread.update(cx, |_, cx| {
4747 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4748 if let ThreadEvent::StreamedCompletion = event {
4749 *retry_completed_clone.lock() = true;
4750 }
4751 })
4752 });
4753
4754 // Start completion
4755 thread.update(cx, |thread, cx| {
4756 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4757 });
4758 cx.run_until_parked();
4759
4760 // Get the retry message ID
4761 let retry_message_id = thread.read_with(cx, |thread, _| {
4762 thread
4763 .messages()
4764 .find(|msg| msg.role == Role::System && msg.ui_only)
4765 .map(|msg| msg.id)
4766 .expect("Should have a retry message")
4767 });
4768
4769 // Wait for retry
4770 cx.executor().advance_clock(BASE_RETRY_DELAY);
4771 cx.run_until_parked();
4772
4773 // Stream some successful content
4774 let fake_model = model.as_fake();
4775 // After the retry, there should be a new pending completion
4776 let pending = fake_model.pending_completions();
4777 assert!(
4778 !pending.is_empty(),
4779 "Should have a pending completion after retry"
4780 );
4781 fake_model.stream_completion_response(&pending[0], "Success!");
4782 fake_model.end_completion_stream(&pending[0]);
4783 cx.run_until_parked();
4784
4785 // Check that the retry completed successfully
4786 assert!(
4787 *retry_completed.lock(),
4788 "Retry should have completed successfully"
4789 );
4790
4791 // Retry message should still exist but be marked as ui_only
4792 thread.read_with(cx, |thread, _| {
4793 let retry_msg = thread
4794 .message(retry_message_id)
4795 .expect("Retry message should still exist");
4796 assert!(retry_msg.ui_only, "Retry message should be ui_only");
4797 assert_eq!(
4798 retry_msg.role,
4799 Role::System,
4800 "Retry message should have System role"
4801 );
4802 });
4803 }
4804
4805 #[gpui::test]
4806 async fn test_successful_completion_clears_retry_state(cx: &mut TestAppContext) {
4807 init_test_settings(cx);
4808
4809 let project = create_test_project(cx, json!({})).await;
4810 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4811
4812 // Enable Burn Mode to allow retries
4813 thread.update(cx, |thread, _| {
4814 thread.set_completion_mode(CompletionMode::Burn);
4815 });
4816
4817 // Create a model that fails once then succeeds
4818 struct FailOnceModel {
4819 inner: Arc<FakeLanguageModel>,
4820 failed_once: Arc<Mutex<bool>>,
4821 }
4822
4823 impl LanguageModel for FailOnceModel {
4824 fn id(&self) -> LanguageModelId {
4825 self.inner.id()
4826 }
4827
4828 fn name(&self) -> LanguageModelName {
4829 self.inner.name()
4830 }
4831
4832 fn provider_id(&self) -> LanguageModelProviderId {
4833 self.inner.provider_id()
4834 }
4835
4836 fn provider_name(&self) -> LanguageModelProviderName {
4837 self.inner.provider_name()
4838 }
4839
4840 fn supports_tools(&self) -> bool {
4841 self.inner.supports_tools()
4842 }
4843
4844 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4845 self.inner.supports_tool_choice(choice)
4846 }
4847
4848 fn supports_images(&self) -> bool {
4849 self.inner.supports_images()
4850 }
4851
4852 fn telemetry_id(&self) -> String {
4853 self.inner.telemetry_id()
4854 }
4855
4856 fn max_token_count(&self) -> u64 {
4857 self.inner.max_token_count()
4858 }
4859
4860 fn count_tokens(
4861 &self,
4862 request: LanguageModelRequest,
4863 cx: &App,
4864 ) -> BoxFuture<'static, Result<u64>> {
4865 self.inner.count_tokens(request, cx)
4866 }
4867
4868 fn stream_completion(
4869 &self,
4870 request: LanguageModelRequest,
4871 cx: &AsyncApp,
4872 ) -> BoxFuture<
4873 'static,
4874 Result<
4875 BoxStream<
4876 'static,
4877 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4878 >,
4879 LanguageModelCompletionError,
4880 >,
4881 > {
4882 if !*self.failed_once.lock() {
4883 *self.failed_once.lock() = true;
4884 let provider = self.provider_name();
4885 // Return error on first attempt
4886 let stream = futures::stream::once(async move {
4887 Err(LanguageModelCompletionError::ServerOverloaded {
4888 provider,
4889 retry_after: None,
4890 })
4891 });
4892 async move { Ok(stream.boxed()) }.boxed()
4893 } else {
4894 // Succeed on retry
4895 self.inner.stream_completion(request, cx)
4896 }
4897 }
4898 }
4899
4900 let fail_once_model = Arc::new(FailOnceModel {
4901 inner: Arc::new(FakeLanguageModel::default()),
4902 failed_once: Arc::new(Mutex::new(false)),
4903 });
4904
4905 // Insert a user message
4906 thread.update(cx, |thread, cx| {
4907 thread.insert_user_message(
4908 "Test message",
4909 ContextLoadResult::default(),
4910 None,
4911 vec![],
4912 cx,
4913 );
4914 });
4915
4916 // Start completion with fail-once model
4917 thread.update(cx, |thread, cx| {
4918 thread.send_to_model(
4919 fail_once_model.clone(),
4920 CompletionIntent::UserPrompt,
4921 None,
4922 cx,
4923 );
4924 });
4925
4926 cx.run_until_parked();
4927
4928 // Verify retry state exists after first failure
4929 thread.read_with(cx, |thread, _| {
4930 assert!(
4931 thread.retry_state.is_some(),
4932 "Should have retry state after failure"
4933 );
4934 });
4935
4936 // Wait for retry delay
4937 cx.executor().advance_clock(BASE_RETRY_DELAY);
4938 cx.run_until_parked();
4939
        // The retry goes through FailOnceModel again, which now delegates to the
        // inner FakeLanguageModel; drive that stream to completion by hand.
4942 let inner_fake = fail_once_model.inner.clone();
4943
4944 // Wait a bit for the retry to start
4945 cx.run_until_parked();
4946
4947 // Check for pending completions and complete them
4948 if let Some(pending) = inner_fake.pending_completions().first() {
4949 inner_fake.stream_completion_response(pending, "Success!");
4950 inner_fake.end_completion_stream(pending);
4951 }
4952 cx.run_until_parked();
4953
4954 thread.read_with(cx, |thread, _| {
4955 assert!(
4956 thread.retry_state.is_none(),
4957 "Retry state should be cleared after successful completion"
4958 );
4959
4960 let has_assistant_message = thread
4961 .messages
4962 .iter()
4963 .any(|msg| msg.role == Role::Assistant && !msg.ui_only);
4964 assert!(
4965 has_assistant_message,
4966 "Should have an assistant message after successful retry"
4967 );
4968 });
4969 }
4970
4971 #[gpui::test]
4972 async fn test_rate_limit_retry_single_attempt(cx: &mut TestAppContext) {
4973 init_test_settings(cx);
4974
4975 let project = create_test_project(cx, json!({})).await;
4976 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4977
4978 // Enable Burn Mode to allow retries
4979 thread.update(cx, |thread, _| {
4980 thread.set_completion_mode(CompletionMode::Burn);
4981 });
4982
4983 // Create a model that returns rate limit error with retry_after
4984 struct RateLimitModel {
4985 inner: Arc<FakeLanguageModel>,
4986 }
4987
4988 impl LanguageModel for RateLimitModel {
4989 fn id(&self) -> LanguageModelId {
4990 self.inner.id()
4991 }
4992
4993 fn name(&self) -> LanguageModelName {
4994 self.inner.name()
4995 }
4996
4997 fn provider_id(&self) -> LanguageModelProviderId {
4998 self.inner.provider_id()
4999 }
5000
5001 fn provider_name(&self) -> LanguageModelProviderName {
5002 self.inner.provider_name()
5003 }
5004
5005 fn supports_tools(&self) -> bool {
5006 self.inner.supports_tools()
5007 }
5008
5009 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
5010 self.inner.supports_tool_choice(choice)
5011 }
5012
5013 fn supports_images(&self) -> bool {
5014 self.inner.supports_images()
5015 }
5016
5017 fn telemetry_id(&self) -> String {
5018 self.inner.telemetry_id()
5019 }
5020
5021 fn max_token_count(&self) -> u64 {
5022 self.inner.max_token_count()
5023 }
5024
5025 fn count_tokens(
5026 &self,
5027 request: LanguageModelRequest,
5028 cx: &App,
5029 ) -> BoxFuture<'static, Result<u64>> {
5030 self.inner.count_tokens(request, cx)
5031 }
5032
5033 fn stream_completion(
5034 &self,
5035 _request: LanguageModelRequest,
5036 _cx: &AsyncApp,
5037 ) -> BoxFuture<
5038 'static,
5039 Result<
5040 BoxStream<
5041 'static,
5042 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
5043 >,
5044 LanguageModelCompletionError,
5045 >,
5046 > {
5047 let provider = self.provider_name();
5048 async move {
5049 let stream = futures::stream::once(async move {
5050 Err(LanguageModelCompletionError::RateLimitExceeded {
5051 provider,
5052 retry_after: Some(Duration::from_secs(TEST_RATE_LIMIT_RETRY_SECS)),
5053 })
5054 });
5055 Ok(stream.boxed())
5056 }
5057 .boxed()
5058 }
5059
5060 fn as_fake(&self) -> &FakeLanguageModel {
5061 &self.inner
5062 }
5063 }
5064
5065 let model = Arc::new(RateLimitModel {
5066 inner: Arc::new(FakeLanguageModel::default()),
5067 });
5068
5069 // Insert a user message
5070 thread.update(cx, |thread, cx| {
5071 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5072 });
5073
5074 // Start completion
5075 thread.update(cx, |thread, cx| {
5076 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5077 });
5078
5079 cx.run_until_parked();
5080
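        // Count the ui_only messages mentioning the rate limit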
5081 let retry_count = thread.update(cx, |thread, _| {
5082 thread
5083 .messages
5084 .iter()
5085 .filter(|m| {
5086 m.ui_only
5087 && m.segments.iter().any(|s| {
5088 if let MessageSegment::Text(text) = s {
5089 text.contains("rate limit exceeded")
5090 } else {
5091 false
5092 }
5093 })
5094 })
5095 .count()
5096 });
5097 assert_eq!(retry_count, 1, "Should have scheduled one retry");
5098
5099 thread.read_with(cx, |thread, _| {
5100 assert!(
5101 thread.retry_state.is_some(),
5102 "Rate limit errors should set retry_state"
5103 );
5104 if let Some(retry_state) = &thread.retry_state {
5105 assert_eq!(
5106 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
5107 "Rate limit errors should use MAX_RETRY_ATTEMPTS"
5108 );
5109 }
5110 });
5111
5112 // Verify we have one retry message
5113 thread.read_with(cx, |thread, _| {
5114 let retry_messages = thread
5115 .messages
5116 .iter()
5117 .filter(|msg| {
5118 msg.ui_only
5119 && msg.segments.iter().any(|seg| {
5120 if let MessageSegment::Text(text) = seg {
5121 text.contains("rate limit exceeded")
5122 } else {
5123 false
5124 }
5125 })
5126 })
5127 .count();
5128 assert_eq!(
5129 retry_messages, 1,
5130 "Should have one rate limit retry message"
5131 );
5132 });
5133
        // Check that the retry message reports the attempt count
5135 thread.read_with(cx, |thread, _| {
5136 let retry_message = thread
5137 .messages
5138 .iter()
5139 .find(|msg| msg.role == Role::System && msg.ui_only)
5140 .expect("Should have a retry message");
5141
            // Rate-limit retries go through retry_state, so the message reports the attempt
5143 if let Some(MessageSegment::Text(text)) = retry_message.segments.first() {
5144 assert!(
5145 text.contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS)),
5146 "Rate limit retry message should contain attempt count with MAX_RETRY_ATTEMPTS"
5147 );
5148 assert!(
5149 text.contains("Retrying"),
5150 "Rate limit retry message should contain retry text"
5151 );
5152 }
5153 });
5154 }
5155
5156 #[gpui::test]
5157 async fn test_ui_only_messages_not_sent_to_model(cx: &mut TestAppContext) {
5158 init_test_settings(cx);
5159
5160 let project = create_test_project(cx, json!({})).await;
5161 let (_, _, thread, _, model) = setup_test_environment(cx, project.clone()).await;
5162
5163 // Insert a regular user message
5164 thread.update(cx, |thread, cx| {
5165 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5166 });
5167
5168 // Insert a UI-only message (like our retry notifications)
5169 thread.update(cx, |thread, cx| {
5170 let id = thread.next_message_id.post_inc();
5171 thread.messages.push(Message {
5172 id,
5173 role: Role::System,
5174 segments: vec![MessageSegment::Text(
5175 "This is a UI-only message that should not be sent to the model".to_string(),
5176 )],
5177 loaded_context: LoadedContext::default(),
5178 creases: Vec::new(),
5179 is_hidden: true,
5180 ui_only: true,
5181 });
5182 cx.emit(ThreadEvent::MessageAdded(id));
5183 });
5184
5185 // Insert another regular message
5186 thread.update(cx, |thread, cx| {
5187 thread.insert_user_message(
5188 "How are you?",
5189 ContextLoadResult::default(),
5190 None,
5191 vec![],
5192 cx,
5193 );
5194 });
5195
5196 // Generate the completion request
5197 let request = thread.update(cx, |thread, cx| {
5198 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
5199 });
5200
5201 // Verify that the request only contains non-UI-only messages
5202 // Should have system prompt + 2 user messages, but not the UI-only message
5203 let user_messages: Vec<_> = request
5204 .messages
5205 .iter()
5206 .filter(|msg| msg.role == Role::User)
5207 .collect();
5208 assert_eq!(
5209 user_messages.len(),
5210 2,
5211 "Should have exactly 2 user messages"
5212 );
5213
5214 // Verify the UI-only content is not present anywhere in the request
5215 let request_text = request
5216 .messages
5217 .iter()
5218 .flat_map(|msg| &msg.content)
5219 .filter_map(|content| match content {
5220 MessageContent::Text(text) => Some(text.as_str()),
5221 _ => None,
5222 })
5223 .collect::<String>();
5224
5225 assert!(
5226 !request_text.contains("UI-only message"),
5227 "UI-only message content should not be in the request"
5228 );
5229
5230 // Verify the thread still has all 3 messages (including UI-only)
5231 thread.read_with(cx, |thread, _| {
5232 assert_eq!(
5233 thread.messages().count(),
5234 3,
5235 "Thread should have 3 messages"
5236 );
5237 assert_eq!(
5238 thread.messages().filter(|m| m.ui_only).count(),
5239 1,
5240 "Thread should have 1 UI-only message"
5241 );
5242 });
5243
5244 // Verify that UI-only messages are not serialized
5245 let serialized = thread
5246 .update(cx, |thread, cx| thread.serialize(cx))
5247 .await
5248 .unwrap();
5249 assert_eq!(
5250 serialized.messages.len(),
5251 2,
5252 "Serialized thread should only have 2 messages (no UI-only)"
5253 );
5254 }
5255
5256 #[gpui::test]
5257 async fn test_no_retry_without_burn_mode(cx: &mut TestAppContext) {
5258 init_test_settings(cx);
5259
5260 let project = create_test_project(cx, json!({})).await;
5261 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5262
5263 // Ensure we're in Normal mode (not Burn mode)
5264 thread.update(cx, |thread, _| {
5265 thread.set_completion_mode(CompletionMode::Normal);
5266 });
5267
5268 // Track error events
5269 let error_events = Arc::new(Mutex::new(Vec::new()));
5270 let error_events_clone = error_events.clone();
5271
5272 let _subscription = thread.update(cx, |_, cx| {
5273 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
5274 if let ThreadEvent::ShowError(error) = event {
5275 error_events_clone.lock().push(error.clone());
5276 }
5277 })
5278 });
5279
5280 // Create model that returns overloaded error
5281 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5282
5283 // Insert a user message
5284 thread.update(cx, |thread, cx| {
5285 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5286 });
5287
5288 // Start completion
5289 thread.update(cx, |thread, cx| {
5290 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5291 });
5292
5293 cx.run_until_parked();
5294
5295 // Verify no retry state was created
5296 thread.read_with(cx, |thread, _| {
5297 assert!(
5298 thread.retry_state.is_none(),
5299 "Should not have retry state in Normal mode"
5300 );
5301 });
5302
5303 // Check that a retryable error was reported
5304 let errors = error_events.lock();
5305 assert!(!errors.is_empty(), "Should have received an error event");
5306
5307 if let ThreadError::RetryableError {
5308 message: _,
5309 can_enable_burn_mode,
5310 } = &errors[0]
5311 {
5312 assert!(
5313 *can_enable_burn_mode,
5314 "Error should indicate burn mode can be enabled"
5315 );
5316 } else {
5317 panic!("Expected RetryableError, got {:?}", errors[0]);
5318 }
5319
5320 // Verify the thread is no longer generating
5321 thread.read_with(cx, |thread, _| {
5322 assert!(
5323 !thread.is_generating(),
5324 "Should not be generating after error without retry"
5325 );
5326 });
5327 }
5328
5329 #[gpui::test]
5330 async fn test_retry_cancelled_on_stop(cx: &mut TestAppContext) {
5331 init_test_settings(cx);
5332
5333 let project = create_test_project(cx, json!({})).await;
5334 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5335
5336 // Enable Burn Mode to allow retries
5337 thread.update(cx, |thread, _| {
5338 thread.set_completion_mode(CompletionMode::Burn);
5339 });
5340
5341 // Create model that returns overloaded error
5342 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5343
5344 // Insert a user message
5345 thread.update(cx, |thread, cx| {
5346 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5347 });
5348
5349 // Start completion
5350 thread.update(cx, |thread, cx| {
5351 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5352 });
5353
5354 cx.run_until_parked();
5355
5356 // Verify retry was scheduled by checking for retry message
5357 let has_retry_message = thread.read_with(cx, |thread, _| {
5358 thread.messages.iter().any(|m| {
5359 m.ui_only
5360 && m.segments.iter().any(|s| {
5361 if let MessageSegment::Text(text) = s {
5362 text.contains("Retrying") && text.contains("seconds")
5363 } else {
5364 false
5365 }
5366 })
5367 })
5368 });
5369 assert!(has_retry_message, "Should have scheduled a retry");
5370
5371 // Cancel the completion before the retry happens
5372 thread.update(cx, |thread, cx| {
5373 thread.cancel_last_completion(None, cx);
5374 });
5375
5376 cx.run_until_parked();
5377
5378 // The retry should not have happened - no pending completions
5379 let fake_model = model.as_fake();
5380 assert_eq!(
5381 fake_model.pending_completions().len(),
5382 0,
5383 "Should have no pending completions after cancellation"
5384 );
5385
5386 // Verify the retry was cancelled by checking retry state
5387 thread.read_with(cx, |thread, _| {
5388 if let Some(retry_state) = &thread.retry_state {
5389 panic!(
5390 "retry_state should be cleared after cancellation, but found: attempt={}, max_attempts={}, intent={:?}",
5391 retry_state.attempt, retry_state.max_attempts, retry_state.intent
5392 );
5393 }
5394 });
5395 }
5396
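    /// Drives a thread through a summarization request that ends without
    /// streaming any text, leaving the summary in the `Error` state.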
5397 fn test_summarize_error(
5398 model: &Arc<dyn LanguageModel>,
5399 thread: &Entity<Thread>,
5400 cx: &mut TestAppContext,
5401 ) {
5402 thread.update(cx, |thread, cx| {
5403 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
5404 thread.send_to_model(
5405 model.clone(),
5406 CompletionIntent::ThreadSummarization,
5407 None,
5408 cx,
5409 );
5410 });
5411
5412 let fake_model = model.as_fake();
5413 simulate_successful_response(&fake_model, cx);
5414
5415 thread.read_with(cx, |thread, _| {
5416 assert!(matches!(thread.summary(), ThreadSummary::Generating));
5417 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5418 });
5419
        // End the summary stream without any text; this is treated as a failure
5421 cx.run_until_parked();
5422 fake_model.end_last_completion_stream();
5423 cx.run_until_parked();
5424
        // State is set to Error and the summary falls back to the default
5426 thread.read_with(cx, |thread, _| {
5427 assert!(matches!(thread.summary(), ThreadSummary::Error));
5428 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5429 });
5430 }
5431
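    /// Streams a canned assistant response into the most recent completion and
    /// waits for pending tasks to settle.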
5432 fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
5433 cx.run_until_parked();
5434 fake_model.stream_last_completion_response("Assistant response");
5435 fake_model.end_last_completion_stream();
5436 cx.run_until_parked();
5437 }
5438
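    /// Registers the settings, tools, and language-model globals that the
    /// thread tests rely on.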
5439 fn init_test_settings(cx: &mut TestAppContext) {
5440 cx.update(|cx| {
5441 let settings_store = SettingsStore::test(cx);
5442 cx.set_global(settings_store);
5443 language::init(cx);
5444 Project::init_settings(cx);
5445 AgentSettings::register(cx);
5446 prompt_store::init(cx);
5447 thread_store::init(cx);
5448 workspace::init_settings(cx);
5449 language_model::init_settings(cx);
5450 ThemeSettings::register(cx);
5451 ToolRegistry::default_global(cx);
5452 assistant_tool::init(cx);
5453
5454 let http_client = Arc::new(http_client::HttpClientWithUrl::new(
5455 http_client::FakeHttpClient::with_200_response(),
5456 "http://localhost".to_string(),
5457 None,
5458 ));
5459 assistant_tools::init(http_client, cx);
5460 });
5461 }
5462
5463 // Helper to create a test project with test files
5464 async fn create_test_project(
5465 cx: &mut TestAppContext,
5466 files: serde_json::Value,
5467 ) -> Entity<Project> {
5468 let fs = FakeFs::new(cx.executor());
5469 fs.insert_tree(path!("/test"), files).await;
5470 Project::test(fs, [path!("/test").as_ref()], cx).await
5471 }
5472
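    /// Creates a workspace, thread store, thread, and context store for the
    /// given project, and installs a `FakeLanguageModelProvider` as both the
    /// default and the thread-summary model.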
5473 async fn setup_test_environment(
5474 cx: &mut TestAppContext,
5475 project: Entity<Project>,
5476 ) -> (
5477 Entity<Workspace>,
5478 Entity<ThreadStore>,
5479 Entity<Thread>,
5480 Entity<ContextStore>,
5481 Arc<dyn LanguageModel>,
5482 ) {
5483 let (workspace, cx) =
5484 cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
5485
5486 let (client, user_store) =
5487 project.read_with(cx, |project, _cx| (project.client(), project.user_store()));
5488 let cloud_user_store =
5489 cx.new(|cx| CloudUserStore::new(client.cloud_client(), user_store, cx));
5490
5491 let thread_store = cx
5492 .update(|_, cx| {
5493 ThreadStore::load(
5494 project.clone(),
5495 cloud_user_store,
5496 cx.new(|_| ToolWorkingSet::default()),
5497 None,
5498 Arc::new(PromptBuilder::new(None).unwrap()),
5499 cx,
5500 )
5501 })
5502 .await
5503 .unwrap();
5504
5505 let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
5506 let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));
5507
5508 let provider = Arc::new(FakeLanguageModelProvider::default());
5509 let model = provider.test_model();
5510 let model: Arc<dyn LanguageModel> = Arc::new(model);
5511
5512 cx.update(|_, cx| {
5513 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
5514 registry.set_default_model(
5515 Some(ConfiguredModel {
5516 provider: provider.clone(),
5517 model: model.clone(),
5518 }),
5519 cx,
5520 );
5521 registry.set_thread_summary_model(
5522 Some(ConfiguredModel {
5523 provider,
5524 model: model.clone(),
5525 }),
5526 cx,
5527 );
5528 })
5529 });
5530
5531 (workspace, thread_store, thread, context_store, model)
5532 }
5533
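    /// Opens `path` as a buffer in the project and adds it to the context
    /// store, returning the buffer.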
5534 async fn add_file_to_context(
5535 project: &Entity<Project>,
5536 context_store: &Entity<ContextStore>,
5537 path: &str,
5538 cx: &mut TestAppContext,
5539 ) -> Result<Entity<language::Buffer>> {
5540 let buffer_path = project
5541 .read_with(cx, |project, cx| project.find_project_path(path, cx))
5542 .unwrap();
5543
5544 let buffer = project
5545 .update(cx, |project, cx| {
5546 project.open_buffer(buffer_path.clone(), cx)
5547 })
5548 .await
5549 .unwrap();
5550
5551 context_store.update(cx, |context_store, cx| {
5552 context_store.add_file_from_buffer(&buffer_path, buffer.clone(), false, cx);
5553 });
5554
5555 Ok(buffer)
5556 }
5557}