1use crate::{
2 agent_profile::AgentProfile,
3 context::{AgentContext, AgentContextHandle, ContextLoadResult, LoadedContext},
4 thread_store::{
5 SerializedCrease, SerializedLanguageModel, SerializedMessage, SerializedMessageSegment,
6 SerializedThread, SerializedToolResult, SerializedToolUse, SharedProjectContext,
7 ThreadStore,
8 },
9 tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState},
10};
11use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
12use anyhow::{Result, anyhow};
13use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
14use chrono::{DateTime, Utc};
15use client::{ModelRequestUsage, RequestUsage};
16use collections::HashMap;
17use feature_flags::{self, FeatureFlagAppExt};
18use futures::{FutureExt, StreamExt as _, future::Shared};
19use git::repository::DiffType;
20use gpui::{
21 AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
22 WeakEntity, Window,
23};
24use http_client::StatusCode;
25use language_model::{
26 ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
27 LanguageModelExt as _, LanguageModelId, LanguageModelRegistry, LanguageModelRequest,
28 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
29 LanguageModelToolResultContent, LanguageModelToolUse, LanguageModelToolUseId, MessageContent,
30 ModelRequestLimitReachedError, PaymentRequiredError, Role, SelectedModel, StopReason,
31 TokenUsage,
32};
33use postage::stream::Stream as _;
34use project::{
35 Project,
36 git_store::{GitStore, GitStoreCheckpoint, RepositoryState},
37};
38use prompt_store::{ModelContext, PromptBuilder};
39use proto::Plan;
40use schemars::JsonSchema;
41use serde::{Deserialize, Serialize};
42use settings::Settings;
43use std::{
44 io::Write,
45 ops::Range,
46 sync::Arc,
47 time::{Duration, Instant},
48};
49use thiserror::Error;
50use util::{ResultExt as _, debug_panic, post_inc};
51use uuid::Uuid;
52use zed_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
53
54const MAX_RETRY_ATTEMPTS: u8 = 3;
55const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
56
57#[derive(Debug, Clone)]
58enum RetryStrategy {
59 ExponentialBackoff {
60 initial_delay: Duration,
61 max_attempts: u8,
62 },
63 Fixed {
64 delay: Duration,
65 max_attempts: u8,
66 },
67}
68
69#[derive(
70 Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
71)]
72pub struct ThreadId(Arc<str>);
73
74impl ThreadId {
75 pub fn new() -> Self {
76 Self(Uuid::new_v4().to_string().into())
77 }
78}
79
80impl std::fmt::Display for ThreadId {
81 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
82 write!(f, "{}", self.0)
83 }
84}
85
86impl From<&str> for ThreadId {
87 fn from(value: &str) -> Self {
88 Self(value.into())
89 }
90}
91
92/// The ID of the user prompt that initiated a request.
93///
94/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
95#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
96pub struct PromptId(Arc<str>);
97
98impl PromptId {
99 pub fn new() -> Self {
100 Self(Uuid::new_v4().to_string().into())
101 }
102}
103
104impl std::fmt::Display for PromptId {
105 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
106 write!(f, "{}", self.0)
107 }
108}
109
110#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
111pub struct MessageId(pub(crate) usize);
112
113impl MessageId {
114 fn post_inc(&mut self) -> Self {
115 Self(post_inc(&mut self.0))
116 }
117
118 pub fn as_usize(&self) -> usize {
119 self.0
120 }
121}
122
123/// Stored information that can be used to resurrect a context crease when creating an editor for a past message.
124#[derive(Clone, Debug)]
125pub struct MessageCrease {
126 pub range: Range<usize>,
127 pub icon_path: SharedString,
128 pub label: SharedString,
129 /// None for a deserialized message, Some otherwise.
130 pub context: Option<AgentContextHandle>,
131}
132
133/// A message in a [`Thread`].
134#[derive(Debug, Clone)]
135pub struct Message {
136 pub id: MessageId,
137 pub role: Role,
138 pub segments: Vec<MessageSegment>,
139 pub loaded_context: LoadedContext,
140 pub creases: Vec<MessageCrease>,
141 pub is_hidden: bool,
142 pub ui_only: bool,
143}
144
145impl Message {
146 /// Returns whether the message contains any meaningful text that should be displayed
147 /// The model sometimes runs tool without producing any text or just a marker ([`USING_TOOL_MARKER`])
148 pub fn should_display_content(&self) -> bool {
149 self.segments.iter().all(|segment| segment.should_display())
150 }
151
152 pub fn push_thinking(&mut self, text: &str, signature: Option<String>) {
153 if let Some(MessageSegment::Thinking {
154 text: segment,
155 signature: current_signature,
156 }) = self.segments.last_mut()
157 {
158 if let Some(signature) = signature {
159 *current_signature = Some(signature);
160 }
161 segment.push_str(text);
162 } else {
163 self.segments.push(MessageSegment::Thinking {
164 text: text.to_string(),
165 signature,
166 });
167 }
168 }
169
170 pub fn push_redacted_thinking(&mut self, data: String) {
171 self.segments.push(MessageSegment::RedactedThinking(data));
172 }
173
174 pub fn push_text(&mut self, text: &str) {
175 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
176 segment.push_str(text);
177 } else {
178 self.segments.push(MessageSegment::Text(text.to_string()));
179 }
180 }
181
182 pub fn to_string(&self) -> String {
183 let mut result = String::new();
184
185 if !self.loaded_context.text.is_empty() {
186 result.push_str(&self.loaded_context.text);
187 }
188
189 for segment in &self.segments {
190 match segment {
191 MessageSegment::Text(text) => result.push_str(text),
192 MessageSegment::Thinking { text, .. } => {
193 result.push_str("<think>\n");
194 result.push_str(text);
195 result.push_str("\n</think>");
196 }
197 MessageSegment::RedactedThinking(_) => {}
198 }
199 }
200
201 result
202 }
203}
204
205#[derive(Debug, Clone, PartialEq, Eq)]
206pub enum MessageSegment {
207 Text(String),
208 Thinking {
209 text: String,
210 signature: Option<String>,
211 },
212 RedactedThinking(String),
213}
214
215impl MessageSegment {
216 pub fn should_display(&self) -> bool {
217 match self {
218 Self::Text(text) => text.is_empty(),
219 Self::Thinking { text, .. } => text.is_empty(),
220 Self::RedactedThinking(_) => false,
221 }
222 }
223
224 pub fn text(&self) -> Option<&str> {
225 match self {
226 MessageSegment::Text(text) => Some(text),
227 _ => None,
228 }
229 }
230}
231
232#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
233pub struct ProjectSnapshot {
234 pub worktree_snapshots: Vec<WorktreeSnapshot>,
235 pub unsaved_buffer_paths: Vec<String>,
236 pub timestamp: DateTime<Utc>,
237}
238
239#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
240pub struct WorktreeSnapshot {
241 pub worktree_path: String,
242 pub git_state: Option<GitState>,
243}
244
245#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
246pub struct GitState {
247 pub remote_url: Option<String>,
248 pub head_sha: Option<String>,
249 pub current_branch: Option<String>,
250 pub diff: Option<String>,
251}
252
253#[derive(Clone, Debug)]
254pub struct ThreadCheckpoint {
255 message_id: MessageId,
256 git_checkpoint: GitStoreCheckpoint,
257}
258
259#[derive(Copy, Clone, Debug, PartialEq, Eq)]
260pub enum ThreadFeedback {
261 Positive,
262 Negative,
263}
264
265pub enum LastRestoreCheckpoint {
266 Pending {
267 message_id: MessageId,
268 },
269 Error {
270 message_id: MessageId,
271 error: String,
272 },
273}
274
275impl LastRestoreCheckpoint {
276 pub fn message_id(&self) -> MessageId {
277 match self {
278 LastRestoreCheckpoint::Pending { message_id } => *message_id,
279 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
280 }
281 }
282}
283
284#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
285pub enum DetailedSummaryState {
286 #[default]
287 NotGenerated,
288 Generating {
289 message_id: MessageId,
290 },
291 Generated {
292 text: SharedString,
293 message_id: MessageId,
294 },
295}
296
297impl DetailedSummaryState {
298 fn text(&self) -> Option<SharedString> {
299 if let Self::Generated { text, .. } = self {
300 Some(text.clone())
301 } else {
302 None
303 }
304 }
305}
306
307#[derive(Default, Debug)]
308pub struct TotalTokenUsage {
309 pub total: u64,
310 pub max: u64,
311}
312
313impl TotalTokenUsage {
314 pub fn ratio(&self) -> TokenUsageRatio {
315 #[cfg(debug_assertions)]
316 let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
317 .unwrap_or("0.8".to_string())
318 .parse()
319 .unwrap();
320 #[cfg(not(debug_assertions))]
321 let warning_threshold: f32 = 0.8;
322
323 // When the maximum is unknown because there is no selected model,
324 // avoid showing the token limit warning.
325 if self.max == 0 {
326 TokenUsageRatio::Normal
327 } else if self.total >= self.max {
328 TokenUsageRatio::Exceeded
329 } else if self.total as f32 / self.max as f32 >= warning_threshold {
330 TokenUsageRatio::Warning
331 } else {
332 TokenUsageRatio::Normal
333 }
334 }
335
336 pub fn add(&self, tokens: u64) -> TotalTokenUsage {
337 TotalTokenUsage {
338 total: self.total + tokens,
339 max: self.max,
340 }
341 }
342}
343
344#[derive(Debug, Default, PartialEq, Eq)]
345pub enum TokenUsageRatio {
346 #[default]
347 Normal,
348 Warning,
349 Exceeded,
350}
351
352#[derive(Debug, Clone, Copy)]
353pub enum QueueState {
354 Sending,
355 Queued { position: usize },
356 Started,
357}
358
359/// A thread of conversation with the LLM.
360pub struct Thread {
361 id: ThreadId,
362 updated_at: DateTime<Utc>,
363 summary: ThreadSummary,
364 pending_summary: Task<Option<()>>,
365 detailed_summary_task: Task<Option<()>>,
366 detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
367 detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
368 completion_mode: agent_settings::CompletionMode,
369 messages: Vec<Message>,
370 next_message_id: MessageId,
371 last_prompt_id: PromptId,
372 project_context: SharedProjectContext,
373 checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
374 completion_count: usize,
375 pending_completions: Vec<PendingCompletion>,
376 project: Entity<Project>,
377 prompt_builder: Arc<PromptBuilder>,
378 tools: Entity<ToolWorkingSet>,
379 tool_use: ToolUseState,
380 action_log: Entity<ActionLog>,
381 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
382 pending_checkpoint: Option<ThreadCheckpoint>,
383 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
384 request_token_usage: Vec<TokenUsage>,
385 cumulative_token_usage: TokenUsage,
386 exceeded_window_error: Option<ExceededWindowError>,
387 tool_use_limit_reached: bool,
388 feedback: Option<ThreadFeedback>,
389 retry_state: Option<RetryState>,
390 message_feedback: HashMap<MessageId, ThreadFeedback>,
391 last_auto_capture_at: Option<Instant>,
392 last_received_chunk_at: Option<Instant>,
393 request_callback: Option<
394 Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
395 >,
396 remaining_turns: u32,
397 configured_model: Option<ConfiguredModel>,
398 profile: AgentProfile,
399}
400
401#[derive(Clone, Debug)]
402struct RetryState {
403 attempt: u8,
404 max_attempts: u8,
405 intent: CompletionIntent,
406}
407
408#[derive(Clone, Debug, PartialEq, Eq)]
409pub enum ThreadSummary {
410 Pending,
411 Generating,
412 Ready(SharedString),
413 Error,
414}
415
416impl ThreadSummary {
417 pub const DEFAULT: SharedString = SharedString::new_static("New Thread");
418
419 pub fn or_default(&self) -> SharedString {
420 self.unwrap_or(Self::DEFAULT)
421 }
422
423 pub fn unwrap_or(&self, message: impl Into<SharedString>) -> SharedString {
424 self.ready().unwrap_or_else(|| message.into())
425 }
426
427 pub fn ready(&self) -> Option<SharedString> {
428 match self {
429 ThreadSummary::Ready(summary) => Some(summary.clone()),
430 ThreadSummary::Pending | ThreadSummary::Generating | ThreadSummary::Error => None,
431 }
432 }
433}
434
435#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
436pub struct ExceededWindowError {
437 /// Model used when last message exceeded context window
438 model_id: LanguageModelId,
439 /// Token count including last message
440 token_count: u64,
441}
442
443impl Thread {
444 pub fn new(
445 project: Entity<Project>,
446 tools: Entity<ToolWorkingSet>,
447 prompt_builder: Arc<PromptBuilder>,
448 system_prompt: SharedProjectContext,
449 cx: &mut Context<Self>,
450 ) -> Self {
451 let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
452 let configured_model = LanguageModelRegistry::read_global(cx).default_model();
453 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
454
455 Self {
456 id: ThreadId::new(),
457 updated_at: Utc::now(),
458 summary: ThreadSummary::Pending,
459 pending_summary: Task::ready(None),
460 detailed_summary_task: Task::ready(None),
461 detailed_summary_tx,
462 detailed_summary_rx,
463 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
464 messages: Vec::new(),
465 next_message_id: MessageId(0),
466 last_prompt_id: PromptId::new(),
467 project_context: system_prompt,
468 checkpoints_by_message: HashMap::default(),
469 completion_count: 0,
470 pending_completions: Vec::new(),
471 project: project.clone(),
472 prompt_builder,
473 tools: tools.clone(),
474 last_restore_checkpoint: None,
475 pending_checkpoint: None,
476 tool_use: ToolUseState::new(tools.clone()),
477 action_log: cx.new(|_| ActionLog::new(project.clone())),
478 initial_project_snapshot: {
479 let project_snapshot = Self::project_snapshot(project, cx);
480 cx.foreground_executor()
481 .spawn(async move { Some(project_snapshot.await) })
482 .shared()
483 },
484 request_token_usage: Vec::new(),
485 cumulative_token_usage: TokenUsage::default(),
486 exceeded_window_error: None,
487 tool_use_limit_reached: false,
488 feedback: None,
489 retry_state: None,
490 message_feedback: HashMap::default(),
491 last_auto_capture_at: None,
492 last_received_chunk_at: None,
493 request_callback: None,
494 remaining_turns: u32::MAX,
495 configured_model,
496 profile: AgentProfile::new(profile_id, tools),
497 }
498 }
499
500 pub fn deserialize(
501 id: ThreadId,
502 serialized: SerializedThread,
503 project: Entity<Project>,
504 tools: Entity<ToolWorkingSet>,
505 prompt_builder: Arc<PromptBuilder>,
506 project_context: SharedProjectContext,
507 window: Option<&mut Window>, // None in headless mode
508 cx: &mut Context<Self>,
509 ) -> Self {
510 let next_message_id = MessageId(
511 serialized
512 .messages
513 .last()
514 .map(|message| message.id.0 + 1)
515 .unwrap_or(0),
516 );
517 let tool_use = ToolUseState::from_serialized_messages(
518 tools.clone(),
519 &serialized.messages,
520 project.clone(),
521 window,
522 cx,
523 );
524 let (detailed_summary_tx, detailed_summary_rx) =
525 postage::watch::channel_with(serialized.detailed_summary_state);
526
527 let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
528 serialized
529 .model
530 .and_then(|model| {
531 let model = SelectedModel {
532 provider: model.provider.clone().into(),
533 model: model.model.clone().into(),
534 };
535 registry.select_model(&model, cx)
536 })
537 .or_else(|| registry.default_model())
538 });
539
540 let completion_mode = serialized
541 .completion_mode
542 .unwrap_or_else(|| AgentSettings::get_global(cx).preferred_completion_mode);
543 let profile_id = serialized
544 .profile
545 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
546
547 Self {
548 id,
549 updated_at: serialized.updated_at,
550 summary: ThreadSummary::Ready(serialized.summary),
551 pending_summary: Task::ready(None),
552 detailed_summary_task: Task::ready(None),
553 detailed_summary_tx,
554 detailed_summary_rx,
555 completion_mode,
556 retry_state: None,
557 messages: serialized
558 .messages
559 .into_iter()
560 .map(|message| Message {
561 id: message.id,
562 role: message.role,
563 segments: message
564 .segments
565 .into_iter()
566 .map(|segment| match segment {
567 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
568 SerializedMessageSegment::Thinking { text, signature } => {
569 MessageSegment::Thinking { text, signature }
570 }
571 SerializedMessageSegment::RedactedThinking { data } => {
572 MessageSegment::RedactedThinking(data)
573 }
574 })
575 .collect(),
576 loaded_context: LoadedContext {
577 contexts: Vec::new(),
578 text: message.context,
579 images: Vec::new(),
580 },
581 creases: message
582 .creases
583 .into_iter()
584 .map(|crease| MessageCrease {
585 range: crease.start..crease.end,
586 icon_path: crease.icon_path,
587 label: crease.label,
588 context: None,
589 })
590 .collect(),
591 is_hidden: message.is_hidden,
592 ui_only: false, // UI-only messages are not persisted
593 })
594 .collect(),
595 next_message_id,
596 last_prompt_id: PromptId::new(),
597 project_context,
598 checkpoints_by_message: HashMap::default(),
599 completion_count: 0,
600 pending_completions: Vec::new(),
601 last_restore_checkpoint: None,
602 pending_checkpoint: None,
603 project: project.clone(),
604 prompt_builder,
605 tools: tools.clone(),
606 tool_use,
607 action_log: cx.new(|_| ActionLog::new(project)),
608 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
609 request_token_usage: serialized.request_token_usage,
610 cumulative_token_usage: serialized.cumulative_token_usage,
611 exceeded_window_error: None,
612 tool_use_limit_reached: serialized.tool_use_limit_reached,
613 feedback: None,
614 message_feedback: HashMap::default(),
615 last_auto_capture_at: None,
616 last_received_chunk_at: None,
617 request_callback: None,
618 remaining_turns: u32::MAX,
619 configured_model,
620 profile: AgentProfile::new(profile_id, tools),
621 }
622 }
623
624 pub fn set_request_callback(
625 &mut self,
626 callback: impl 'static
627 + FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>]),
628 ) {
629 self.request_callback = Some(Box::new(callback));
630 }
631
632 pub fn id(&self) -> &ThreadId {
633 &self.id
634 }
635
636 pub fn profile(&self) -> &AgentProfile {
637 &self.profile
638 }
639
640 pub fn set_profile(&mut self, id: AgentProfileId, cx: &mut Context<Self>) {
641 if &id != self.profile.id() {
642 self.profile = AgentProfile::new(id, self.tools.clone());
643 cx.emit(ThreadEvent::ProfileChanged);
644 }
645 }
646
647 pub fn is_empty(&self) -> bool {
648 self.messages.is_empty()
649 }
650
651 pub fn updated_at(&self) -> DateTime<Utc> {
652 self.updated_at
653 }
654
655 pub fn touch_updated_at(&mut self) {
656 self.updated_at = Utc::now();
657 }
658
659 pub fn advance_prompt_id(&mut self) {
660 self.last_prompt_id = PromptId::new();
661 }
662
663 pub fn project_context(&self) -> SharedProjectContext {
664 self.project_context.clone()
665 }
666
667 pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
668 if self.configured_model.is_none() {
669 self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
670 }
671 self.configured_model.clone()
672 }
673
674 pub fn configured_model(&self) -> Option<ConfiguredModel> {
675 self.configured_model.clone()
676 }
677
678 pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
679 self.configured_model = model;
680 cx.notify();
681 }
682
683 pub fn summary(&self) -> &ThreadSummary {
684 &self.summary
685 }
686
687 pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
688 let current_summary = match &self.summary {
689 ThreadSummary::Pending | ThreadSummary::Generating => return,
690 ThreadSummary::Ready(summary) => summary,
691 ThreadSummary::Error => &ThreadSummary::DEFAULT,
692 };
693
694 let mut new_summary = new_summary.into();
695
696 if new_summary.is_empty() {
697 new_summary = ThreadSummary::DEFAULT;
698 }
699
700 if current_summary != &new_summary {
701 self.summary = ThreadSummary::Ready(new_summary);
702 cx.emit(ThreadEvent::SummaryChanged);
703 }
704 }
705
706 pub fn completion_mode(&self) -> CompletionMode {
707 self.completion_mode
708 }
709
710 pub fn set_completion_mode(&mut self, mode: CompletionMode) {
711 self.completion_mode = mode;
712 }
713
714 pub fn message(&self, id: MessageId) -> Option<&Message> {
715 let index = self
716 .messages
717 .binary_search_by(|message| message.id.cmp(&id))
718 .ok()?;
719
720 self.messages.get(index)
721 }
722
723 pub fn messages(&self) -> impl ExactSizeIterator<Item = &Message> {
724 self.messages.iter()
725 }
726
727 pub fn is_generating(&self) -> bool {
728 !self.pending_completions.is_empty() || !self.all_tools_finished()
729 }
730
731 /// Indicates whether streaming of language model events is stale.
732 /// When `is_generating()` is false, this method returns `None`.
733 pub fn is_generation_stale(&self) -> Option<bool> {
734 const STALE_THRESHOLD: u128 = 250;
735
736 self.last_received_chunk_at
737 .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
738 }
739
740 fn received_chunk(&mut self) {
741 self.last_received_chunk_at = Some(Instant::now());
742 }
743
744 pub fn queue_state(&self) -> Option<QueueState> {
745 self.pending_completions
746 .first()
747 .map(|pending_completion| pending_completion.queue_state)
748 }
749
750 pub fn tools(&self) -> &Entity<ToolWorkingSet> {
751 &self.tools
752 }
753
754 pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
755 self.tool_use
756 .pending_tool_uses()
757 .into_iter()
758 .find(|tool_use| &tool_use.id == id)
759 }
760
761 pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
762 self.tool_use
763 .pending_tool_uses()
764 .into_iter()
765 .filter(|tool_use| tool_use.status.needs_confirmation())
766 }
767
768 pub fn has_pending_tool_uses(&self) -> bool {
769 !self.tool_use.pending_tool_uses().is_empty()
770 }
771
772 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
773 self.checkpoints_by_message.get(&id).cloned()
774 }
775
776 pub fn restore_checkpoint(
777 &mut self,
778 checkpoint: ThreadCheckpoint,
779 cx: &mut Context<Self>,
780 ) -> Task<Result<()>> {
781 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
782 message_id: checkpoint.message_id,
783 });
784 cx.emit(ThreadEvent::CheckpointChanged);
785 cx.notify();
786
787 let git_store = self.project().read(cx).git_store().clone();
788 let restore = git_store.update(cx, |git_store, cx| {
789 git_store.restore_checkpoint(checkpoint.git_checkpoint.clone(), cx)
790 });
791
792 cx.spawn(async move |this, cx| {
793 let result = restore.await;
794 this.update(cx, |this, cx| {
795 if let Err(err) = result.as_ref() {
796 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
797 message_id: checkpoint.message_id,
798 error: err.to_string(),
799 });
800 } else {
801 this.truncate(checkpoint.message_id, cx);
802 this.last_restore_checkpoint = None;
803 }
804 this.pending_checkpoint = None;
805 cx.emit(ThreadEvent::CheckpointChanged);
806 cx.notify();
807 })?;
808 result
809 })
810 }
811
812 fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
813 let pending_checkpoint = if self.is_generating() {
814 return;
815 } else if let Some(checkpoint) = self.pending_checkpoint.take() {
816 checkpoint
817 } else {
818 return;
819 };
820
821 self.finalize_checkpoint(pending_checkpoint, cx);
822 }
823
824 fn finalize_checkpoint(
825 &mut self,
826 pending_checkpoint: ThreadCheckpoint,
827 cx: &mut Context<Self>,
828 ) {
829 let git_store = self.project.read(cx).git_store().clone();
830 let final_checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
831 cx.spawn(async move |this, cx| match final_checkpoint.await {
832 Ok(final_checkpoint) => {
833 let equal = git_store
834 .update(cx, |store, cx| {
835 store.compare_checkpoints(
836 pending_checkpoint.git_checkpoint.clone(),
837 final_checkpoint.clone(),
838 cx,
839 )
840 })?
841 .await
842 .unwrap_or(false);
843
844 if !equal {
845 this.update(cx, |this, cx| {
846 this.insert_checkpoint(pending_checkpoint, cx)
847 })?;
848 }
849
850 Ok(())
851 }
852 Err(_) => this.update(cx, |this, cx| {
853 this.insert_checkpoint(pending_checkpoint, cx)
854 }),
855 })
856 .detach();
857 }
858
859 fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
860 self.checkpoints_by_message
861 .insert(checkpoint.message_id, checkpoint);
862 cx.emit(ThreadEvent::CheckpointChanged);
863 cx.notify();
864 }
865
866 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
867 self.last_restore_checkpoint.as_ref()
868 }
869
870 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
871 let Some(message_ix) = self
872 .messages
873 .iter()
874 .rposition(|message| message.id == message_id)
875 else {
876 return;
877 };
878 for deleted_message in self.messages.drain(message_ix..) {
879 self.checkpoints_by_message.remove(&deleted_message.id);
880 }
881 cx.notify();
882 }
883
884 pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AgentContext> {
885 self.messages
886 .iter()
887 .find(|message| message.id == id)
888 .into_iter()
889 .flat_map(|message| message.loaded_context.contexts.iter())
890 }
891
892 pub fn is_turn_end(&self, ix: usize) -> bool {
893 if self.messages.is_empty() {
894 return false;
895 }
896
897 if !self.is_generating() && ix == self.messages.len() - 1 {
898 return true;
899 }
900
901 let Some(message) = self.messages.get(ix) else {
902 return false;
903 };
904
905 if message.role != Role::Assistant {
906 return false;
907 }
908
909 self.messages
910 .get(ix + 1)
911 .and_then(|message| {
912 self.message(message.id)
913 .map(|next_message| next_message.role == Role::User && !next_message.is_hidden)
914 })
915 .unwrap_or(false)
916 }
917
918 pub fn tool_use_limit_reached(&self) -> bool {
919 self.tool_use_limit_reached
920 }
921
922 /// Returns whether all of the tool uses have finished running.
923 pub fn all_tools_finished(&self) -> bool {
924 // If the only pending tool uses left are the ones with errors, then
925 // that means that we've finished running all of the pending tools.
926 self.tool_use
927 .pending_tool_uses()
928 .iter()
929 .all(|pending_tool_use| pending_tool_use.status.is_error())
930 }
931
932 /// Returns whether any pending tool uses may perform edits
933 pub fn has_pending_edit_tool_uses(&self) -> bool {
934 self.tool_use
935 .pending_tool_uses()
936 .iter()
937 .filter(|pending_tool_use| !pending_tool_use.status.is_error())
938 .any(|pending_tool_use| pending_tool_use.may_perform_edits)
939 }
940
941 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
942 self.tool_use.tool_uses_for_message(id, cx)
943 }
944
945 pub fn tool_results_for_message(
946 &self,
947 assistant_message_id: MessageId,
948 ) -> Vec<&LanguageModelToolResult> {
949 self.tool_use.tool_results_for_message(assistant_message_id)
950 }
951
952 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
953 self.tool_use.tool_result(id)
954 }
955
956 pub fn output_for_tool(&self, id: &LanguageModelToolUseId) -> Option<&Arc<str>> {
957 match &self.tool_use.tool_result(id)?.content {
958 LanguageModelToolResultContent::Text(text) => Some(text),
959 LanguageModelToolResultContent::Image(_) => {
960 // TODO: We should display image
961 None
962 }
963 }
964 }
965
966 pub fn card_for_tool(&self, id: &LanguageModelToolUseId) -> Option<AnyToolCard> {
967 self.tool_use.tool_result_card(id).cloned()
968 }
969
970 /// Return tools that are both enabled and supported by the model
971 pub fn available_tools(
972 &self,
973 cx: &App,
974 model: Arc<dyn LanguageModel>,
975 ) -> Vec<LanguageModelRequestTool> {
976 if model.supports_tools() {
977 self.profile
978 .enabled_tools(cx)
979 .into_iter()
980 .filter_map(|(name, tool)| {
981 // Skip tools that cannot be supported
982 let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
983 Some(LanguageModelRequestTool {
984 name: name.into(),
985 description: tool.description(),
986 input_schema,
987 })
988 })
989 .collect()
990 } else {
991 Vec::default()
992 }
993 }
994
995 pub fn insert_user_message(
996 &mut self,
997 text: impl Into<String>,
998 loaded_context: ContextLoadResult,
999 git_checkpoint: Option<GitStoreCheckpoint>,
1000 creases: Vec<MessageCrease>,
1001 cx: &mut Context<Self>,
1002 ) -> MessageId {
1003 if !loaded_context.referenced_buffers.is_empty() {
1004 self.action_log.update(cx, |log, cx| {
1005 for buffer in loaded_context.referenced_buffers {
1006 log.buffer_read(buffer, cx);
1007 }
1008 });
1009 }
1010
1011 let message_id = self.insert_message(
1012 Role::User,
1013 vec![MessageSegment::Text(text.into())],
1014 loaded_context.loaded_context,
1015 creases,
1016 false,
1017 cx,
1018 );
1019
1020 if let Some(git_checkpoint) = git_checkpoint {
1021 self.pending_checkpoint = Some(ThreadCheckpoint {
1022 message_id,
1023 git_checkpoint,
1024 });
1025 }
1026
1027 self.auto_capture_telemetry(cx);
1028
1029 message_id
1030 }
1031
1032 pub fn insert_invisible_continue_message(&mut self, cx: &mut Context<Self>) -> MessageId {
1033 let id = self.insert_message(
1034 Role::User,
1035 vec![MessageSegment::Text("Continue where you left off".into())],
1036 LoadedContext::default(),
1037 vec![],
1038 true,
1039 cx,
1040 );
1041 self.pending_checkpoint = None;
1042
1043 id
1044 }
1045
1046 pub fn insert_assistant_message(
1047 &mut self,
1048 segments: Vec<MessageSegment>,
1049 cx: &mut Context<Self>,
1050 ) -> MessageId {
1051 self.insert_message(
1052 Role::Assistant,
1053 segments,
1054 LoadedContext::default(),
1055 Vec::new(),
1056 false,
1057 cx,
1058 )
1059 }
1060
1061 pub fn insert_message(
1062 &mut self,
1063 role: Role,
1064 segments: Vec<MessageSegment>,
1065 loaded_context: LoadedContext,
1066 creases: Vec<MessageCrease>,
1067 is_hidden: bool,
1068 cx: &mut Context<Self>,
1069 ) -> MessageId {
1070 let id = self.next_message_id.post_inc();
1071 self.messages.push(Message {
1072 id,
1073 role,
1074 segments,
1075 loaded_context,
1076 creases,
1077 is_hidden,
1078 ui_only: false,
1079 });
1080 self.touch_updated_at();
1081 cx.emit(ThreadEvent::MessageAdded(id));
1082 id
1083 }
1084
1085 pub fn edit_message(
1086 &mut self,
1087 id: MessageId,
1088 new_role: Role,
1089 new_segments: Vec<MessageSegment>,
1090 creases: Vec<MessageCrease>,
1091 loaded_context: Option<LoadedContext>,
1092 checkpoint: Option<GitStoreCheckpoint>,
1093 cx: &mut Context<Self>,
1094 ) -> bool {
1095 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
1096 return false;
1097 };
1098 message.role = new_role;
1099 message.segments = new_segments;
1100 message.creases = creases;
1101 if let Some(context) = loaded_context {
1102 message.loaded_context = context;
1103 }
1104 if let Some(git_checkpoint) = checkpoint {
1105 self.checkpoints_by_message.insert(
1106 id,
1107 ThreadCheckpoint {
1108 message_id: id,
1109 git_checkpoint,
1110 },
1111 );
1112 }
1113 self.touch_updated_at();
1114 cx.emit(ThreadEvent::MessageEdited(id));
1115 true
1116 }
1117
1118 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
1119 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
1120 return false;
1121 };
1122 self.messages.remove(index);
1123 self.touch_updated_at();
1124 cx.emit(ThreadEvent::MessageDeleted(id));
1125 true
1126 }
1127
1128 /// Returns the representation of this [`Thread`] in a textual form.
1129 ///
1130 /// This is the representation we use when attaching a thread as context to another thread.
1131 pub fn text(&self) -> String {
1132 let mut text = String::new();
1133
1134 for message in &self.messages {
1135 text.push_str(match message.role {
1136 language_model::Role::User => "User:",
1137 language_model::Role::Assistant => "Agent:",
1138 language_model::Role::System => "System:",
1139 });
1140 text.push('\n');
1141
1142 for segment in &message.segments {
1143 match segment {
1144 MessageSegment::Text(content) => text.push_str(content),
1145 MessageSegment::Thinking { text: content, .. } => {
1146 text.push_str(&format!("<think>{}</think>", content))
1147 }
1148 MessageSegment::RedactedThinking(_) => {}
1149 }
1150 }
1151 text.push('\n');
1152 }
1153
1154 text
1155 }
1156
1157 /// Serializes this thread into a format for storage or telemetry.
1158 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
1159 let initial_project_snapshot = self.initial_project_snapshot.clone();
1160 cx.spawn(async move |this, cx| {
1161 let initial_project_snapshot = initial_project_snapshot.await;
1162 this.read_with(cx, |this, cx| SerializedThread {
1163 version: SerializedThread::VERSION.to_string(),
1164 summary: this.summary().or_default(),
1165 updated_at: this.updated_at(),
1166 messages: this
1167 .messages()
1168 .filter(|message| !message.ui_only)
1169 .map(|message| SerializedMessage {
1170 id: message.id,
1171 role: message.role,
1172 segments: message
1173 .segments
1174 .iter()
1175 .map(|segment| match segment {
1176 MessageSegment::Text(text) => {
1177 SerializedMessageSegment::Text { text: text.clone() }
1178 }
1179 MessageSegment::Thinking { text, signature } => {
1180 SerializedMessageSegment::Thinking {
1181 text: text.clone(),
1182 signature: signature.clone(),
1183 }
1184 }
1185 MessageSegment::RedactedThinking(data) => {
1186 SerializedMessageSegment::RedactedThinking {
1187 data: data.clone(),
1188 }
1189 }
1190 })
1191 .collect(),
1192 tool_uses: this
1193 .tool_uses_for_message(message.id, cx)
1194 .into_iter()
1195 .map(|tool_use| SerializedToolUse {
1196 id: tool_use.id,
1197 name: tool_use.name,
1198 input: tool_use.input,
1199 })
1200 .collect(),
1201 tool_results: this
1202 .tool_results_for_message(message.id)
1203 .into_iter()
1204 .map(|tool_result| SerializedToolResult {
1205 tool_use_id: tool_result.tool_use_id.clone(),
1206 is_error: tool_result.is_error,
1207 content: tool_result.content.clone(),
1208 output: tool_result.output.clone(),
1209 })
1210 .collect(),
1211 context: message.loaded_context.text.clone(),
1212 creases: message
1213 .creases
1214 .iter()
1215 .map(|crease| SerializedCrease {
1216 start: crease.range.start,
1217 end: crease.range.end,
1218 icon_path: crease.icon_path.clone(),
1219 label: crease.label.clone(),
1220 })
1221 .collect(),
1222 is_hidden: message.is_hidden,
1223 })
1224 .collect(),
1225 initial_project_snapshot,
1226 cumulative_token_usage: this.cumulative_token_usage,
1227 request_token_usage: this.request_token_usage.clone(),
1228 detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
1229 exceeded_window_error: this.exceeded_window_error.clone(),
1230 model: this
1231 .configured_model
1232 .as_ref()
1233 .map(|model| SerializedLanguageModel {
1234 provider: model.provider.id().0.to_string(),
1235 model: model.model.id().0.to_string(),
1236 }),
1237 completion_mode: Some(this.completion_mode),
1238 tool_use_limit_reached: this.tool_use_limit_reached,
1239 profile: Some(this.profile.id().clone()),
1240 })
1241 })
1242 }
1243
1244 pub fn remaining_turns(&self) -> u32 {
1245 self.remaining_turns
1246 }
1247
1248 pub fn set_remaining_turns(&mut self, remaining_turns: u32) {
1249 self.remaining_turns = remaining_turns;
1250 }
1251
1252 pub fn send_to_model(
1253 &mut self,
1254 model: Arc<dyn LanguageModel>,
1255 intent: CompletionIntent,
1256 window: Option<AnyWindowHandle>,
1257 cx: &mut Context<Self>,
1258 ) {
1259 if self.remaining_turns == 0 {
1260 return;
1261 }
1262
1263 self.remaining_turns -= 1;
1264
1265 self.flush_notifications(model.clone(), intent, cx);
1266
1267 let request = self.to_completion_request(model.clone(), intent, cx);
1268
1269 self.stream_completion(request, model, intent, window, cx);
1270 }
1271
1272 pub fn used_tools_since_last_user_message(&self) -> bool {
1273 for message in self.messages.iter().rev() {
1274 if self.tool_use.message_has_tool_results(message.id) {
1275 return true;
1276 } else if message.role == Role::User {
1277 return false;
1278 }
1279 }
1280
1281 false
1282 }
1283
1284 pub fn to_completion_request(
1285 &self,
1286 model: Arc<dyn LanguageModel>,
1287 intent: CompletionIntent,
1288 cx: &mut Context<Self>,
1289 ) -> LanguageModelRequest {
1290 let mut request = LanguageModelRequest {
1291 thread_id: Some(self.id.to_string()),
1292 prompt_id: Some(self.last_prompt_id.to_string()),
1293 intent: Some(intent),
1294 mode: None,
1295 messages: vec![],
1296 tools: Vec::new(),
1297 tool_choice: None,
1298 stop: Vec::new(),
1299 temperature: AgentSettings::temperature_for_model(&model, cx),
1300 thinking_allowed: true,
1301 };
1302
1303 let available_tools = self.available_tools(cx, model.clone());
1304 let available_tool_names = available_tools
1305 .iter()
1306 .map(|tool| tool.name.clone())
1307 .collect();
1308
1309 let model_context = &ModelContext {
1310 available_tools: available_tool_names,
1311 };
1312
1313 if let Some(project_context) = self.project_context.borrow().as_ref() {
1314 match self
1315 .prompt_builder
1316 .generate_assistant_system_prompt(project_context, model_context)
1317 {
1318 Err(err) => {
1319 let message = format!("{err:?}").into();
1320 log::error!("{message}");
1321 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1322 header: "Error generating system prompt".into(),
1323 message,
1324 }));
1325 }
1326 Ok(system_prompt) => {
1327 request.messages.push(LanguageModelRequestMessage {
1328 role: Role::System,
1329 content: vec![MessageContent::Text(system_prompt)],
1330 cache: true,
1331 });
1332 }
1333 }
1334 } else {
1335 let message = "Context for system prompt unexpectedly not ready.".into();
1336 log::error!("{message}");
1337 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1338 header: "Error generating system prompt".into(),
1339 message,
1340 }));
1341 }
1342
1343 let mut message_ix_to_cache = None;
1344 for message in &self.messages {
1345 // ui_only messages are for the UI only, not for the model
1346 if message.ui_only {
1347 continue;
1348 }
1349
1350 let mut request_message = LanguageModelRequestMessage {
1351 role: message.role,
1352 content: Vec::new(),
1353 cache: false,
1354 };
1355
1356 message
1357 .loaded_context
1358 .add_to_request_message(&mut request_message);
1359
1360 for segment in &message.segments {
1361 match segment {
1362 MessageSegment::Text(text) => {
1363 let text = text.trim_end();
1364 if !text.is_empty() {
1365 request_message
1366 .content
1367 .push(MessageContent::Text(text.into()));
1368 }
1369 }
1370 MessageSegment::Thinking { text, signature } => {
1371 if !text.is_empty() {
1372 request_message.content.push(MessageContent::Thinking {
1373 text: text.into(),
1374 signature: signature.clone(),
1375 });
1376 }
1377 }
1378 MessageSegment::RedactedThinking(data) => {
1379 request_message
1380 .content
1381 .push(MessageContent::RedactedThinking(data.clone()));
1382 }
1383 };
1384 }
1385
1386 let mut cache_message = true;
1387 let mut tool_results_message = LanguageModelRequestMessage {
1388 role: Role::User,
1389 content: Vec::new(),
1390 cache: false,
1391 };
1392 for (tool_use, tool_result) in self.tool_use.tool_results(message.id) {
1393 if let Some(tool_result) = tool_result {
1394 request_message
1395 .content
1396 .push(MessageContent::ToolUse(tool_use.clone()));
1397 tool_results_message
1398 .content
1399 .push(MessageContent::ToolResult(LanguageModelToolResult {
1400 tool_use_id: tool_use.id.clone(),
1401 tool_name: tool_result.tool_name.clone(),
1402 is_error: tool_result.is_error,
1403 content: if tool_result.content.is_empty() {
1404 // Surprisingly, the API fails if we return an empty string here.
1405 // It thinks we are sending a tool use without a tool result.
1406 "<Tool returned an empty string>".into()
1407 } else {
1408 tool_result.content.clone()
1409 },
1410 output: None,
1411 }));
1412 } else {
1413 cache_message = false;
1414 log::debug!(
1415 "skipped tool use {:?} because it is still pending",
1416 tool_use
1417 );
1418 }
1419 }
1420
1421 if cache_message {
1422 message_ix_to_cache = Some(request.messages.len());
1423 }
1424 request.messages.push(request_message);
1425
1426 if !tool_results_message.content.is_empty() {
1427 if cache_message {
1428 message_ix_to_cache = Some(request.messages.len());
1429 }
1430 request.messages.push(tool_results_message);
1431 }
1432 }
1433
1434 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
1435 if let Some(message_ix_to_cache) = message_ix_to_cache {
1436 request.messages[message_ix_to_cache].cache = true;
1437 }
1438
1439 request.tools = available_tools;
1440 request.mode = if model.supports_burn_mode() {
1441 Some(self.completion_mode.into())
1442 } else {
1443 Some(CompletionMode::Normal.into())
1444 };
1445
1446 request
1447 }
1448
1449 fn to_summarize_request(
1450 &self,
1451 model: &Arc<dyn LanguageModel>,
1452 intent: CompletionIntent,
1453 added_user_message: String,
1454 cx: &App,
1455 ) -> LanguageModelRequest {
1456 let mut request = LanguageModelRequest {
1457 thread_id: None,
1458 prompt_id: None,
1459 intent: Some(intent),
1460 mode: None,
1461 messages: vec![],
1462 tools: Vec::new(),
1463 tool_choice: None,
1464 stop: Vec::new(),
1465 temperature: AgentSettings::temperature_for_model(model, cx),
1466 thinking_allowed: false,
1467 };
1468
1469 for message in &self.messages {
1470 let mut request_message = LanguageModelRequestMessage {
1471 role: message.role,
1472 content: Vec::new(),
1473 cache: false,
1474 };
1475
1476 for segment in &message.segments {
1477 match segment {
1478 MessageSegment::Text(text) => request_message
1479 .content
1480 .push(MessageContent::Text(text.clone())),
1481 MessageSegment::Thinking { .. } => {}
1482 MessageSegment::RedactedThinking(_) => {}
1483 }
1484 }
1485
1486 if request_message.content.is_empty() {
1487 continue;
1488 }
1489
1490 request.messages.push(request_message);
1491 }
1492
1493 request.messages.push(LanguageModelRequestMessage {
1494 role: Role::User,
1495 content: vec![MessageContent::Text(added_user_message)],
1496 cache: false,
1497 });
1498
1499 request
1500 }
1501
1502 /// Insert auto-generated notifications (if any) to the thread
1503 fn flush_notifications(
1504 &mut self,
1505 model: Arc<dyn LanguageModel>,
1506 intent: CompletionIntent,
1507 cx: &mut Context<Self>,
1508 ) {
1509 match intent {
1510 CompletionIntent::UserPrompt | CompletionIntent::ToolResults => {
1511 if let Some(pending_tool_use) = self.attach_tracked_files_state(model, cx) {
1512 cx.emit(ThreadEvent::ToolFinished {
1513 tool_use_id: pending_tool_use.id.clone(),
1514 pending_tool_use: Some(pending_tool_use),
1515 });
1516 }
1517 }
1518 CompletionIntent::ThreadSummarization
1519 | CompletionIntent::ThreadContextSummarization
1520 | CompletionIntent::CreateFile
1521 | CompletionIntent::EditFile
1522 | CompletionIntent::InlineAssist
1523 | CompletionIntent::TerminalInlineAssist
1524 | CompletionIntent::GenerateGitCommitMessage => {}
1525 };
1526 }
1527
1528 fn attach_tracked_files_state(
1529 &mut self,
1530 model: Arc<dyn LanguageModel>,
1531 cx: &mut App,
1532 ) -> Option<PendingToolUse> {
1533 let action_log = self.action_log.read(cx);
1534
1535 if !action_log.has_unnotified_user_edits() {
1536 return None;
1537 }
1538
1539 // Represent notification as a simulated `project_notifications` tool call
1540 let tool_name = Arc::from("project_notifications");
1541 let Some(tool) = self.tools.read(cx).tool(&tool_name, cx) else {
1542 debug_panic!("`project_notifications` tool not found");
1543 return None;
1544 };
1545
1546 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
1547 return None;
1548 }
1549
1550 let input = serde_json::json!({});
1551 let request = Arc::new(LanguageModelRequest::default()); // unused
1552 let window = None;
1553 let tool_result = tool.run(
1554 input,
1555 request,
1556 self.project.clone(),
1557 self.action_log.clone(),
1558 model.clone(),
1559 window,
1560 cx,
1561 );
1562
1563 let tool_use_id =
1564 LanguageModelToolUseId::from(format!("project_notifications_{}", self.messages.len()));
1565
1566 let tool_use = LanguageModelToolUse {
1567 id: tool_use_id.clone(),
1568 name: tool_name.clone(),
1569 raw_input: "{}".to_string(),
1570 input: serde_json::json!({}),
1571 is_input_complete: true,
1572 };
1573
1574 let tool_output = cx.background_executor().block(tool_result.output);
1575
1576 // Attach a project_notification tool call to the latest existing
1577 // Assistant message. We cannot create a new Assistant message
1578 // because thinking models require a `thinking` block that we
1579 // cannot mock. We cannot send a notification as a normal
1580 // (non-tool-use) User message because this distracts Agent
1581 // too much.
1582 let tool_message_id = self
1583 .messages
1584 .iter()
1585 .enumerate()
1586 .rfind(|(_, message)| message.role == Role::Assistant)
1587 .map(|(_, message)| message.id)?;
1588
1589 let tool_use_metadata = ToolUseMetadata {
1590 model: model.clone(),
1591 thread_id: self.id.clone(),
1592 prompt_id: self.last_prompt_id.clone(),
1593 };
1594
1595 self.tool_use
1596 .request_tool_use(tool_message_id, tool_use, tool_use_metadata.clone(), cx);
1597
1598 let pending_tool_use = self.tool_use.insert_tool_output(
1599 tool_use_id.clone(),
1600 tool_name,
1601 tool_output,
1602 self.configured_model.as_ref(),
1603 self.completion_mode,
1604 );
1605
1606 pending_tool_use
1607 }
1608
1609 pub fn stream_completion(
1610 &mut self,
1611 request: LanguageModelRequest,
1612 model: Arc<dyn LanguageModel>,
1613 intent: CompletionIntent,
1614 window: Option<AnyWindowHandle>,
1615 cx: &mut Context<Self>,
1616 ) {
1617 self.tool_use_limit_reached = false;
1618
1619 let pending_completion_id = post_inc(&mut self.completion_count);
1620 let mut request_callback_parameters = if self.request_callback.is_some() {
1621 Some((request.clone(), Vec::new()))
1622 } else {
1623 None
1624 };
1625 let prompt_id = self.last_prompt_id.clone();
1626 let tool_use_metadata = ToolUseMetadata {
1627 model: model.clone(),
1628 thread_id: self.id.clone(),
1629 prompt_id: prompt_id.clone(),
1630 };
1631
1632 let completion_mode = request
1633 .mode
1634 .unwrap_or(zed_llm_client::CompletionMode::Normal);
1635
1636 self.last_received_chunk_at = Some(Instant::now());
1637
1638 let task = cx.spawn(async move |thread, cx| {
1639 let stream_completion_future = model.stream_completion(request, &cx);
1640 let initial_token_usage =
1641 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage);
1642 let stream_completion = async {
1643 let mut events = stream_completion_future.await?;
1644
1645 let mut stop_reason = StopReason::EndTurn;
1646 let mut current_token_usage = TokenUsage::default();
1647
1648 thread
1649 .update(cx, |_thread, cx| {
1650 cx.emit(ThreadEvent::NewRequest);
1651 })
1652 .ok();
1653
1654 let mut request_assistant_message_id = None;
1655
1656 while let Some(event) = events.next().await {
1657 if let Some((_, response_events)) = request_callback_parameters.as_mut() {
1658 response_events
1659 .push(event.as_ref().map_err(|error| error.to_string()).cloned());
1660 }
1661
1662 thread.update(cx, |thread, cx| {
1663 match event? {
1664 LanguageModelCompletionEvent::StartMessage { .. } => {
1665 request_assistant_message_id =
1666 Some(thread.insert_assistant_message(
1667 vec![MessageSegment::Text(String::new())],
1668 cx,
1669 ));
1670 }
1671 LanguageModelCompletionEvent::Stop(reason) => {
1672 stop_reason = reason;
1673 }
1674 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1675 thread.update_token_usage_at_last_message(token_usage);
1676 thread.cumulative_token_usage = thread.cumulative_token_usage
1677 + token_usage
1678 - current_token_usage;
1679 current_token_usage = token_usage;
1680 }
1681 LanguageModelCompletionEvent::Text(chunk) => {
1682 thread.received_chunk();
1683
1684 cx.emit(ThreadEvent::ReceivedTextChunk);
1685 if let Some(last_message) = thread.messages.last_mut() {
1686 if last_message.role == Role::Assistant
1687 && !thread.tool_use.has_tool_results(last_message.id)
1688 {
1689 last_message.push_text(&chunk);
1690 cx.emit(ThreadEvent::StreamedAssistantText(
1691 last_message.id,
1692 chunk,
1693 ));
1694 } else {
1695 // If we won't have an Assistant message yet, assume this chunk marks the beginning
1696 // of a new Assistant response.
1697 //
1698 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1699 // will result in duplicating the text of the chunk in the rendered Markdown.
1700 request_assistant_message_id =
1701 Some(thread.insert_assistant_message(
1702 vec![MessageSegment::Text(chunk.to_string())],
1703 cx,
1704 ));
1705 };
1706 }
1707 }
1708 LanguageModelCompletionEvent::Thinking {
1709 text: chunk,
1710 signature,
1711 } => {
1712 thread.received_chunk();
1713
1714 if let Some(last_message) = thread.messages.last_mut() {
1715 if last_message.role == Role::Assistant
1716 && !thread.tool_use.has_tool_results(last_message.id)
1717 {
1718 last_message.push_thinking(&chunk, signature);
1719 cx.emit(ThreadEvent::StreamedAssistantThinking(
1720 last_message.id,
1721 chunk,
1722 ));
1723 } else {
1724 // If we won't have an Assistant message yet, assume this chunk marks the beginning
1725 // of a new Assistant response.
1726 //
1727 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1728 // will result in duplicating the text of the chunk in the rendered Markdown.
1729 request_assistant_message_id =
1730 Some(thread.insert_assistant_message(
1731 vec![MessageSegment::Thinking {
1732 text: chunk.to_string(),
1733 signature,
1734 }],
1735 cx,
1736 ));
1737 };
1738 }
1739 }
1740 LanguageModelCompletionEvent::RedactedThinking { data } => {
1741 thread.received_chunk();
1742
1743 if let Some(last_message) = thread.messages.last_mut() {
1744 if last_message.role == Role::Assistant
1745 && !thread.tool_use.has_tool_results(last_message.id)
1746 {
1747 last_message.push_redacted_thinking(data);
1748 } else {
1749 request_assistant_message_id =
1750 Some(thread.insert_assistant_message(
1751 vec![MessageSegment::RedactedThinking(data)],
1752 cx,
1753 ));
1754 };
1755 }
1756 }
1757 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1758 let last_assistant_message_id = request_assistant_message_id
1759 .unwrap_or_else(|| {
1760 let new_assistant_message_id =
1761 thread.insert_assistant_message(vec![], cx);
1762 request_assistant_message_id =
1763 Some(new_assistant_message_id);
1764 new_assistant_message_id
1765 });
1766
1767 let tool_use_id = tool_use.id.clone();
1768 let streamed_input = if tool_use.is_input_complete {
1769 None
1770 } else {
1771 Some((&tool_use.input).clone())
1772 };
1773
1774 let ui_text = thread.tool_use.request_tool_use(
1775 last_assistant_message_id,
1776 tool_use,
1777 tool_use_metadata.clone(),
1778 cx,
1779 );
1780
1781 if let Some(input) = streamed_input {
1782 cx.emit(ThreadEvent::StreamedToolUse {
1783 tool_use_id,
1784 ui_text,
1785 input,
1786 });
1787 }
1788 }
1789 LanguageModelCompletionEvent::ToolUseJsonParseError {
1790 id,
1791 tool_name,
1792 raw_input: invalid_input_json,
1793 json_parse_error,
1794 } => {
1795 thread.receive_invalid_tool_json(
1796 id,
1797 tool_name,
1798 invalid_input_json,
1799 json_parse_error,
1800 window,
1801 cx,
1802 );
1803 }
1804 LanguageModelCompletionEvent::StatusUpdate(status_update) => {
1805 if let Some(completion) = thread
1806 .pending_completions
1807 .iter_mut()
1808 .find(|completion| completion.id == pending_completion_id)
1809 {
1810 match status_update {
1811 CompletionRequestStatus::Queued { position } => {
1812 completion.queue_state =
1813 QueueState::Queued { position };
1814 }
1815 CompletionRequestStatus::Started => {
1816 completion.queue_state = QueueState::Started;
1817 }
1818 CompletionRequestStatus::Failed {
1819 code,
1820 message,
1821 request_id: _,
1822 retry_after,
1823 } => {
1824 return Err(
1825 LanguageModelCompletionError::from_cloud_failure(
1826 model.upstream_provider_name(),
1827 code,
1828 message,
1829 retry_after.map(Duration::from_secs_f64),
1830 ),
1831 );
1832 }
1833 CompletionRequestStatus::UsageUpdated { amount, limit } => {
1834 thread.update_model_request_usage(
1835 amount as u32,
1836 limit,
1837 cx,
1838 );
1839 }
1840 CompletionRequestStatus::ToolUseLimitReached => {
1841 thread.tool_use_limit_reached = true;
1842 cx.emit(ThreadEvent::ToolUseLimitReached);
1843 }
1844 }
1845 }
1846 }
1847 }
1848
1849 thread.touch_updated_at();
1850 cx.emit(ThreadEvent::StreamedCompletion);
1851 cx.notify();
1852
1853 thread.auto_capture_telemetry(cx);
1854 Ok(())
1855 })??;
1856
1857 smol::future::yield_now().await;
1858 }
1859
1860 thread.update(cx, |thread, cx| {
1861 thread.last_received_chunk_at = None;
1862 thread
1863 .pending_completions
1864 .retain(|completion| completion.id != pending_completion_id);
1865
1866 // If there is a response without tool use, summarize the message. Otherwise,
1867 // allow two tool uses before summarizing.
1868 if matches!(thread.summary, ThreadSummary::Pending)
1869 && thread.messages.len() >= 2
1870 && (!thread.has_pending_tool_uses() || thread.messages.len() >= 6)
1871 {
1872 thread.summarize(cx);
1873 }
1874 })?;
1875
1876 anyhow::Ok(stop_reason)
1877 };
1878
1879 let result = stream_completion.await;
1880 let mut retry_scheduled = false;
1881
1882 thread
1883 .update(cx, |thread, cx| {
1884 thread.finalize_pending_checkpoint(cx);
1885 match result.as_ref() {
1886 Ok(stop_reason) => {
1887 match stop_reason {
1888 StopReason::ToolUse => {
1889 let tool_uses =
1890 thread.use_pending_tools(window, model.clone(), cx);
1891 cx.emit(ThreadEvent::UsePendingTools { tool_uses });
1892 }
1893 StopReason::EndTurn | StopReason::MaxTokens => {
1894 thread.project.update(cx, |project, cx| {
1895 project.set_agent_location(None, cx);
1896 });
1897 }
1898 StopReason::Refusal => {
1899 thread.project.update(cx, |project, cx| {
1900 project.set_agent_location(None, cx);
1901 });
1902
1903 // Remove the turn that was refused.
1904 //
1905 // https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/handle-streaming-refusals#reset-context-after-refusal
1906 {
1907 let mut messages_to_remove = Vec::new();
1908
1909 for (ix, message) in
1910 thread.messages.iter().enumerate().rev()
1911 {
1912 messages_to_remove.push(message.id);
1913
1914 if message.role == Role::User {
1915 if ix == 0 {
1916 break;
1917 }
1918
1919 if let Some(prev_message) =
1920 thread.messages.get(ix - 1)
1921 {
1922 if prev_message.role == Role::Assistant {
1923 break;
1924 }
1925 }
1926 }
1927 }
1928
1929 for message_id in messages_to_remove {
1930 thread.delete_message(message_id, cx);
1931 }
1932 }
1933
1934 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1935 header: "Language model refusal".into(),
1936 message:
1937 "Model refused to generate content for safety reasons."
1938 .into(),
1939 }));
1940 }
1941 }
1942
1943 // We successfully completed, so cancel any remaining retries.
1944 thread.retry_state = None;
1945 }
1946 Err(error) => {
1947 thread.project.update(cx, |project, cx| {
1948 project.set_agent_location(None, cx);
1949 });
1950
1951 if error.is::<PaymentRequiredError>() {
1952 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1953 } else if let Some(error) =
1954 error.downcast_ref::<ModelRequestLimitReachedError>()
1955 {
1956 cx.emit(ThreadEvent::ShowError(
1957 ThreadError::ModelRequestLimitReached { plan: error.plan },
1958 ));
1959 } else if let Some(completion_error) =
1960 error.downcast_ref::<LanguageModelCompletionError>()
1961 {
1962 match &completion_error {
1963 LanguageModelCompletionError::PromptTooLarge {
1964 tokens, ..
1965 } => {
1966 let tokens = tokens.unwrap_or_else(|| {
1967 // We didn't get an exact token count from the API, so fall back on our estimate.
1968 thread
1969 .total_token_usage()
1970 .map(|usage| usage.total)
1971 .unwrap_or(0)
1972 // We know the context window was exceeded in practice, so if our estimate was
1973 // lower than max tokens, the estimate was wrong; return that we exceeded by 1.
1974 .max(
1975 model
1976 .max_token_count_for_mode(completion_mode)
1977 .saturating_add(1),
1978 )
1979 });
1980 thread.exceeded_window_error = Some(ExceededWindowError {
1981 model_id: model.id(),
1982 token_count: tokens,
1983 });
1984 cx.notify();
1985 }
1986 _ => {
1987 if let Some(retry_strategy) =
1988 Thread::get_retry_strategy(completion_error)
1989 {
1990 retry_scheduled = thread
1991 .handle_retryable_error_with_delay(
1992 &completion_error,
1993 Some(retry_strategy),
1994 model.clone(),
1995 intent,
1996 window,
1997 cx,
1998 );
1999 }
2000 }
2001 }
2002 }
2003
2004 if !retry_scheduled {
2005 thread.cancel_last_completion(window, cx);
2006 }
2007 }
2008 }
2009
2010 if !retry_scheduled {
2011 cx.emit(ThreadEvent::Stopped(result.map_err(Arc::new)));
2012 }
2013
2014 if let Some((request_callback, (request, response_events))) = thread
2015 .request_callback
2016 .as_mut()
2017 .zip(request_callback_parameters.as_ref())
2018 {
2019 request_callback(request, response_events);
2020 }
2021
2022 thread.auto_capture_telemetry(cx);
2023
2024 if let Ok(initial_usage) = initial_token_usage {
2025 let usage = thread.cumulative_token_usage - initial_usage;
2026
2027 telemetry::event!(
2028 "Assistant Thread Completion",
2029 thread_id = thread.id().to_string(),
2030 prompt_id = prompt_id,
2031 model = model.telemetry_id(),
2032 model_provider = model.provider_id().to_string(),
2033 input_tokens = usage.input_tokens,
2034 output_tokens = usage.output_tokens,
2035 cache_creation_input_tokens = usage.cache_creation_input_tokens,
2036 cache_read_input_tokens = usage.cache_read_input_tokens,
2037 );
2038 }
2039 })
2040 .ok();
2041 });
2042
2043 self.pending_completions.push(PendingCompletion {
2044 id: pending_completion_id,
2045 queue_state: QueueState::Sending,
2046 _task: task,
2047 });
2048 }
2049
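    /// Generates a short, single-line summary of the thread using the configured
    /// thread-summary model, emitting `ThreadEvent::SummaryGenerated` when the
    /// attempt finishes (successfully or not).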
2050 pub fn summarize(&mut self, cx: &mut Context<Self>) {
2051 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
            log::warn!("No thread summary model");
2053 return;
2054 };
2055
2056 if !model.provider.is_authenticated(cx) {
2057 return;
2058 }
2059
2060 let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
2061
2062 let request = self.to_summarize_request(
2063 &model.model,
2064 CompletionIntent::ThreadSummarization,
2065 added_user_message.into(),
2066 cx,
2067 );
2068
2069 self.summary = ThreadSummary::Generating;
2070
2071 self.pending_summary = cx.spawn(async move |this, cx| {
2072 let result = async {
2073 let mut messages = model.model.stream_completion(request, &cx).await?;
2074
2075 let mut new_summary = String::new();
2076 while let Some(event) = messages.next().await {
2077 let Ok(event) = event else {
2078 continue;
2079 };
2080 let text = match event {
2081 LanguageModelCompletionEvent::Text(text) => text,
2082 LanguageModelCompletionEvent::StatusUpdate(
2083 CompletionRequestStatus::UsageUpdated { amount, limit },
2084 ) => {
2085 this.update(cx, |thread, cx| {
2086 thread.update_model_request_usage(amount as u32, limit, cx);
2087 })?;
2088 continue;
2089 }
2090 _ => continue,
2091 };
2092
2093 let mut lines = text.lines();
2094 new_summary.extend(lines.next());
2095
2096 // Stop if the LLM generated multiple lines.
2097 if lines.next().is_some() {
2098 break;
2099 }
2100 }
2101
2102 anyhow::Ok(new_summary)
2103 }
2104 .await;
2105
2106 this.update(cx, |this, cx| {
2107 match result {
2108 Ok(new_summary) => {
2109 if new_summary.is_empty() {
2110 this.summary = ThreadSummary::Error;
2111 } else {
2112 this.summary = ThreadSummary::Ready(new_summary.into());
2113 }
2114 }
2115 Err(err) => {
2116 this.summary = ThreadSummary::Error;
2117 log::error!("Failed to generate thread summary: {}", err);
2118 }
2119 }
2120 cx.emit(ThreadEvent::SummaryGenerated);
2121 })
2122 .log_err()?;
2123
2124 Some(())
2125 });
2126 }
2127
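    /// Decides whether a failed completion should be retried and, if so, with what
    /// delay and how many attempts. Returns `None` for errors that retrying cannot
    /// fix (e.g. authentication failures or oversized prompts).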
2128 fn get_retry_strategy(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2129 use LanguageModelCompletionError::*;
2130
2131 // General strategy here:
2132 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2133 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), try multiple times with exponential backoff.
2134 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), just retry once.
2135 match error {
2136 HttpResponseError {
2137 status_code: StatusCode::TOO_MANY_REQUESTS,
2138 ..
2139 } => Some(RetryStrategy::ExponentialBackoff {
2140 initial_delay: BASE_RETRY_DELAY,
2141 max_attempts: MAX_RETRY_ATTEMPTS,
2142 }),
2143 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2144 Some(RetryStrategy::Fixed {
2145 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2146 max_attempts: MAX_RETRY_ATTEMPTS,
2147 })
2148 }
2149 UpstreamProviderError {
2150 status,
2151 retry_after,
2152 ..
2153 } => match *status {
2154 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2155 Some(RetryStrategy::Fixed {
2156 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2157 max_attempts: MAX_RETRY_ATTEMPTS,
2158 })
2159 }
2160 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2161 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2162 // Internal Server Error could be anything, so only retry once.
2163 max_attempts: 1,
2164 }),
2165 status => {
2166 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2167 // but we frequently get them in practice. See https://http.dev/529
2168 if status.as_u16() == 529 {
2169 Some(RetryStrategy::Fixed {
2170 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2171 max_attempts: MAX_RETRY_ATTEMPTS,
2172 })
2173 } else {
2174 None
2175 }
2176 }
2177 },
2178 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2179 delay: BASE_RETRY_DELAY,
2180 max_attempts: 1,
2181 }),
2182 ApiReadResponseError { .. }
2183 | HttpSend { .. }
2184 | DeserializeResponse { .. }
2185 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2186 delay: BASE_RETRY_DELAY,
2187 max_attempts: 1,
2188 }),
2189 // Retrying these errors definitely shouldn't help.
2190 HttpResponseError {
2191 status_code:
2192 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2193 ..
2194 }
2195 | SerializeRequest { .. }
2196 | BuildRequestBody { .. }
2197 | PromptTooLarge { .. }
2198 | AuthenticationError { .. }
2199 | PermissionError { .. }
2200 | ApiEndpointNotFound { .. }
2201 | NoApiKey { .. } => None,
2202 // Retry all other 4xx and 5xx errors once.
2203 HttpResponseError { status_code, .. }
2204 if status_code.is_client_error() || status_code.is_server_error() =>
2205 {
2206 Some(RetryStrategy::Fixed {
2207 delay: BASE_RETRY_DELAY,
2208 max_attempts: 1,
2209 })
2210 }
2211 // Conservatively assume that any other errors are non-retryable
2212 HttpResponseError { .. } | Other(..) => None,
2213 }
2214 }
2215
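    /// Schedules a retry for a retryable completion error, adding a transient,
    /// UI-only message so the user can see the retry. Returns `true` if a retry was
    /// scheduled, or `false` if the error isn't retryable or the attempt limit has
    /// been reached.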
2216 fn handle_retryable_error_with_delay(
2217 &mut self,
2218 error: &LanguageModelCompletionError,
2219 strategy: Option<RetryStrategy>,
2220 model: Arc<dyn LanguageModel>,
2221 intent: CompletionIntent,
2222 window: Option<AnyWindowHandle>,
2223 cx: &mut Context<Self>,
2224 ) -> bool {
2225 let Some(strategy) = strategy.or_else(|| Self::get_retry_strategy(error)) else {
2226 return false;
2227 };
2228
2229 let max_attempts = match &strategy {
2230 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
2231 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
2232 };
2233
2234 let retry_state = self.retry_state.get_or_insert(RetryState {
2235 attempt: 0,
2236 max_attempts,
2237 intent,
2238 });
2239
2240 retry_state.attempt += 1;
2241 let attempt = retry_state.attempt;
2242 let max_attempts = retry_state.max_attempts;
2243 let intent = retry_state.intent;
2244
2245 if attempt <= max_attempts {
2246 let delay = match &strategy {
2247 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
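                    // Double the delay on each attempt: 1x, 2x, 4x the initial delay, ...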
2248 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
2249 Duration::from_secs(delay_secs)
2250 }
2251 RetryStrategy::Fixed { delay, .. } => *delay,
2252 };
2253
2254 // Add a transient message to inform the user
2255 let delay_secs = delay.as_secs();
2256 let retry_message = if max_attempts == 1 {
2257 format!("{error}. Retrying in {delay_secs} seconds...")
2258 } else {
2259 format!(
2260 "{error}. Retrying (attempt {attempt} of {max_attempts}) \
2261 in {delay_secs} seconds..."
2262 )
2263 };
2264 log::warn!(
2265 "Retrying completion request (attempt {attempt} of {max_attempts}) \
2266 in {delay_secs} seconds: {error:?}",
2267 );
2268
2269 // Add a UI-only message instead of a regular message
2270 let id = self.next_message_id.post_inc();
2271 self.messages.push(Message {
2272 id,
2273 role: Role::System,
2274 segments: vec![MessageSegment::Text(retry_message)],
2275 loaded_context: LoadedContext::default(),
2276 creases: Vec::new(),
2277 is_hidden: false,
2278 ui_only: true,
2279 });
2280 cx.emit(ThreadEvent::MessageAdded(id));
2281
2282 // Schedule the retry
2283 let thread_handle = cx.entity().downgrade();
2284
2285 cx.spawn(async move |_thread, cx| {
2286 cx.background_executor().timer(delay).await;
2287
2288 thread_handle
2289 .update(cx, |thread, cx| {
2290 // Retry the completion
2291 thread.send_to_model(model, intent, window, cx);
2292 })
2293 .log_err();
2294 })
2295 .detach();
2296
2297 true
2298 } else {
2299 // Max retries exceeded
2300 self.retry_state = None;
2301
2302 // Stop generating since we're giving up on retrying.
2303 self.pending_completions.clear();
2304
2305 false
2306 }
2307 }
2308
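    /// Starts generating a detailed summary of the thread unless one is already
    /// generating or up to date for the last message.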
2309 pub fn start_generating_detailed_summary_if_needed(
2310 &mut self,
2311 thread_store: WeakEntity<ThreadStore>,
2312 cx: &mut Context<Self>,
2313 ) {
2314 let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
2315 return;
2316 };
2317
2318 match &*self.detailed_summary_rx.borrow() {
2319 DetailedSummaryState::Generating { message_id, .. }
2320 | DetailedSummaryState::Generated { message_id, .. }
2321 if *message_id == last_message_id =>
2322 {
2323 // Already up-to-date
2324 return;
2325 }
2326 _ => {}
2327 }
2328
2329 let Some(ConfiguredModel { model, provider }) =
2330 LanguageModelRegistry::read_global(cx).thread_summary_model()
2331 else {
2332 return;
2333 };
2334
2335 if !provider.is_authenticated(cx) {
2336 return;
2337 }
2338
2339 let added_user_message = include_str!("./prompts/summarize_thread_detailed_prompt.txt");
2340
2341 let request = self.to_summarize_request(
2342 &model,
2343 CompletionIntent::ThreadContextSummarization,
2344 added_user_message.into(),
2345 cx,
2346 );
2347
2348 *self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
2349 message_id: last_message_id,
2350 };
2351
2352 // Replace the detailed summarization task if there is one, cancelling it. It would probably
2353 // be better to allow the old task to complete, but this would require logic for choosing
2354 // which result to prefer (the old task could complete after the new one, resulting in a
2355 // stale summary).
2356 self.detailed_summary_task = cx.spawn(async move |thread, cx| {
2357 let stream = model.stream_completion_text(request, &cx);
2358 let Some(mut messages) = stream.await.log_err() else {
2359 thread
2360 .update(cx, |thread, _cx| {
2361 *thread.detailed_summary_tx.borrow_mut() =
2362 DetailedSummaryState::NotGenerated;
2363 })
2364 .ok()?;
2365 return None;
2366 };
2367
2368 let mut new_detailed_summary = String::new();
2369
2370 while let Some(chunk) = messages.stream.next().await {
2371 if let Some(chunk) = chunk.log_err() {
2372 new_detailed_summary.push_str(&chunk);
2373 }
2374 }
2375
2376 thread
2377 .update(cx, |thread, _cx| {
2378 *thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
2379 text: new_detailed_summary.into(),
2380 message_id: last_message_id,
2381 };
2382 })
2383 .ok()?;
2384
2385 // Save thread so its summary can be reused later
2386 if let Some(thread) = thread.upgrade() {
2387 if let Ok(Ok(save_task)) = cx.update(|cx| {
2388 thread_store
2389 .update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
2390 }) {
2391 save_task.await.log_err();
2392 }
2393 }
2394
2395 Some(())
2396 });
2397 }
2398
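    /// Waits for any in-flight detailed summary to finish and returns it, falling
    /// back to the thread's full text if no detailed summary was generated.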
2399 pub async fn wait_for_detailed_summary_or_text(
2400 this: &Entity<Self>,
2401 cx: &mut AsyncApp,
2402 ) -> Option<SharedString> {
2403 let mut detailed_summary_rx = this
2404 .read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
2405 .ok()?;
2406 loop {
2407 match detailed_summary_rx.recv().await? {
2408 DetailedSummaryState::Generating { .. } => {}
2409 DetailedSummaryState::NotGenerated => {
2410 return this.read_with(cx, |this, _cx| this.text().into()).ok();
2411 }
2412 DetailedSummaryState::Generated { text, .. } => return Some(text),
2413 }
2414 }
2415 }
2416
2417 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
2418 self.detailed_summary_rx
2419 .borrow()
2420 .text()
2421 .unwrap_or_else(|| self.text().into())
2422 }
2423
2424 pub fn is_generating_detailed_summary(&self) -> bool {
2425 matches!(
2426 &*self.detailed_summary_rx.borrow(),
2427 DetailedSummaryState::Generating { .. }
2428 )
2429 }
2430
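    /// Processes every pending tool use that is still idle, either running it or
    /// requesting confirmation, and returns the tool uses that were processed.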
2431 pub fn use_pending_tools(
2432 &mut self,
2433 window: Option<AnyWindowHandle>,
2434 model: Arc<dyn LanguageModel>,
2435 cx: &mut Context<Self>,
2436 ) -> Vec<PendingToolUse> {
2437 self.auto_capture_telemetry(cx);
2438 let request =
2439 Arc::new(self.to_completion_request(model.clone(), CompletionIntent::ToolResults, cx));
2440 let pending_tool_uses = self
2441 .tool_use
2442 .pending_tool_uses()
2443 .into_iter()
2444 .filter(|tool_use| tool_use.status.is_idle())
2445 .cloned()
2446 .collect::<Vec<_>>();
2447
2448 for tool_use in pending_tool_uses.iter() {
2449 self.use_pending_tool(tool_use.clone(), request.clone(), model.clone(), window, cx);
2450 }
2451
2452 pending_tool_uses
2453 }
2454
2455 fn use_pending_tool(
2456 &mut self,
2457 tool_use: PendingToolUse,
2458 request: Arc<LanguageModelRequest>,
2459 model: Arc<dyn LanguageModel>,
2460 window: Option<AnyWindowHandle>,
2461 cx: &mut Context<Self>,
2462 ) {
2463 let Some(tool) = self.tools.read(cx).tool(&tool_use.name, cx) else {
2464 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2465 };
2466
2467 if !self.profile.is_tool_enabled(tool.source(), tool.name(), cx) {
2468 return self.handle_hallucinated_tool_use(tool_use.id, tool_use.name, window, cx);
2469 }
2470
2471 if tool.needs_confirmation(&tool_use.input, cx)
2472 && !AgentSettings::get_global(cx).always_allow_tool_actions
2473 {
2474 self.tool_use.confirm_tool_use(
2475 tool_use.id,
2476 tool_use.ui_text,
2477 tool_use.input,
2478 request,
2479 tool,
2480 );
2481 cx.emit(ThreadEvent::ToolConfirmationNeeded);
2482 } else {
2483 self.run_tool(
2484 tool_use.id,
2485 tool_use.ui_text,
2486 tool_use.input,
2487 request,
2488 tool,
2489 model,
2490 window,
2491 cx,
2492 );
2493 }
2494 }
2495
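    /// Records an error result for a tool the model requested that doesn't exist or
    /// isn't enabled in the current profile, listing the available tools so the
    /// model can correct itself.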
2496 pub fn handle_hallucinated_tool_use(
2497 &mut self,
2498 tool_use_id: LanguageModelToolUseId,
2499 hallucinated_tool_name: Arc<str>,
2500 window: Option<AnyWindowHandle>,
2501 cx: &mut Context<Thread>,
2502 ) {
2503 let available_tools = self.profile.enabled_tools(cx);
2504
2505 let tool_list = available_tools
2506 .iter()
2507 .map(|(name, tool)| format!("- {}: {}", name, tool.description()))
2508 .collect::<Vec<_>>()
2509 .join("\n");
2510
2511 let error_message = format!(
2512 "The tool '{}' doesn't exist or is not enabled. Available tools:\n{}",
2513 hallucinated_tool_name, tool_list
2514 );
2515
2516 let pending_tool_use = self.tool_use.insert_tool_output(
2517 tool_use_id.clone(),
2518 hallucinated_tool_name,
2519 Err(anyhow!("Missing tool call: {error_message}")),
2520 self.configured_model.as_ref(),
2521 self.completion_mode,
2522 );
2523
2524 cx.emit(ThreadEvent::MissingToolUse {
2525 tool_use_id: tool_use_id.clone(),
2526 ui_text: error_message.into(),
2527 });
2528
2529 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2530 }
2531
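    /// Records an error result for a tool use whose input couldn't be parsed as
    /// valid JSON.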
2532 pub fn receive_invalid_tool_json(
2533 &mut self,
2534 tool_use_id: LanguageModelToolUseId,
2535 tool_name: Arc<str>,
2536 invalid_json: Arc<str>,
2537 error: String,
2538 window: Option<AnyWindowHandle>,
2539 cx: &mut Context<Thread>,
2540 ) {
2541 log::error!("The model returned invalid input JSON: {invalid_json}");
2542
2543 let pending_tool_use = self.tool_use.insert_tool_output(
2544 tool_use_id.clone(),
2545 tool_name,
2546 Err(anyhow!("Error parsing input JSON: {error}")),
2547 self.configured_model.as_ref(),
2548 self.completion_mode,
2549 );
2550 let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
2551 pending_tool_use.ui_text.clone()
2552 } else {
2553 log::error!(
2554 "There was no pending tool use for tool use {tool_use_id}, even though it finished (with invalid input JSON)."
2555 );
2556 format!("Unknown tool {}", tool_use_id).into()
2557 };
2558
2559 cx.emit(ThreadEvent::InvalidToolInput {
2560 tool_use_id: tool_use_id.clone(),
2561 ui_text,
2562 invalid_input_json: invalid_json,
2563 });
2564
2565 self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2566 }
2567
2568 pub fn run_tool(
2569 &mut self,
2570 tool_use_id: LanguageModelToolUseId,
2571 ui_text: impl Into<SharedString>,
2572 input: serde_json::Value,
2573 request: Arc<LanguageModelRequest>,
2574 tool: Arc<dyn Tool>,
2575 model: Arc<dyn LanguageModel>,
2576 window: Option<AnyWindowHandle>,
2577 cx: &mut Context<Thread>,
2578 ) {
2579 let task =
2580 self.spawn_tool_use(tool_use_id.clone(), request, input, tool, model, window, cx);
2581 self.tool_use
2582 .run_pending_tool(tool_use_id, ui_text.into(), task);
2583 }
2584
2585 fn spawn_tool_use(
2586 &mut self,
2587 tool_use_id: LanguageModelToolUseId,
2588 request: Arc<LanguageModelRequest>,
2589 input: serde_json::Value,
2590 tool: Arc<dyn Tool>,
2591 model: Arc<dyn LanguageModel>,
2592 window: Option<AnyWindowHandle>,
2593 cx: &mut Context<Thread>,
2594 ) -> Task<()> {
2595 let tool_name: Arc<str> = tool.name().into();
2596
2597 let tool_result = tool.run(
2598 input,
2599 request,
2600 self.project.clone(),
2601 self.action_log.clone(),
2602 model,
2603 window,
2604 cx,
2605 );
2606
2607 // Store the card separately if it exists
2608 if let Some(card) = tool_result.card.clone() {
2609 self.tool_use
2610 .insert_tool_result_card(tool_use_id.clone(), card);
2611 }
2612
2613 cx.spawn({
2614 async move |thread: WeakEntity<Thread>, cx| {
2615 let output = tool_result.output.await;
2616
2617 thread
2618 .update(cx, |thread, cx| {
2619 let pending_tool_use = thread.tool_use.insert_tool_output(
2620 tool_use_id.clone(),
2621 tool_name,
2622 output,
2623 thread.configured_model.as_ref(),
2624 thread.completion_mode,
2625 );
2626 thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2627 })
2628 .ok();
2629 }
2630 })
2631 }
2632
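    /// Called whenever a tool use completes. Once all tools have finished, and if a
    /// model is configured and the completion wasn't canceled, the tool results are
    /// sent back to the model.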
2633 fn tool_finished(
2634 &mut self,
2635 tool_use_id: LanguageModelToolUseId,
2636 pending_tool_use: Option<PendingToolUse>,
2637 canceled: bool,
2638 window: Option<AnyWindowHandle>,
2639 cx: &mut Context<Self>,
2640 ) {
2641 if self.all_tools_finished() {
2642 if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
2643 if !canceled {
2644 self.send_to_model(model.clone(), CompletionIntent::ToolResults, window, cx);
2645 }
2646 self.auto_capture_telemetry(cx);
2647 }
2648 }
2649
2650 cx.emit(ThreadEvent::ToolFinished {
2651 tool_use_id,
2652 pending_tool_use,
2653 });
2654 }
2655
2656 /// Cancels the last pending completion, if there are any pending.
2657 ///
2658 /// Returns whether a completion was canceled.
2659 pub fn cancel_last_completion(
2660 &mut self,
2661 window: Option<AnyWindowHandle>,
2662 cx: &mut Context<Self>,
2663 ) -> bool {
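        // A scheduled retry also counts as something to cancel, even if no
        // completion is currently pending.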
2664 let mut canceled = self.pending_completions.pop().is_some() || self.retry_state.is_some();
2665
2666 self.retry_state = None;
2667
2668 for pending_tool_use in self.tool_use.cancel_pending() {
2669 canceled = true;
2670 self.tool_finished(
2671 pending_tool_use.id.clone(),
2672 Some(pending_tool_use),
2673 true,
2674 window,
2675 cx,
2676 );
2677 }
2678
2679 if canceled {
2680 cx.emit(ThreadEvent::CompletionCanceled);
2681
2682 // When canceled, we always want to insert the checkpoint.
2683 // (We skip over finalize_pending_checkpoint, because it
2684 // would conclude we didn't have anything to insert here.)
2685 if let Some(checkpoint) = self.pending_checkpoint.take() {
2686 self.insert_checkpoint(checkpoint, cx);
2687 }
2688 } else {
2689 self.finalize_pending_checkpoint(cx);
2690 }
2691
2692 canceled
2693 }
2694
2695 /// Signals that any in-progress editing should be canceled.
2696 ///
2697 /// This method is used to notify listeners (like ActiveThread) that
2698 /// they should cancel any editing operations.
2699 pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
2700 cx.emit(ThreadEvent::CancelEditing);
2701 }
2702
2703 pub fn feedback(&self) -> Option<ThreadFeedback> {
2704 self.feedback
2705 }
2706
2707 pub fn message_feedback(&self, message_id: MessageId) -> Option<ThreadFeedback> {
2708 self.message_feedback.get(&message_id).copied()
2709 }
2710
2711 pub fn report_message_feedback(
2712 &mut self,
2713 message_id: MessageId,
2714 feedback: ThreadFeedback,
2715 cx: &mut Context<Self>,
2716 ) -> Task<Result<()>> {
2717 if self.message_feedback.get(&message_id) == Some(&feedback) {
2718 return Task::ready(Ok(()));
2719 }
2720
2721 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2722 let serialized_thread = self.serialize(cx);
2723 let thread_id = self.id().clone();
2724 let client = self.project.read(cx).client();
2725
2726 let enabled_tool_names: Vec<String> = self
2727 .profile
2728 .enabled_tools(cx)
2729 .iter()
2730 .map(|(name, _)| name.clone().into())
2731 .collect();
2732
2733 self.message_feedback.insert(message_id, feedback);
2734
2735 cx.notify();
2736
2737 let message_content = self
2738 .message(message_id)
2739 .map(|msg| msg.to_string())
2740 .unwrap_or_default();
2741
2742 cx.background_spawn(async move {
2743 let final_project_snapshot = final_project_snapshot.await;
2744 let serialized_thread = serialized_thread.await?;
2745 let thread_data =
2746 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
2747
2748 let rating = match feedback {
2749 ThreadFeedback::Positive => "positive",
2750 ThreadFeedback::Negative => "negative",
2751 };
2752 telemetry::event!(
2753 "Assistant Thread Rated",
2754 rating,
2755 thread_id,
2756 enabled_tool_names,
2757 message_id = message_id.0,
2758 message_content,
2759 thread_data,
2760 final_project_snapshot
2761 );
2762 client.telemetry().flush_events().await;
2763
2764 Ok(())
2765 })
2766 }
2767
2768 pub fn report_feedback(
2769 &mut self,
2770 feedback: ThreadFeedback,
2771 cx: &mut Context<Self>,
2772 ) -> Task<Result<()>> {
2773 let last_assistant_message_id = self
2774 .messages
2775 .iter()
2776 .rev()
2777 .find(|msg| msg.role == Role::Assistant)
2778 .map(|msg| msg.id);
2779
2780 if let Some(message_id) = last_assistant_message_id {
2781 self.report_message_feedback(message_id, feedback, cx)
2782 } else {
2783 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
2784 let serialized_thread = self.serialize(cx);
2785 let thread_id = self.id().clone();
2786 let client = self.project.read(cx).client();
2787 self.feedback = Some(feedback);
2788 cx.notify();
2789
2790 cx.background_spawn(async move {
2791 let final_project_snapshot = final_project_snapshot.await;
2792 let serialized_thread = serialized_thread.await?;
2793 let thread_data = serde_json::to_value(serialized_thread)
2794 .unwrap_or_else(|_| serde_json::Value::Null);
2795
2796 let rating = match feedback {
2797 ThreadFeedback::Positive => "positive",
2798 ThreadFeedback::Negative => "negative",
2799 };
2800 telemetry::event!(
2801 "Assistant Thread Rated",
2802 rating,
2803 thread_id,
2804 thread_data,
2805 final_project_snapshot
2806 );
2807 client.telemetry().flush_events().await;
2808
2809 Ok(())
2810 })
2811 }
2812 }
2813
2814 /// Create a snapshot of the current project state including git information and unsaved buffers.
2815 fn project_snapshot(
2816 project: Entity<Project>,
2817 cx: &mut Context<Self>,
2818 ) -> Task<Arc<ProjectSnapshot>> {
2819 let git_store = project.read(cx).git_store().clone();
2820 let worktree_snapshots: Vec<_> = project
2821 .read(cx)
2822 .visible_worktrees(cx)
2823 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
2824 .collect();
2825
2826 cx.spawn(async move |_, cx| {
2827 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
2828
2829 let mut unsaved_buffers = Vec::new();
2830 cx.update(|app_cx| {
2831 let buffer_store = project.read(app_cx).buffer_store();
2832 for buffer_handle in buffer_store.read(app_cx).buffers() {
2833 let buffer = buffer_handle.read(app_cx);
2834 if buffer.is_dirty() {
2835 if let Some(file) = buffer.file() {
2836 let path = file.path().to_string_lossy().to_string();
2837 unsaved_buffers.push(path);
2838 }
2839 }
2840 }
2841 })
2842 .ok();
2843
2844 Arc::new(ProjectSnapshot {
2845 worktree_snapshots,
2846 unsaved_buffer_paths: unsaved_buffers,
2847 timestamp: Utc::now(),
2848 })
2849 })
2850 }
2851
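    /// Captures a worktree's path and, when available, the state of its git
    /// repository (remote URL, HEAD SHA, current branch, and diff).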
2852 fn worktree_snapshot(
2853 worktree: Entity<project::Worktree>,
2854 git_store: Entity<GitStore>,
2855 cx: &App,
2856 ) -> Task<WorktreeSnapshot> {
2857 cx.spawn(async move |cx| {
2858 // Get worktree path and snapshot
2859 let worktree_info = cx.update(|app_cx| {
2860 let worktree = worktree.read(app_cx);
2861 let path = worktree.abs_path().to_string_lossy().to_string();
2862 let snapshot = worktree.snapshot();
2863 (path, snapshot)
2864 });
2865
2866 let Ok((worktree_path, _snapshot)) = worktree_info else {
2867 return WorktreeSnapshot {
2868 worktree_path: String::new(),
2869 git_state: None,
2870 };
2871 };
2872
2873 let git_state = git_store
2874 .update(cx, |git_store, cx| {
2875 git_store
2876 .repositories()
2877 .values()
2878 .find(|repo| {
2879 repo.read(cx)
2880 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
2881 .is_some()
2882 })
2883 .cloned()
2884 })
2885 .ok()
2886 .flatten()
2887 .map(|repo| {
2888 repo.update(cx, |repo, _| {
2889 let current_branch =
2890 repo.branch.as_ref().map(|branch| branch.name().to_owned());
2891 repo.send_job(None, |state, _| async move {
2892 let RepositoryState::Local { backend, .. } = state else {
2893 return GitState {
2894 remote_url: None,
2895 head_sha: None,
2896 current_branch,
2897 diff: None,
2898 };
2899 };
2900
2901 let remote_url = backend.remote_url("origin");
2902 let head_sha = backend.head_sha().await;
2903 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
2904
2905 GitState {
2906 remote_url,
2907 head_sha,
2908 current_branch,
2909 diff,
2910 }
2911 })
2912 })
2913 });
2914
2915 let git_state = match git_state {
2916 Some(git_state) => match git_state.ok() {
2917 Some(git_state) => git_state.await.ok(),
2918 None => None,
2919 },
2920 None => None,
2921 };
2922
2923 WorktreeSnapshot {
2924 worktree_path,
2925 git_state,
2926 }
2927 })
2928 }
2929
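    /// Renders the entire thread as Markdown, including attached context, tool
    /// uses, and tool results.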
2930 pub fn to_markdown(&self, cx: &App) -> Result<String> {
2931 let mut markdown = Vec::new();
2932
2933 let summary = self.summary().or_default();
2934 writeln!(markdown, "# {summary}\n")?;
2935
2936 for message in self.messages() {
2937 writeln!(
2938 markdown,
2939 "## {role}\n",
2940 role = match message.role {
2941 Role::User => "User",
2942 Role::Assistant => "Agent",
2943 Role::System => "System",
2944 }
2945 )?;
2946
2947 if !message.loaded_context.text.is_empty() {
2948 writeln!(markdown, "{}", message.loaded_context.text)?;
2949 }
2950
2951 if !message.loaded_context.images.is_empty() {
2952 writeln!(
2953 markdown,
2954 "\n{} images attached as context.\n",
2955 message.loaded_context.images.len()
2956 )?;
2957 }
2958
2959 for segment in &message.segments {
2960 match segment {
2961 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
2962 MessageSegment::Thinking { text, .. } => {
2963 writeln!(markdown, "<think>\n{}\n</think>\n", text)?
2964 }
2965 MessageSegment::RedactedThinking(_) => {}
2966 }
2967 }
2968
2969 for tool_use in self.tool_uses_for_message(message.id, cx) {
2970 writeln!(
2971 markdown,
2972 "**Use Tool: {} ({})**",
2973 tool_use.name, tool_use.id
2974 )?;
2975 writeln!(markdown, "```json")?;
2976 writeln!(
2977 markdown,
2978 "{}",
2979 serde_json::to_string_pretty(&tool_use.input)?
2980 )?;
2981 writeln!(markdown, "```")?;
2982 }
2983
2984 for tool_result in self.tool_results_for_message(message.id) {
2985 write!(markdown, "\n**Tool Results: {}", tool_result.tool_use_id)?;
2986 if tool_result.is_error {
2987 write!(markdown, " (Error)")?;
2988 }
2989
2990 writeln!(markdown, "**\n")?;
2991 match &tool_result.content {
2992 LanguageModelToolResultContent::Text(text) => {
2993 writeln!(markdown, "{text}")?;
2994 }
2995 LanguageModelToolResultContent::Image(image) => {
                        writeln!(markdown, "![Image]({})", image.source)?;
2997 }
2998 }
2999
3000 if let Some(output) = tool_result.output.as_ref() {
3001 writeln!(
3002 markdown,
3003 "\n\nDebug Output:\n\n```json\n{}\n```\n",
3004 serde_json::to_string_pretty(output)?
3005 )?;
3006 }
3007 }
3008 }
3009
3010 Ok(String::from_utf8_lossy(&markdown).to_string())
3011 }
3012
3013 pub fn keep_edits_in_range(
3014 &mut self,
3015 buffer: Entity<language::Buffer>,
3016 buffer_range: Range<language::Anchor>,
3017 cx: &mut Context<Self>,
3018 ) {
3019 self.action_log.update(cx, |action_log, cx| {
3020 action_log.keep_edits_in_range(buffer, buffer_range, cx)
3021 });
3022 }
3023
3024 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
3025 self.action_log
3026 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
3027 }
3028
3029 pub fn reject_edits_in_ranges(
3030 &mut self,
3031 buffer: Entity<language::Buffer>,
3032 buffer_ranges: Vec<Range<language::Anchor>>,
3033 cx: &mut Context<Self>,
3034 ) -> Task<Result<()>> {
3035 self.action_log.update(cx, |action_log, cx| {
3036 action_log.reject_edits_in_ranges(buffer, buffer_ranges, cx)
3037 })
3038 }
3039
3040 pub fn action_log(&self) -> &Entity<ActionLog> {
3041 &self.action_log
3042 }
3043
3044 pub fn project(&self) -> &Entity<Project> {
3045 &self.project
3046 }
3047
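    /// Captures the serialized thread for telemetry when the auto-capture feature
    /// flag is enabled, throttled to at most once every ten seconds.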
3048 pub fn auto_capture_telemetry(&mut self, cx: &mut Context<Self>) {
3049 if !cx.has_flag::<feature_flags::ThreadAutoCaptureFeatureFlag>() {
3050 return;
3051 }
3052
3053 let now = Instant::now();
3054 if let Some(last) = self.last_auto_capture_at {
3055 if now.duration_since(last).as_secs() < 10 {
3056 return;
3057 }
3058 }
3059
3060 self.last_auto_capture_at = Some(now);
3061
3062 let thread_id = self.id().clone();
3063 let github_login = self
3064 .project
3065 .read(cx)
3066 .user_store()
3067 .read(cx)
3068 .current_user()
3069 .map(|user| user.github_login.clone());
3070 let client = self.project.read(cx).client();
3071 let serialize_task = self.serialize(cx);
3072
3073 cx.background_executor()
3074 .spawn(async move {
3075 if let Ok(serialized_thread) = serialize_task.await {
3076 if let Ok(thread_data) = serde_json::to_value(serialized_thread) {
3077 telemetry::event!(
3078 "Agent Thread Auto-Captured",
3079 thread_id = thread_id.to_string(),
3080 thread_data = thread_data,
3081 auto_capture_reason = "tracked_user",
3082 github_login = github_login
3083 );
3084
3085 client.telemetry().flush_events().await;
3086 }
3087 }
3088 })
3089 .detach();
3090 }
3091
3092 pub fn cumulative_token_usage(&self) -> TokenUsage {
3093 self.cumulative_token_usage
3094 }
3095
3096 pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
3097 let Some(model) = self.configured_model.as_ref() else {
3098 return TotalTokenUsage::default();
3099 };
3100
3101 let max = model
3102 .model
3103 .max_token_count_for_mode(self.completion_mode().into());
3104
3105 let index = self
3106 .messages
3107 .iter()
3108 .position(|msg| msg.id == message_id)
3109 .unwrap_or(0);
3110
3111 if index == 0 {
3112 return TotalTokenUsage { total: 0, max };
3113 }
3114
3115 let token_usage = &self
3116 .request_token_usage
3117 .get(index - 1)
3118 .cloned()
3119 .unwrap_or_default();
3120
3121 TotalTokenUsage {
3122 total: token_usage.total_tokens(),
3123 max,
3124 }
3125 }
3126
3127 pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
3128 let model = self.configured_model.as_ref()?;
3129
3130 let max = model
3131 .model
3132 .max_token_count_for_mode(self.completion_mode().into());
3133
3134 if let Some(exceeded_error) = &self.exceeded_window_error {
3135 if model.model.id() == exceeded_error.model_id {
3136 return Some(TotalTokenUsage {
3137 total: exceeded_error.token_count,
3138 max,
3139 });
3140 }
3141 }
3142
3143 let total = self
3144 .token_usage_at_last_message()
3145 .unwrap_or_default()
3146 .total_tokens();
3147
3148 Some(TotalTokenUsage { total, max })
3149 }
3150
3151 fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
3152 self.request_token_usage
3153 .get(self.messages.len().saturating_sub(1))
3154 .or_else(|| self.request_token_usage.last())
3155 .cloned()
3156 }
3157
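    /// Records token usage for the most recent message, growing the usage vector to
    /// match the message count and filling any gaps with the last known usage.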
3158 fn update_token_usage_at_last_message(&mut self, token_usage: TokenUsage) {
3159 let placeholder = self.token_usage_at_last_message().unwrap_or_default();
3160 self.request_token_usage
3161 .resize(self.messages.len(), placeholder);
3162
3163 if let Some(last) = self.request_token_usage.last_mut() {
3164 *last = token_usage;
3165 }
3166 }
3167
3168 fn update_model_request_usage(&self, amount: u32, limit: UsageLimit, cx: &mut Context<Self>) {
3169 self.project.update(cx, |project, cx| {
3170 project.user_store().update(cx, |user_store, cx| {
3171 user_store.update_model_request_usage(
3172 ModelRequestUsage(RequestUsage {
3173 amount: amount as i32,
3174 limit,
3175 }),
3176 cx,
3177 )
3178 })
3179 });
3180 }
3181
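    /// Records a user's denial of a tool use as an error result and finishes the
    /// tool use without sending results back to the model.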
3182 pub fn deny_tool_use(
3183 &mut self,
3184 tool_use_id: LanguageModelToolUseId,
3185 tool_name: Arc<str>,
3186 window: Option<AnyWindowHandle>,
3187 cx: &mut Context<Self>,
3188 ) {
3189 let err = Err(anyhow::anyhow!(
3190 "Permission to run tool action denied by user"
3191 ));
3192
3193 self.tool_use.insert_tool_output(
3194 tool_use_id.clone(),
3195 tool_name,
3196 err,
3197 self.configured_model.as_ref(),
3198 self.completion_mode,
3199 );
3200 self.tool_finished(tool_use_id.clone(), None, true, window, cx);
3201 }
3202}
3203
3204#[derive(Debug, Clone, Error)]
3205pub enum ThreadError {
3206 #[error("Payment required")]
3207 PaymentRequired,
3208 #[error("Model request limit reached")]
3209 ModelRequestLimitReached { plan: Plan },
3210 #[error("Message {header}: {message}")]
3211 Message {
3212 header: SharedString,
3213 message: SharedString,
3214 },
3215}
3216
3217#[derive(Debug, Clone)]
3218pub enum ThreadEvent {
3219 ShowError(ThreadError),
3220 StreamedCompletion,
3221 ReceivedTextChunk,
3222 NewRequest,
3223 StreamedAssistantText(MessageId, String),
3224 StreamedAssistantThinking(MessageId, String),
3225 StreamedToolUse {
3226 tool_use_id: LanguageModelToolUseId,
3227 ui_text: Arc<str>,
3228 input: serde_json::Value,
3229 },
3230 MissingToolUse {
3231 tool_use_id: LanguageModelToolUseId,
3232 ui_text: Arc<str>,
3233 },
3234 InvalidToolInput {
3235 tool_use_id: LanguageModelToolUseId,
3236 ui_text: Arc<str>,
3237 invalid_input_json: Arc<str>,
3238 },
3239 Stopped(Result<StopReason, Arc<anyhow::Error>>),
3240 MessageAdded(MessageId),
3241 MessageEdited(MessageId),
3242 MessageDeleted(MessageId),
3243 SummaryGenerated,
3244 SummaryChanged,
3245 UsePendingTools {
3246 tool_uses: Vec<PendingToolUse>,
3247 },
3248 ToolFinished {
3249 #[allow(unused)]
3250 tool_use_id: LanguageModelToolUseId,
3251 /// The pending tool use that corresponds to this tool.
3252 pending_tool_use: Option<PendingToolUse>,
3253 },
3254 CheckpointChanged,
3255 ToolConfirmationNeeded,
3256 ToolUseLimitReached,
3257 CancelEditing,
3258 CompletionCanceled,
3259 ProfileChanged,
3260}
3261
3262impl EventEmitter<ThreadEvent> for Thread {}
3263
3264struct PendingCompletion {
3265 id: usize,
3266 queue_state: QueueState,
3267 _task: Task<()>,
3268}
3269
3270#[cfg(test)]
3271mod tests {
3272 use super::*;
3273 use crate::{
3274 context::load_context, context_store::ContextStore, thread_store, thread_store::ThreadStore,
3275 };
3276
3277 // Test-specific constants
3278 const TEST_RATE_LIMIT_RETRY_SECS: u64 = 30;
3279 use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
3280 use assistant_tool::ToolRegistry;
3281 use assistant_tools;
3282 use futures::StreamExt;
3283 use futures::future::BoxFuture;
3284 use futures::stream::BoxStream;
3285 use gpui::TestAppContext;
3286 use http_client;
3287 use language_model::fake_provider::{FakeLanguageModel, FakeLanguageModelProvider};
3288 use language_model::{
3289 LanguageModelCompletionError, LanguageModelName, LanguageModelProviderId,
3290 LanguageModelProviderName, LanguageModelToolChoice,
3291 };
3292 use parking_lot::Mutex;
3293 use project::{FakeFs, Project};
3294 use prompt_store::PromptBuilder;
3295 use serde_json::json;
3296 use settings::{Settings, SettingsStore};
3297 use std::sync::Arc;
3298 use std::time::Duration;
3299 use theme::ThemeSettings;
3300 use util::path;
3301 use workspace::Workspace;
3302
3303 #[gpui::test]
3304 async fn test_message_with_context(cx: &mut TestAppContext) {
3305 init_test_settings(cx);
3306
3307 let project = create_test_project(
3308 cx,
3309 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3310 )
3311 .await;
3312
3313 let (_workspace, _thread_store, thread, context_store, model) =
3314 setup_test_environment(cx, project.clone()).await;
3315
3316 add_file_to_context(&project, &context_store, "test/code.rs", cx)
3317 .await
3318 .unwrap();
3319
3320 let context =
3321 context_store.read_with(cx, |store, _| store.context().next().cloned().unwrap());
3322 let loaded_context = cx
3323 .update(|cx| load_context(vec![context], &project, &None, cx))
3324 .await;
3325
3326 // Insert user message with context
3327 let message_id = thread.update(cx, |thread, cx| {
3328 thread.insert_user_message(
3329 "Please explain this code",
3330 loaded_context,
3331 None,
3332 Vec::new(),
3333 cx,
3334 )
3335 });
3336
3337 // Check content and context in message object
3338 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3339
3340 // Use different path format strings based on platform for the test
3341 #[cfg(windows)]
3342 let path_part = r"test\code.rs";
3343 #[cfg(not(windows))]
3344 let path_part = "test/code.rs";
3345
3346 let expected_context = format!(
3347 r#"
3348<context>
3349The following items were attached by the user. They are up-to-date and don't need to be re-read.
3350
3351<files>
3352```rs {path_part}
3353fn main() {{
3354 println!("Hello, world!");
3355}}
3356```
3357</files>
3358</context>
3359"#
3360 );
3361
3362 assert_eq!(message.role, Role::User);
3363 assert_eq!(message.segments.len(), 1);
3364 assert_eq!(
3365 message.segments[0],
3366 MessageSegment::Text("Please explain this code".to_string())
3367 );
3368 assert_eq!(message.loaded_context.text, expected_context);
3369
3370 // Check message in request
3371 let request = thread.update(cx, |thread, cx| {
3372 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3373 });
3374
3375 assert_eq!(request.messages.len(), 2);
3376 let expected_full_message = format!("{}Please explain this code", expected_context);
3377 assert_eq!(request.messages[1].string_contents(), expected_full_message);
3378 }
3379
3380 #[gpui::test]
3381 async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
3382 init_test_settings(cx);
3383
3384 let project = create_test_project(
3385 cx,
3386 json!({
3387 "file1.rs": "fn function1() {}\n",
3388 "file2.rs": "fn function2() {}\n",
3389 "file3.rs": "fn function3() {}\n",
3390 "file4.rs": "fn function4() {}\n",
3391 }),
3392 )
3393 .await;
3394
3395 let (_, _thread_store, thread, context_store, model) =
3396 setup_test_environment(cx, project.clone()).await;
3397
3398 // First message with context 1
3399 add_file_to_context(&project, &context_store, "test/file1.rs", cx)
3400 .await
3401 .unwrap();
3402 let new_contexts = context_store.update(cx, |store, cx| {
3403 store.new_context_for_thread(thread.read(cx), None)
3404 });
3405 assert_eq!(new_contexts.len(), 1);
3406 let loaded_context = cx
3407 .update(|cx| load_context(new_contexts, &project, &None, cx))
3408 .await;
3409 let message1_id = thread.update(cx, |thread, cx| {
3410 thread.insert_user_message("Message 1", loaded_context, None, Vec::new(), cx)
3411 });
3412
3413 // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
3414 add_file_to_context(&project, &context_store, "test/file2.rs", cx)
3415 .await
3416 .unwrap();
3417 let new_contexts = context_store.update(cx, |store, cx| {
3418 store.new_context_for_thread(thread.read(cx), None)
3419 });
3420 assert_eq!(new_contexts.len(), 1);
3421 let loaded_context = cx
3422 .update(|cx| load_context(new_contexts, &project, &None, cx))
3423 .await;
3424 let message2_id = thread.update(cx, |thread, cx| {
3425 thread.insert_user_message("Message 2", loaded_context, None, Vec::new(), cx)
3426 });
3427
        // Third message with all three contexts (contexts 1 and 2 should be skipped)
3430 add_file_to_context(&project, &context_store, "test/file3.rs", cx)
3431 .await
3432 .unwrap();
3433 let new_contexts = context_store.update(cx, |store, cx| {
3434 store.new_context_for_thread(thread.read(cx), None)
3435 });
3436 assert_eq!(new_contexts.len(), 1);
3437 let loaded_context = cx
3438 .update(|cx| load_context(new_contexts, &project, &None, cx))
3439 .await;
3440 let message3_id = thread.update(cx, |thread, cx| {
3441 thread.insert_user_message("Message 3", loaded_context, None, Vec::new(), cx)
3442 });
3443
3444 // Check what contexts are included in each message
3445 let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
3446 (
3447 thread.message(message1_id).unwrap().clone(),
3448 thread.message(message2_id).unwrap().clone(),
3449 thread.message(message3_id).unwrap().clone(),
3450 )
3451 });
3452
3453 // First message should include context 1
3454 assert!(message1.loaded_context.text.contains("file1.rs"));
3455
3456 // Second message should include only context 2 (not 1)
3457 assert!(!message2.loaded_context.text.contains("file1.rs"));
3458 assert!(message2.loaded_context.text.contains("file2.rs"));
3459
3460 // Third message should include only context 3 (not 1 or 2)
3461 assert!(!message3.loaded_context.text.contains("file1.rs"));
3462 assert!(!message3.loaded_context.text.contains("file2.rs"));
3463 assert!(message3.loaded_context.text.contains("file3.rs"));
3464
3465 // Check entire request to make sure all contexts are properly included
3466 let request = thread.update(cx, |thread, cx| {
3467 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3468 });
3469
3470 // The request should contain all 3 messages
3471 assert_eq!(request.messages.len(), 4);
3472
3473 // Check that the contexts are properly formatted in each message
3474 assert!(request.messages[1].string_contents().contains("file1.rs"));
3475 assert!(!request.messages[1].string_contents().contains("file2.rs"));
3476 assert!(!request.messages[1].string_contents().contains("file3.rs"));
3477
3478 assert!(!request.messages[2].string_contents().contains("file1.rs"));
3479 assert!(request.messages[2].string_contents().contains("file2.rs"));
3480 assert!(!request.messages[2].string_contents().contains("file3.rs"));
3481
3482 assert!(!request.messages[3].string_contents().contains("file1.rs"));
3483 assert!(!request.messages[3].string_contents().contains("file2.rs"));
3484 assert!(request.messages[3].string_contents().contains("file3.rs"));
3485
3486 add_file_to_context(&project, &context_store, "test/file4.rs", cx)
3487 .await
3488 .unwrap();
3489 let new_contexts = context_store.update(cx, |store, cx| {
3490 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3491 });
3492 assert_eq!(new_contexts.len(), 3);
3493 let loaded_context = cx
3494 .update(|cx| load_context(new_contexts, &project, &None, cx))
3495 .await
3496 .loaded_context;
3497
3498 assert!(!loaded_context.text.contains("file1.rs"));
3499 assert!(loaded_context.text.contains("file2.rs"));
3500 assert!(loaded_context.text.contains("file3.rs"));
3501 assert!(loaded_context.text.contains("file4.rs"));
3502
3503 let new_contexts = context_store.update(cx, |store, cx| {
3504 // Remove file4.rs
3505 store.remove_context(&loaded_context.contexts[2].handle(), cx);
3506 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3507 });
3508 assert_eq!(new_contexts.len(), 2);
3509 let loaded_context = cx
3510 .update(|cx| load_context(new_contexts, &project, &None, cx))
3511 .await
3512 .loaded_context;
3513
3514 assert!(!loaded_context.text.contains("file1.rs"));
3515 assert!(loaded_context.text.contains("file2.rs"));
3516 assert!(loaded_context.text.contains("file3.rs"));
3517 assert!(!loaded_context.text.contains("file4.rs"));
3518
3519 let new_contexts = context_store.update(cx, |store, cx| {
3520 // Remove file3.rs
3521 store.remove_context(&loaded_context.contexts[1].handle(), cx);
3522 store.new_context_for_thread(thread.read(cx), Some(message2_id))
3523 });
3524 assert_eq!(new_contexts.len(), 1);
3525 let loaded_context = cx
3526 .update(|cx| load_context(new_contexts, &project, &None, cx))
3527 .await
3528 .loaded_context;
3529
3530 assert!(!loaded_context.text.contains("file1.rs"));
3531 assert!(loaded_context.text.contains("file2.rs"));
3532 assert!(!loaded_context.text.contains("file3.rs"));
3533 assert!(!loaded_context.text.contains("file4.rs"));
3534 }
3535
3536 #[gpui::test]
3537 async fn test_message_without_files(cx: &mut TestAppContext) {
3538 init_test_settings(cx);
3539
3540 let project = create_test_project(
3541 cx,
3542 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3543 )
3544 .await;
3545
3546 let (_, _thread_store, thread, _context_store, model) =
3547 setup_test_environment(cx, project.clone()).await;
3548
3549 // Insert user message without any context (empty context vector)
3550 let message_id = thread.update(cx, |thread, cx| {
3551 thread.insert_user_message(
3552 "What is the best way to learn Rust?",
3553 ContextLoadResult::default(),
3554 None,
3555 Vec::new(),
3556 cx,
3557 )
3558 });
3559
3560 // Check content and context in message object
3561 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3562
3563 // Context should be empty when no files are included
3564 assert_eq!(message.role, Role::User);
3565 assert_eq!(message.segments.len(), 1);
3566 assert_eq!(
3567 message.segments[0],
3568 MessageSegment::Text("What is the best way to learn Rust?".to_string())
3569 );
3570 assert_eq!(message.loaded_context.text, "");
3571
3572 // Check message in request
3573 let request = thread.update(cx, |thread, cx| {
3574 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3575 });
3576
3577 assert_eq!(request.messages.len(), 2);
3578 assert_eq!(
3579 request.messages[1].string_contents(),
3580 "What is the best way to learn Rust?"
3581 );
3582
3583 // Add second message, also without context
3584 let message2_id = thread.update(cx, |thread, cx| {
3585 thread.insert_user_message(
3586 "Are there any good books?",
3587 ContextLoadResult::default(),
3588 None,
3589 Vec::new(),
3590 cx,
3591 )
3592 });
3593
3594 let message2 =
3595 thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
3596 assert_eq!(message2.loaded_context.text, "");
3597
3598 // Check that both messages appear in the request
3599 let request = thread.update(cx, |thread, cx| {
3600 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3601 });
3602
3603 assert_eq!(request.messages.len(), 3);
3604 assert_eq!(
3605 request.messages[1].string_contents(),
3606 "What is the best way to learn Rust?"
3607 );
3608 assert_eq!(
3609 request.messages[2].string_contents(),
3610 "Are there any good books?"
3611 );
3612 }
3613
3614 #[gpui::test]
3615 #[ignore] // turn this test on when project_notifications tool is re-enabled
3616 async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
3617 init_test_settings(cx);
3618
3619 let project = create_test_project(
3620 cx,
3621 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3622 )
3623 .await;
3624
3625 let (_workspace, _thread_store, thread, context_store, model) =
3626 setup_test_environment(cx, project.clone()).await;
3627
3628 // Add a buffer to the context. This will be a tracked buffer
3629 let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
3630 .await
3631 .unwrap();
3632
3633 let context = context_store
3634 .read_with(cx, |store, _| store.context().next().cloned())
3635 .unwrap();
3636 let loaded_context = cx
3637 .update(|cx| load_context(vec![context], &project, &None, cx))
3638 .await;
3639
3640 // Insert user message and assistant response
3641 thread.update(cx, |thread, cx| {
3642 thread.insert_user_message("Explain this code", loaded_context, None, Vec::new(), cx);
3643 thread.insert_assistant_message(
3644 vec![MessageSegment::Text("This code prints 42.".into())],
3645 cx,
3646 );
3647 });
3648 cx.run_until_parked();
3649
3650 // We shouldn't have a stale buffer notification yet
3651 let notifications = thread.read_with(cx, |thread, _| {
3652 find_tool_uses(thread, "project_notifications")
3653 });
3654 assert!(
3655 notifications.is_empty(),
3656 "Should not have stale buffer notification before buffer is modified"
3657 );
3658
3659 // Modify the buffer
3660 buffer.update(cx, |buffer, cx| {
3661 buffer.edit(
3662 [(1..1, "\n println!(\"Added a new line\");\n")],
3663 None,
3664 cx,
3665 );
3666 });
3667
3668 // Insert another user message
3669 thread.update(cx, |thread, cx| {
3670 thread.insert_user_message(
3671 "What does the code do now?",
3672 ContextLoadResult::default(),
3673 None,
3674 Vec::new(),
3675 cx,
3676 )
3677 });
3678 cx.run_until_parked();
3679
3680 // Check for the stale buffer warning
3681 thread.update(cx, |thread, cx| {
3682 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3683 });
3684 cx.run_until_parked();
3685
3686 let notifications = thread.read_with(cx, |thread, _cx| {
3687 find_tool_uses(thread, "project_notifications")
3688 });
3689
3690 let [notification] = notifications.as_slice() else {
3691 panic!("Should have a `project_notifications` tool use");
3692 };
3693
3694 let Some(notification_content) = notification.content.to_str() else {
3695 panic!("`project_notifications` should return text");
3696 };
3697
3698 assert!(notification_content.contains("These files have changed since the last read:"));
3699 assert!(notification_content.contains("code.rs"));
3700
3701 // Insert another user message and flush notifications again
3702 thread.update(cx, |thread, cx| {
3703 thread.insert_user_message(
3704 "Can you tell me more?",
3705 ContextLoadResult::default(),
3706 None,
3707 Vec::new(),
3708 cx,
3709 )
3710 });
3711
3712 thread.update(cx, |thread, cx| {
3713 thread.flush_notifications(model.clone(), CompletionIntent::UserPrompt, cx)
3714 });
3715 cx.run_until_parked();
3716
3717 // There should be no new notifications (we already flushed one)
3718 let notifications = thread.read_with(cx, |thread, _cx| {
3719 find_tool_uses(thread, "project_notifications")
3720 });
3721
3722 assert_eq!(
3723 notifications.len(),
3724 1,
3725 "Should still have only one notification after second flush - no duplicates"
3726 );
3727 }
3728
3729 fn find_tool_uses(thread: &Thread, tool_name: &str) -> Vec<LanguageModelToolResult> {
3730 thread
3731 .messages()
3732 .flat_map(|message| {
3733 thread
3734 .tool_results_for_message(message.id)
3735 .into_iter()
3736 .filter(|result| result.tool_name == tool_name.into())
3737 .cloned()
3738 .collect::<Vec<_>>()
3739 })
3740 .collect()
3741 }
3742
3743 #[gpui::test]
3744 async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
3745 init_test_settings(cx);
3746
3747 let project = create_test_project(
3748 cx,
3749 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3750 )
3751 .await;
3752
3753 let (_workspace, thread_store, thread, _context_store, _model) =
3754 setup_test_environment(cx, project.clone()).await;
3755
3756 // Check that we are starting with the default profile
3757 let profile = cx.read(|cx| thread.read(cx).profile.clone());
3758 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3759 assert_eq!(
3760 profile,
3761 AgentProfile::new(AgentProfileId::default(), tool_set)
3762 );
3763 }
3764
3765 #[gpui::test]
3766 async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
3767 init_test_settings(cx);
3768
3769 let project = create_test_project(
3770 cx,
3771 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3772 )
3773 .await;
3774
3775 let (_workspace, thread_store, thread, _context_store, _model) =
3776 setup_test_environment(cx, project.clone()).await;
3777
3778 // Profile gets serialized with default values
3779 let serialized = thread
3780 .update(cx, |thread, cx| thread.serialize(cx))
3781 .await
3782 .unwrap();
3783
3784 assert_eq!(serialized.profile, Some(AgentProfileId::default()));
3785
3786 let deserialized = cx.update(|cx| {
3787 thread.update(cx, |thread, cx| {
3788 Thread::deserialize(
3789 thread.id.clone(),
3790 serialized,
3791 thread.project.clone(),
3792 thread.tools.clone(),
3793 thread.prompt_builder.clone(),
3794 thread.project_context.clone(),
3795 None,
3796 cx,
3797 )
3798 })
3799 });
3800 let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3801
3802 assert_eq!(
3803 deserialized.profile,
3804 AgentProfile::new(AgentProfileId::default(), tool_set)
3805 );
3806 }
3807
3808 #[gpui::test]
3809 async fn test_temperature_setting(cx: &mut TestAppContext) {
3810 init_test_settings(cx);
3811
3812 let project = create_test_project(
3813 cx,
3814 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
3815 )
3816 .await;
3817
3818 let (_workspace, _thread_store, thread, _context_store, model) =
3819 setup_test_environment(cx, project.clone()).await;
3820
3821 // Both model and provider
3822 cx.update(|cx| {
3823 AgentSettings::override_global(
3824 AgentSettings {
3825 model_parameters: vec![LanguageModelParameters {
3826 provider: Some(model.provider_id().0.to_string().into()),
3827 model: Some(model.id().0.clone()),
3828 temperature: Some(0.66),
3829 }],
3830 ..AgentSettings::get_global(cx).clone()
3831 },
3832 cx,
3833 );
3834 });
3835
3836 let request = thread.update(cx, |thread, cx| {
3837 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3838 });
3839 assert_eq!(request.temperature, Some(0.66));
3840
3841 // Only model
3842 cx.update(|cx| {
3843 AgentSettings::override_global(
3844 AgentSettings {
3845 model_parameters: vec![LanguageModelParameters {
3846 provider: None,
3847 model: Some(model.id().0.clone()),
3848 temperature: Some(0.66),
3849 }],
3850 ..AgentSettings::get_global(cx).clone()
3851 },
3852 cx,
3853 );
3854 });
3855
3856 let request = thread.update(cx, |thread, cx| {
3857 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3858 });
3859 assert_eq!(request.temperature, Some(0.66));
3860
3861 // Only provider
3862 cx.update(|cx| {
3863 AgentSettings::override_global(
3864 AgentSettings {
3865 model_parameters: vec![LanguageModelParameters {
3866 provider: Some(model.provider_id().0.to_string().into()),
3867 model: None,
3868 temperature: Some(0.66),
3869 }],
3870 ..AgentSettings::get_global(cx).clone()
3871 },
3872 cx,
3873 );
3874 });
3875
3876 let request = thread.update(cx, |thread, cx| {
3877 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3878 });
3879 assert_eq!(request.temperature, Some(0.66));
3880
3881 // Same model name, different provider
3882 cx.update(|cx| {
3883 AgentSettings::override_global(
3884 AgentSettings {
3885 model_parameters: vec![LanguageModelParameters {
3886 provider: Some("anthropic".into()),
3887 model: Some(model.id().0.clone()),
3888 temperature: Some(0.66),
3889 }],
3890 ..AgentSettings::get_global(cx).clone()
3891 },
3892 cx,
3893 );
3894 });
3895
3896 let request = thread.update(cx, |thread, cx| {
3897 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3898 });
3899 assert_eq!(request.temperature, None);
3900 }
3901
3902 #[gpui::test]
3903 async fn test_thread_summary(cx: &mut TestAppContext) {
3904 init_test_settings(cx);
3905
3906 let project = create_test_project(cx, json!({})).await;
3907
3908 let (_, _thread_store, thread, _context_store, model) =
3909 setup_test_environment(cx, project.clone()).await;
3910
3911 // Initial state should be pending
3912 thread.read_with(cx, |thread, _| {
3913 assert!(matches!(thread.summary(), ThreadSummary::Pending));
3914 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
3915 });
3916
3917 // Manually setting the summary should not be allowed in this state
3918 thread.update(cx, |thread, cx| {
3919 thread.set_summary("This should not work", cx);
3920 });
3921
3922 thread.read_with(cx, |thread, _| {
3923 assert!(matches!(thread.summary(), ThreadSummary::Pending));
3924 });
3925
3926 // Send a message
3927 thread.update(cx, |thread, cx| {
3928 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
3929 thread.send_to_model(
3930 model.clone(),
3931 CompletionIntent::ThreadSummarization,
3932 None,
3933 cx,
3934 );
3935 });
3936
3937 let fake_model = model.as_fake();
3938 simulate_successful_response(&fake_model, cx);
3939
3940 // Should start generating summary when there are >= 2 messages
3941 thread.read_with(cx, |thread, _| {
3942 assert_eq!(*thread.summary(), ThreadSummary::Generating);
3943 });
3944
3945 // Should not be able to set the summary while generating
3946 thread.update(cx, |thread, cx| {
3947 thread.set_summary("This should not work either", cx);
3948 });
3949
3950 thread.read_with(cx, |thread, _| {
3951 assert!(matches!(thread.summary(), ThreadSummary::Generating));
3952 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
3953 });
3954
3955 cx.run_until_parked();
3956 fake_model.stream_last_completion_response("Brief");
3957 fake_model.stream_last_completion_response(" Introduction");
3958 fake_model.end_last_completion_stream();
3959 cx.run_until_parked();
3960
3961 // Summary should be set
3962 thread.read_with(cx, |thread, _| {
3963 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
3964 assert_eq!(thread.summary().or_default(), "Brief Introduction");
3965 });
3966
3967 // Now we should be able to set a summary
3968 thread.update(cx, |thread, cx| {
3969 thread.set_summary("Brief Intro", cx);
3970 });
3971
3972 thread.read_with(cx, |thread, _| {
3973 assert_eq!(thread.summary().or_default(), "Brief Intro");
3974 });
3975
3976 // Test setting an empty summary (should default to DEFAULT)
3977 thread.update(cx, |thread, cx| {
3978 thread.set_summary("", cx);
3979 });
3980
3981 thread.read_with(cx, |thread, _| {
3982 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
3983 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
3984 });
3985 }
3986
3987 #[gpui::test]
3988 async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
3989 init_test_settings(cx);
3990
3991 let project = create_test_project(cx, json!({})).await;
3992
3993 let (_, _thread_store, thread, _context_store, model) =
3994 setup_test_environment(cx, project.clone()).await;
3995
3996 test_summarize_error(&model, &thread, cx);
3997
3998 // Now we should be able to set a summary
3999 thread.update(cx, |thread, cx| {
4000 thread.set_summary("Brief Intro", cx);
4001 });
4002
4003 thread.read_with(cx, |thread, _| {
4004 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4005 assert_eq!(thread.summary().or_default(), "Brief Intro");
4006 });
4007 }
4008
4009 #[gpui::test]
4010 async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
4011 init_test_settings(cx);
4012
4013 let project = create_test_project(cx, json!({})).await;
4014
4015 let (_, _thread_store, thread, _context_store, model) =
4016 setup_test_environment(cx, project.clone()).await;
4017
4018 test_summarize_error(&model, &thread, cx);
4019
4020 // Sending another message should not trigger another summarize request
4021 thread.update(cx, |thread, cx| {
4022 thread.insert_user_message(
4023 "How are you?",
4024 ContextLoadResult::default(),
4025 None,
4026 vec![],
4027 cx,
4028 );
4029 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4030 });
4031
4032 let fake_model = model.as_fake();
4033 simulate_successful_response(&fake_model, cx);
4034
4035 thread.read_with(cx, |thread, _| {
4036 // State is still Error, not Generating
4037 assert!(matches!(thread.summary(), ThreadSummary::Error));
4038 });
4039
4040 // But the summarize request can be invoked manually
4041 thread.update(cx, |thread, cx| {
4042 thread.summarize(cx);
4043 });
4044
4045 thread.read_with(cx, |thread, _| {
4046 assert!(matches!(thread.summary(), ThreadSummary::Generating));
4047 });
4048
4049 cx.run_until_parked();
4050 fake_model.stream_last_completion_response("A successful summary");
4051 fake_model.end_last_completion_stream();
4052 cx.run_until_parked();
4053
4054 thread.read_with(cx, |thread, _| {
4055 assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
4056 assert_eq!(thread.summary().or_default(), "A successful summary");
4057 });
4058 }
4059
    // Error variants and a model wrapper used to inject failures into completion requests
4061 enum TestError {
4062 Overloaded,
4063 InternalServerError,
4064 }
4065
4066 struct ErrorInjector {
4067 inner: Arc<FakeLanguageModel>,
4068 error_type: TestError,
4069 }
4070
4071 impl ErrorInjector {
4072 fn new(error_type: TestError) -> Self {
4073 Self {
4074 inner: Arc::new(FakeLanguageModel::default()),
4075 error_type,
4076 }
4077 }
4078 }
4079
4080 impl LanguageModel for ErrorInjector {
4081 fn id(&self) -> LanguageModelId {
4082 self.inner.id()
4083 }
4084
4085 fn name(&self) -> LanguageModelName {
4086 self.inner.name()
4087 }
4088
4089 fn provider_id(&self) -> LanguageModelProviderId {
4090 self.inner.provider_id()
4091 }
4092
4093 fn provider_name(&self) -> LanguageModelProviderName {
4094 self.inner.provider_name()
4095 }
4096
4097 fn supports_tools(&self) -> bool {
4098 self.inner.supports_tools()
4099 }
4100
4101 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4102 self.inner.supports_tool_choice(choice)
4103 }
4104
4105 fn supports_images(&self) -> bool {
4106 self.inner.supports_images()
4107 }
4108
4109 fn telemetry_id(&self) -> String {
4110 self.inner.telemetry_id()
4111 }
4112
4113 fn max_token_count(&self) -> u64 {
4114 self.inner.max_token_count()
4115 }
4116
4117 fn count_tokens(
4118 &self,
4119 request: LanguageModelRequest,
4120 cx: &App,
4121 ) -> BoxFuture<'static, Result<u64>> {
4122 self.inner.count_tokens(request, cx)
4123 }
4124
4125 fn stream_completion(
4126 &self,
4127 _request: LanguageModelRequest,
4128 _cx: &AsyncApp,
4129 ) -> BoxFuture<
4130 'static,
4131 Result<
4132 BoxStream<
4133 'static,
4134 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4135 >,
4136 LanguageModelCompletionError,
4137 >,
4138 > {
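            // Always return a stream that yields a single error matching the configured error type.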
4139 let error = match self.error_type {
4140 TestError::Overloaded => LanguageModelCompletionError::ServerOverloaded {
4141 provider: self.provider_name(),
4142 retry_after: None,
4143 },
4144 TestError::InternalServerError => {
4145 LanguageModelCompletionError::ApiInternalServerError {
4146 provider: self.provider_name(),
4147 message: "I'm a teapot orbiting the sun".to_string(),
4148 }
4149 }
4150 };
4151 async move {
4152 let stream = futures::stream::once(async move { Err(error) });
4153 Ok(stream.boxed())
4154 }
4155 .boxed()
4156 }
4157
4158 fn as_fake(&self) -> &FakeLanguageModel {
4159 &self.inner
4160 }
4161 }
4162
4163 #[gpui::test]
4164 async fn test_retry_on_overloaded_error(cx: &mut TestAppContext) {
4165 init_test_settings(cx);
4166
4167 let project = create_test_project(cx, json!({})).await;
4168 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4169
4170 // Create model that returns overloaded error
4171 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4172
4173 // Insert a user message
4174 thread.update(cx, |thread, cx| {
4175 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4176 });
4177
4178 // Start completion
4179 thread.update(cx, |thread, cx| {
4180 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4181 });
4182
4183 cx.run_until_parked();
4184
4185 thread.read_with(cx, |thread, _| {
4186 assert!(thread.retry_state.is_some(), "Should have retry state");
4187 let retry_state = thread.retry_state.as_ref().unwrap();
4188 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4189 assert_eq!(
4190 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
4191 "Should retry MAX_RETRY_ATTEMPTS times for overloaded errors"
4192 );
4193 });
4194
4195 // Check that a retry message was added
4196 thread.read_with(cx, |thread, _| {
4197 let mut messages = thread.messages();
4198 assert!(
4199 messages.any(|msg| {
4200 msg.role == Role::System
4201 && msg.ui_only
4202 && msg.segments.iter().any(|seg| {
4203 if let MessageSegment::Text(text) = seg {
4204 text.contains("overloaded")
4205 && text
4206 .contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS))
4207 } else {
4208 false
4209 }
4210 })
4211 }),
4212 "Should have added a system retry message"
4213 );
4214 });
4215
4216 let retry_count = thread.update(cx, |thread, _| {
4217 thread
4218 .messages
4219 .iter()
4220 .filter(|m| {
4221 m.ui_only
4222 && m.segments.iter().any(|s| {
4223 if let MessageSegment::Text(text) = s {
4224 text.contains("Retrying") && text.contains("seconds")
4225 } else {
4226 false
4227 }
4228 })
4229 })
4230 .count()
4231 });
4232
4233 assert_eq!(retry_count, 1, "Should have one retry message");
4234 }
4235
4236 #[gpui::test]
4237 async fn test_retry_on_internal_server_error(cx: &mut TestAppContext) {
4238 init_test_settings(cx);
4239
4240 let project = create_test_project(cx, json!({})).await;
4241 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4242
4243 // Create model that returns internal server error
4244 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4245
4246 // Insert a user message
4247 thread.update(cx, |thread, cx| {
4248 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4249 });
4250
4251 // Start completion
4252 thread.update(cx, |thread, cx| {
4253 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4254 });
4255
4256 cx.run_until_parked();
4257
4258 // Check retry state on thread
4259 thread.read_with(cx, |thread, _| {
4260 assert!(thread.retry_state.is_some(), "Should have retry state");
4261 let retry_state = thread.retry_state.as_ref().unwrap();
4262 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4263 assert_eq!(
4264 retry_state.max_attempts, 1,
                "Internal server errors should retry only once"
4266 );
4267 });
4268
4269 // Check that a retry message was added with provider name
4270 thread.read_with(cx, |thread, _| {
4271 let mut messages = thread.messages();
4272 assert!(
4273 messages.any(|msg| {
4274 msg.role == Role::System
4275 && msg.ui_only
4276 && msg.segments.iter().any(|seg| {
4277 if let MessageSegment::Text(text) = seg {
4278 text.contains("internal")
4279 && text.contains("Fake")
4280 && text.contains("Retrying in")
4281 && !text.contains("attempt")
4282 } else {
4283 false
4284 }
4285 })
4286 }),
4287 "Should have added a system retry message with provider name"
4288 );
4289 });
4290
4291 // Count retry messages
4292 let retry_count = thread.update(cx, |thread, _| {
4293 thread
4294 .messages
4295 .iter()
4296 .filter(|m| {
4297 m.ui_only
4298 && m.segments.iter().any(|s| {
4299 if let MessageSegment::Text(text) = s {
4300 text.contains("Retrying") && text.contains("seconds")
4301 } else {
4302 false
4303 }
4304 })
4305 })
4306 .count()
4307 });
4308
4309 assert_eq!(retry_count, 1, "Should have one retry message");
4310 }
4311
4312 #[gpui::test]
4313 async fn test_exponential_backoff_on_retries(cx: &mut TestAppContext) {
4314 init_test_settings(cx);
4315
4316 let project = create_test_project(cx, json!({})).await;
4317 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4318
4319 // Create model that returns internal server error
4320 let model = Arc::new(ErrorInjector::new(TestError::InternalServerError));
4321
4322 // Insert a user message
4323 thread.update(cx, |thread, cx| {
4324 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4325 });
4326
        // Track how many completion requests are issued (the initial attempt plus retries)
4329 let completion_count = Arc::new(Mutex::new(0));
4330 let completion_count_clone = completion_count.clone();
4331
4332 let _subscription = thread.update(cx, |_, cx| {
4333 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4334 if let ThreadEvent::NewRequest = event {
4335 *completion_count_clone.lock() += 1;
4336 }
4337 })
4338 });
4339
4340 // First attempt
4341 thread.update(cx, |thread, cx| {
4342 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4343 });
4344 cx.run_until_parked();
4345
4346 // Should have scheduled first retry - count retry messages
4347 let retry_count = thread.update(cx, |thread, _| {
4348 thread
4349 .messages
4350 .iter()
4351 .filter(|m| {
4352 m.ui_only
4353 && m.segments.iter().any(|s| {
4354 if let MessageSegment::Text(text) = s {
4355 text.contains("Retrying") && text.contains("seconds")
4356 } else {
4357 false
4358 }
4359 })
4360 })
4361 .count()
4362 });
4363 assert_eq!(retry_count, 1, "Should have scheduled first retry");
4364
4365 // Check retry state
4366 thread.read_with(cx, |thread, _| {
4367 assert!(thread.retry_state.is_some(), "Should have retry state");
4368 let retry_state = thread.retry_state.as_ref().unwrap();
4369 assert_eq!(retry_state.attempt, 1, "Should be first retry attempt");
4370 assert_eq!(
4371 retry_state.max_attempts, 1,
4372 "Internal server errors should only retry once"
4373 );
4374 });
4375
4376 // Advance clock for first retry
4377 cx.executor().advance_clock(BASE_RETRY_DELAY);
4378 cx.run_until_parked();
4379
        // After the retry runs and fails again, no further retry should be scheduled - count retry messages
4381 let retry_count = thread.update(cx, |thread, _| {
4382 thread
4383 .messages
4384 .iter()
4385 .filter(|m| {
4386 m.ui_only
4387 && m.segments.iter().any(|s| {
4388 if let MessageSegment::Text(text) = s {
4389 text.contains("Retrying") && text.contains("seconds")
4390 } else {
4391 false
4392 }
4393 })
4394 })
4395 .count()
4396 });
4397 assert_eq!(
4398 retry_count, 1,
4399 "Should have only one retry for internal server errors"
4400 );
4401
4402 // For internal server errors, we only retry once and then give up
4403 // Check that retry_state is cleared after the single retry
4404 thread.read_with(cx, |thread, _| {
4405 assert!(
4406 thread.retry_state.is_none(),
4407 "Retry state should be cleared after single retry"
4408 );
4409 });
4410
4411 // Verify total attempts (1 initial + 1 retry)
4412 assert_eq!(
4413 *completion_count.lock(),
4414 2,
4415 "Should have attempted once plus 1 retry"
4416 );
4417 }
4418
4419 #[gpui::test]
4420 async fn test_max_retries_exceeded(cx: &mut TestAppContext) {
4421 init_test_settings(cx);
4422
4423 let project = create_test_project(cx, json!({})).await;
4424 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4425
4426 // Create model that returns overloaded error
4427 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
4428
4429 // Insert a user message
4430 thread.update(cx, |thread, cx| {
4431 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4432 });
4433
4434 // Track events
4435 let stopped_with_error = Arc::new(Mutex::new(false));
4436 let stopped_with_error_clone = stopped_with_error.clone();
4437
4438 let _subscription = thread.update(cx, |_, cx| {
4439 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4440 if let ThreadEvent::Stopped(Err(_)) = event {
4441 *stopped_with_error_clone.lock() = true;
4442 }
4443 })
4444 });
4445
4446 // Start initial completion
4447 thread.update(cx, |thread, cx| {
4448 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4449 });
4450 cx.run_until_parked();
4451
4452 // Advance through all retries
4453 for _ in 0..MAX_RETRY_ATTEMPTS {
4454 cx.executor().advance_clock(BASE_RETRY_DELAY);
4455 cx.run_until_parked();
4456 }
4457
4458 let retry_count = thread.update(cx, |thread, _| {
4459 thread
4460 .messages
4461 .iter()
4462 .filter(|m| {
4463 m.ui_only
4464 && m.segments.iter().any(|s| {
4465 if let MessageSegment::Text(text) = s {
4466 text.contains("Retrying") && text.contains("seconds")
4467 } else {
4468 false
4469 }
4470 })
4471 })
4472 .count()
4473 });
4474
4475 // After max retries, should emit Stopped(Err(...)) event
4476 assert_eq!(
4477 retry_count, MAX_RETRY_ATTEMPTS as usize,
4478 "Should have attempted MAX_RETRY_ATTEMPTS retries for overloaded errors"
4479 );
4480 assert!(
4481 *stopped_with_error.lock(),
4482 "Should emit Stopped(Err(...)) event after max retries exceeded"
4483 );
4484
4485 // Retry state should be cleared
4486 thread.read_with(cx, |thread, _| {
4487 assert!(
4488 thread.retry_state.is_none(),
4489 "Retry state should be cleared after max retries"
4490 );
4491
4492 // Verify we have the expected number of retry messages
4493 let retry_messages = thread
4494 .messages
4495 .iter()
4496 .filter(|msg| msg.ui_only && msg.role == Role::System)
4497 .count();
4498 assert_eq!(
4499 retry_messages, MAX_RETRY_ATTEMPTS as usize,
4500 "Should have MAX_RETRY_ATTEMPTS retry messages for overloaded errors"
4501 );
4502 });
4503 }
4504
4505 #[gpui::test]
4506 async fn test_retry_message_removed_on_retry(cx: &mut TestAppContext) {
4507 init_test_settings(cx);
4508
4509 let project = create_test_project(cx, json!({})).await;
4510 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4511
4512 // We'll use a wrapper to switch behavior after first failure
4513 struct RetryTestModel {
4514 inner: Arc<FakeLanguageModel>,
4515 failed_once: Arc<Mutex<bool>>,
4516 }
4517
4518 impl LanguageModel for RetryTestModel {
4519 fn id(&self) -> LanguageModelId {
4520 self.inner.id()
4521 }
4522
4523 fn name(&self) -> LanguageModelName {
4524 self.inner.name()
4525 }
4526
4527 fn provider_id(&self) -> LanguageModelProviderId {
4528 self.inner.provider_id()
4529 }
4530
4531 fn provider_name(&self) -> LanguageModelProviderName {
4532 self.inner.provider_name()
4533 }
4534
4535 fn supports_tools(&self) -> bool {
4536 self.inner.supports_tools()
4537 }
4538
4539 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4540 self.inner.supports_tool_choice(choice)
4541 }
4542
4543 fn supports_images(&self) -> bool {
4544 self.inner.supports_images()
4545 }
4546
4547 fn telemetry_id(&self) -> String {
4548 self.inner.telemetry_id()
4549 }
4550
4551 fn max_token_count(&self) -> u64 {
4552 self.inner.max_token_count()
4553 }
4554
4555 fn count_tokens(
4556 &self,
4557 request: LanguageModelRequest,
4558 cx: &App,
4559 ) -> BoxFuture<'static, Result<u64>> {
4560 self.inner.count_tokens(request, cx)
4561 }
4562
4563 fn stream_completion(
4564 &self,
4565 request: LanguageModelRequest,
4566 cx: &AsyncApp,
4567 ) -> BoxFuture<
4568 'static,
4569 Result<
4570 BoxStream<
4571 'static,
4572 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4573 >,
4574 LanguageModelCompletionError,
4575 >,
4576 > {
4577 if !*self.failed_once.lock() {
4578 *self.failed_once.lock() = true;
4579 let provider = self.provider_name();
4580 // Return error on first attempt
4581 let stream = futures::stream::once(async move {
4582 Err(LanguageModelCompletionError::ServerOverloaded {
4583 provider,
4584 retry_after: None,
4585 })
4586 });
4587 async move { Ok(stream.boxed()) }.boxed()
4588 } else {
4589 // Succeed on retry
4590 self.inner.stream_completion(request, cx)
4591 }
4592 }
4593
4594 fn as_fake(&self) -> &FakeLanguageModel {
4595 &self.inner
4596 }
4597 }
4598
4599 let model = Arc::new(RetryTestModel {
4600 inner: Arc::new(FakeLanguageModel::default()),
4601 failed_once: Arc::new(Mutex::new(false)),
4602 });
4603
4604 // Insert a user message
4605 thread.update(cx, |thread, cx| {
4606 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4607 });
4608
        // Track when the retried completion streams successfully
4611 let retry_completed = Arc::new(Mutex::new(false));
4612 let retry_completed_clone = retry_completed.clone();
4613
4614 let _subscription = thread.update(cx, |_, cx| {
4615 cx.subscribe(&thread, move |_, _, event: &ThreadEvent, _| {
4616 if let ThreadEvent::StreamedCompletion = event {
4617 *retry_completed_clone.lock() = true;
4618 }
4619 })
4620 });
4621
4622 // Start completion
4623 thread.update(cx, |thread, cx| {
4624 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4625 });
4626 cx.run_until_parked();
4627
4628 // Get the retry message ID
4629 let retry_message_id = thread.read_with(cx, |thread, _| {
4630 thread
4631 .messages()
4632 .find(|msg| msg.role == Role::System && msg.ui_only)
4633 .map(|msg| msg.id)
4634 .expect("Should have a retry message")
4635 });
4636
4637 // Wait for retry
4638 cx.executor().advance_clock(BASE_RETRY_DELAY);
4639 cx.run_until_parked();
4640
4641 // Stream some successful content
4642 let fake_model = model.as_fake();
4643 // After the retry, there should be a new pending completion
4644 let pending = fake_model.pending_completions();
4645 assert!(
4646 !pending.is_empty(),
4647 "Should have a pending completion after retry"
4648 );
4649 fake_model.stream_completion_response(&pending[0], "Success!");
4650 fake_model.end_completion_stream(&pending[0]);
4651 cx.run_until_parked();
4652
4653 // Check that the retry completed successfully
4654 assert!(
4655 *retry_completed.lock(),
4656 "Retry should have completed successfully"
4657 );
4658
4659 // Retry message should still exist but be marked as ui_only
4660 thread.read_with(cx, |thread, _| {
4661 let retry_msg = thread
4662 .message(retry_message_id)
4663 .expect("Retry message should still exist");
4664 assert!(retry_msg.ui_only, "Retry message should be ui_only");
4665 assert_eq!(
4666 retry_msg.role,
4667 Role::System,
4668 "Retry message should have System role"
4669 );
4670 });
4671 }
4672
4673 #[gpui::test]
4674 async fn test_successful_completion_clears_retry_state(cx: &mut TestAppContext) {
4675 init_test_settings(cx);
4676
4677 let project = create_test_project(cx, json!({})).await;
4678 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4679
4680 // Create a model that fails once then succeeds
4681 struct FailOnceModel {
4682 inner: Arc<FakeLanguageModel>,
4683 failed_once: Arc<Mutex<bool>>,
4684 }
4685
4686 impl LanguageModel for FailOnceModel {
4687 fn id(&self) -> LanguageModelId {
4688 self.inner.id()
4689 }
4690
4691 fn name(&self) -> LanguageModelName {
4692 self.inner.name()
4693 }
4694
4695 fn provider_id(&self) -> LanguageModelProviderId {
4696 self.inner.provider_id()
4697 }
4698
4699 fn provider_name(&self) -> LanguageModelProviderName {
4700 self.inner.provider_name()
4701 }
4702
4703 fn supports_tools(&self) -> bool {
4704 self.inner.supports_tools()
4705 }
4706
4707 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4708 self.inner.supports_tool_choice(choice)
4709 }
4710
4711 fn supports_images(&self) -> bool {
4712 self.inner.supports_images()
4713 }
4714
4715 fn telemetry_id(&self) -> String {
4716 self.inner.telemetry_id()
4717 }
4718
4719 fn max_token_count(&self) -> u64 {
4720 self.inner.max_token_count()
4721 }
4722
4723 fn count_tokens(
4724 &self,
4725 request: LanguageModelRequest,
4726 cx: &App,
4727 ) -> BoxFuture<'static, Result<u64>> {
4728 self.inner.count_tokens(request, cx)
4729 }
4730
4731 fn stream_completion(
4732 &self,
4733 request: LanguageModelRequest,
4734 cx: &AsyncApp,
4735 ) -> BoxFuture<
4736 'static,
4737 Result<
4738 BoxStream<
4739 'static,
4740 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4741 >,
4742 LanguageModelCompletionError,
4743 >,
4744 > {
4745 if !*self.failed_once.lock() {
4746 *self.failed_once.lock() = true;
4747 let provider = self.provider_name();
4748 // Return error on first attempt
4749 let stream = futures::stream::once(async move {
4750 Err(LanguageModelCompletionError::ServerOverloaded {
4751 provider,
4752 retry_after: None,
4753 })
4754 });
4755 async move { Ok(stream.boxed()) }.boxed()
4756 } else {
4757 // Succeed on retry
4758 self.inner.stream_completion(request, cx)
4759 }
4760 }
4761 }
4762
4763 let fail_once_model = Arc::new(FailOnceModel {
4764 inner: Arc::new(FakeLanguageModel::default()),
4765 failed_once: Arc::new(Mutex::new(false)),
4766 });
4767
4768 // Insert a user message
4769 thread.update(cx, |thread, cx| {
4770 thread.insert_user_message(
4771 "Test message",
4772 ContextLoadResult::default(),
4773 None,
4774 vec![],
4775 cx,
4776 );
4777 });
4778
4779 // Start completion with fail-once model
4780 thread.update(cx, |thread, cx| {
4781 thread.send_to_model(
4782 fail_once_model.clone(),
4783 CompletionIntent::UserPrompt,
4784 None,
4785 cx,
4786 );
4787 });
4788
4789 cx.run_until_parked();
4790
4791 // Verify retry state exists after first failure
4792 thread.read_with(cx, |thread, _| {
4793 assert!(
4794 thread.retry_state.is_some(),
4795 "Should have retry state after failure"
4796 );
4797 });
4798
4799 // Wait for retry delay
4800 cx.executor().advance_clock(BASE_RETRY_DELAY);
4801 cx.run_until_parked();
4802
        // The retry goes through FailOnceModel again, which now succeeds; drive the inner
        // FakeLanguageModel stream manually to complete it
4805 let inner_fake = fail_once_model.inner.clone();
4806
4807 // Wait a bit for the retry to start
4808 cx.run_until_parked();
4809
4810 // Check for pending completions and complete them
4811 if let Some(pending) = inner_fake.pending_completions().first() {
4812 inner_fake.stream_completion_response(pending, "Success!");
4813 inner_fake.end_completion_stream(pending);
4814 }
4815 cx.run_until_parked();
4816
4817 thread.read_with(cx, |thread, _| {
4818 assert!(
4819 thread.retry_state.is_none(),
4820 "Retry state should be cleared after successful completion"
4821 );
4822
4823 let has_assistant_message = thread
4824 .messages
4825 .iter()
4826 .any(|msg| msg.role == Role::Assistant && !msg.ui_only);
4827 assert!(
4828 has_assistant_message,
4829 "Should have an assistant message after successful retry"
4830 );
4831 });
4832 }
4833
4834 #[gpui::test]
4835 async fn test_rate_limit_retry_single_attempt(cx: &mut TestAppContext) {
4836 init_test_settings(cx);
4837
4838 let project = create_test_project(cx, json!({})).await;
4839 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
4840
4841 // Create a model that returns rate limit error with retry_after
4842 struct RateLimitModel {
4843 inner: Arc<FakeLanguageModel>,
4844 }
4845
4846 impl LanguageModel for RateLimitModel {
4847 fn id(&self) -> LanguageModelId {
4848 self.inner.id()
4849 }
4850
4851 fn name(&self) -> LanguageModelName {
4852 self.inner.name()
4853 }
4854
4855 fn provider_id(&self) -> LanguageModelProviderId {
4856 self.inner.provider_id()
4857 }
4858
4859 fn provider_name(&self) -> LanguageModelProviderName {
4860 self.inner.provider_name()
4861 }
4862
4863 fn supports_tools(&self) -> bool {
4864 self.inner.supports_tools()
4865 }
4866
4867 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
4868 self.inner.supports_tool_choice(choice)
4869 }
4870
4871 fn supports_images(&self) -> bool {
4872 self.inner.supports_images()
4873 }
4874
4875 fn telemetry_id(&self) -> String {
4876 self.inner.telemetry_id()
4877 }
4878
4879 fn max_token_count(&self) -> u64 {
4880 self.inner.max_token_count()
4881 }
4882
4883 fn count_tokens(
4884 &self,
4885 request: LanguageModelRequest,
4886 cx: &App,
4887 ) -> BoxFuture<'static, Result<u64>> {
4888 self.inner.count_tokens(request, cx)
4889 }
4890
4891 fn stream_completion(
4892 &self,
4893 _request: LanguageModelRequest,
4894 _cx: &AsyncApp,
4895 ) -> BoxFuture<
4896 'static,
4897 Result<
4898 BoxStream<
4899 'static,
4900 Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
4901 >,
4902 LanguageModelCompletionError,
4903 >,
4904 > {
4905 let provider = self.provider_name();
4906 async move {
4907 let stream = futures::stream::once(async move {
4908 Err(LanguageModelCompletionError::RateLimitExceeded {
4909 provider,
4910 retry_after: Some(Duration::from_secs(TEST_RATE_LIMIT_RETRY_SECS)),
4911 })
4912 });
4913 Ok(stream.boxed())
4914 }
4915 .boxed()
4916 }
4917
4918 fn as_fake(&self) -> &FakeLanguageModel {
4919 &self.inner
4920 }
4921 }
4922
4923 let model = Arc::new(RateLimitModel {
4924 inner: Arc::new(FakeLanguageModel::default()),
4925 });
4926
4927 // Insert a user message
4928 thread.update(cx, |thread, cx| {
4929 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
4930 });
4931
4932 // Start completion
4933 thread.update(cx, |thread, cx| {
4934 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
4935 });
4936
4937 cx.run_until_parked();
4938
4939 let retry_count = thread.update(cx, |thread, _| {
4940 thread
4941 .messages
4942 .iter()
4943 .filter(|m| {
4944 m.ui_only
4945 && m.segments.iter().any(|s| {
4946 if let MessageSegment::Text(text) = s {
4947 text.contains("rate limit exceeded")
4948 } else {
4949 false
4950 }
4951 })
4952 })
4953 .count()
4954 });
4955 assert_eq!(retry_count, 1, "Should have scheduled one retry");
4956
4957 thread.read_with(cx, |thread, _| {
4958 assert!(
4959 thread.retry_state.is_some(),
4960 "Rate limit errors should set retry_state"
4961 );
4962 if let Some(retry_state) = &thread.retry_state {
4963 assert_eq!(
4964 retry_state.max_attempts, MAX_RETRY_ATTEMPTS,
4965 "Rate limit errors should use MAX_RETRY_ATTEMPTS"
4966 );
4967 }
4968 });
4969
4970 // Verify we have one retry message
4971 thread.read_with(cx, |thread, _| {
4972 let retry_messages = thread
4973 .messages
4974 .iter()
4975 .filter(|msg| {
4976 msg.ui_only
4977 && msg.segments.iter().any(|seg| {
4978 if let MessageSegment::Text(text) = seg {
4979 text.contains("rate limit exceeded")
4980 } else {
4981 false
4982 }
4983 })
4984 })
4985 .count();
4986 assert_eq!(
4987 retry_messages, 1,
4988 "Should have one rate limit retry message"
4989 );
4990 });
4991
        // Check that the retry message includes the attempt count
4993 thread.read_with(cx, |thread, _| {
4994 let retry_message = thread
4995 .messages
4996 .iter()
4997 .find(|msg| msg.role == Role::System && msg.ui_only)
4998 .expect("Should have a retry message");
4999
5000 // Check that the message contains attempt count since we use retry_state
5001 if let Some(MessageSegment::Text(text)) = retry_message.segments.first() {
5002 assert!(
5003 text.contains(&format!("attempt 1 of {}", MAX_RETRY_ATTEMPTS)),
5004 "Rate limit retry message should contain attempt count with MAX_RETRY_ATTEMPTS"
5005 );
5006 assert!(
5007 text.contains("Retrying"),
5008 "Rate limit retry message should contain retry text"
5009 );
5010 }
5011 });
5012 }
5013
5014 #[gpui::test]
5015 async fn test_ui_only_messages_not_sent_to_model(cx: &mut TestAppContext) {
5016 init_test_settings(cx);
5017
5018 let project = create_test_project(cx, json!({})).await;
5019 let (_, _, thread, _, model) = setup_test_environment(cx, project.clone()).await;
5020
5021 // Insert a regular user message
5022 thread.update(cx, |thread, cx| {
5023 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5024 });
5025
5026 // Insert a UI-only message (like our retry notifications)
5027 thread.update(cx, |thread, cx| {
5028 let id = thread.next_message_id.post_inc();
5029 thread.messages.push(Message {
5030 id,
5031 role: Role::System,
5032 segments: vec![MessageSegment::Text(
5033 "This is a UI-only message that should not be sent to the model".to_string(),
5034 )],
5035 loaded_context: LoadedContext::default(),
5036 creases: Vec::new(),
5037 is_hidden: true,
5038 ui_only: true,
5039 });
5040 cx.emit(ThreadEvent::MessageAdded(id));
5041 });
5042
5043 // Insert another regular message
5044 thread.update(cx, |thread, cx| {
5045 thread.insert_user_message(
5046 "How are you?",
5047 ContextLoadResult::default(),
5048 None,
5049 vec![],
5050 cx,
5051 );
5052 });
5053
5054 // Generate the completion request
5055 let request = thread.update(cx, |thread, cx| {
5056 thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
5057 });
5058
5059 // Verify that the request only contains non-UI-only messages
5060 // Should have system prompt + 2 user messages, but not the UI-only message
5061 let user_messages: Vec<_> = request
5062 .messages
5063 .iter()
5064 .filter(|msg| msg.role == Role::User)
5065 .collect();
5066 assert_eq!(
5067 user_messages.len(),
5068 2,
5069 "Should have exactly 2 user messages"
5070 );
5071
5072 // Verify the UI-only content is not present anywhere in the request
5073 let request_text = request
5074 .messages
5075 .iter()
5076 .flat_map(|msg| &msg.content)
5077 .filter_map(|content| match content {
5078 MessageContent::Text(text) => Some(text.as_str()),
5079 _ => None,
5080 })
5081 .collect::<String>();
5082
5083 assert!(
5084 !request_text.contains("UI-only message"),
5085 "UI-only message content should not be in the request"
5086 );
5087
5088 // Verify the thread still has all 3 messages (including UI-only)
5089 thread.read_with(cx, |thread, _| {
5090 assert_eq!(
5091 thread.messages().count(),
5092 3,
5093 "Thread should have 3 messages"
5094 );
5095 assert_eq!(
5096 thread.messages().filter(|m| m.ui_only).count(),
5097 1,
5098 "Thread should have 1 UI-only message"
5099 );
5100 });
5101
5102 // Verify that UI-only messages are not serialized
5103 let serialized = thread
5104 .update(cx, |thread, cx| thread.serialize(cx))
5105 .await
5106 .unwrap();
5107 assert_eq!(
5108 serialized.messages.len(),
5109 2,
5110 "Serialized thread should only have 2 messages (no UI-only)"
5111 );
5112 }
5113
5114 #[gpui::test]
5115 async fn test_retry_cancelled_on_stop(cx: &mut TestAppContext) {
5116 init_test_settings(cx);
5117
5118 let project = create_test_project(cx, json!({})).await;
5119 let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
5120
5121 // Create model that returns overloaded error
5122 let model = Arc::new(ErrorInjector::new(TestError::Overloaded));
5123
5124 // Insert a user message
5125 thread.update(cx, |thread, cx| {
5126 thread.insert_user_message("Hello!", ContextLoadResult::default(), None, vec![], cx);
5127 });
5128
5129 // Start completion
5130 thread.update(cx, |thread, cx| {
5131 thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
5132 });
5133
5134 cx.run_until_parked();
5135
5136 // Verify retry was scheduled by checking for retry message
5137 let has_retry_message = thread.read_with(cx, |thread, _| {
5138 thread.messages.iter().any(|m| {
5139 m.ui_only
5140 && m.segments.iter().any(|s| {
5141 if let MessageSegment::Text(text) = s {
5142 text.contains("Retrying") && text.contains("seconds")
5143 } else {
5144 false
5145 }
5146 })
5147 })
5148 });
5149 assert!(has_retry_message, "Should have scheduled a retry");
5150
5151 // Cancel the completion before the retry happens
5152 thread.update(cx, |thread, cx| {
5153 thread.cancel_last_completion(None, cx);
5154 });
5155
5156 cx.run_until_parked();
5157
5158 // The retry should not have happened - no pending completions
5159 let fake_model = model.as_fake();
5160 assert_eq!(
5161 fake_model.pending_completions().len(),
5162 0,
5163 "Should have no pending completions after cancellation"
5164 );
5165
5166 // Verify the retry was cancelled by checking retry state
5167 thread.read_with(cx, |thread, _| {
5168 if let Some(retry_state) = &thread.retry_state {
5169 panic!(
5170 "retry_state should be cleared after cancellation, but found: attempt={}, max_attempts={}, intent={:?}",
5171 retry_state.attempt, retry_state.max_attempts, retry_state.intent
5172 );
5173 }
5174 });
5175 }
5176
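    // Drives a request whose follow-up summarization stream ends without producing any
    // content, leaving the thread summary in the Error state.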
5177 fn test_summarize_error(
5178 model: &Arc<dyn LanguageModel>,
5179 thread: &Entity<Thread>,
5180 cx: &mut TestAppContext,
5181 ) {
5182 thread.update(cx, |thread, cx| {
5183 thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
5184 thread.send_to_model(
5185 model.clone(),
5186 CompletionIntent::ThreadSummarization,
5187 None,
5188 cx,
5189 );
5190 });
5191
5192 let fake_model = model.as_fake();
5193 simulate_successful_response(&fake_model, cx);
5194
5195 thread.read_with(cx, |thread, _| {
5196 assert!(matches!(thread.summary(), ThreadSummary::Generating));
5197 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5198 });
5199
        // End the summary request stream without any content so summarization fails
5201 cx.run_until_parked();
5202 fake_model.end_last_completion_stream();
5203 cx.run_until_parked();
5204
5205 // State is set to Error and default message
5206 thread.read_with(cx, |thread, _| {
5207 assert!(matches!(thread.summary(), ThreadSummary::Error));
5208 assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
5209 });
5210 }
5211
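    // Streams one assistant response chunk on the most recent completion and closes the stream.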
5212 fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
5213 cx.run_until_parked();
5214 fake_model.stream_last_completion_response("Assistant response");
5215 fake_model.end_last_completion_stream();
5216 cx.run_until_parked();
5217 }
5218
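    // Registers the settings, tool, and language model globals that these tests rely on.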
5219 fn init_test_settings(cx: &mut TestAppContext) {
5220 cx.update(|cx| {
5221 let settings_store = SettingsStore::test(cx);
5222 cx.set_global(settings_store);
5223 language::init(cx);
5224 Project::init_settings(cx);
5225 AgentSettings::register(cx);
5226 prompt_store::init(cx);
5227 thread_store::init(cx);
5228 workspace::init_settings(cx);
5229 language_model::init_settings(cx);
5230 ThemeSettings::register(cx);
5231 ToolRegistry::default_global(cx);
5232 assistant_tool::init(cx);
5233
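            // assistant_tools::init needs an HTTP client; a fake client that always returns 200 is enough here.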
5234 let http_client = Arc::new(http_client::HttpClientWithUrl::new(
5235 http_client::FakeHttpClient::with_200_response(),
5236 "http://localhost".to_string(),
5237 None,
5238 ));
5239 assistant_tools::init(http_client, cx);
5240 });
5241 }
5242
5243 // Helper to create a test project with test files
5244 async fn create_test_project(
5245 cx: &mut TestAppContext,
5246 files: serde_json::Value,
5247 ) -> Entity<Project> {
5248 let fs = FakeFs::new(cx.executor());
5249 fs.insert_tree(path!("/test"), files).await;
5250 Project::test(fs, [path!("/test").as_ref()], cx).await
5251 }
5252
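    // Builds a workspace, thread store, thread, and context store for tests, along with a fake
    // language model.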
5253 async fn setup_test_environment(
5254 cx: &mut TestAppContext,
5255 project: Entity<Project>,
5256 ) -> (
5257 Entity<Workspace>,
5258 Entity<ThreadStore>,
5259 Entity<Thread>,
5260 Entity<ContextStore>,
5261 Arc<dyn LanguageModel>,
5262 ) {
5263 let (workspace, cx) =
5264 cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
5265
5266 let thread_store = cx
5267 .update(|_, cx| {
5268 ThreadStore::load(
5269 project.clone(),
5270 cx.new(|_| ToolWorkingSet::default()),
5271 None,
5272 Arc::new(PromptBuilder::new(None).unwrap()),
5273 cx,
5274 )
5275 })
5276 .await
5277 .unwrap();
5278
5279 let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
5280 let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));
5281
5282 let provider = Arc::new(FakeLanguageModelProvider);
5283 let model = provider.test_model();
5284 let model: Arc<dyn LanguageModel> = Arc::new(model);
5285
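        // Register the fake model as both the default and the thread summary model so every
        // request in these tests hits the same fake.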
5286 cx.update(|_, cx| {
5287 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
5288 registry.set_default_model(
5289 Some(ConfiguredModel {
5290 provider: provider.clone(),
5291 model: model.clone(),
5292 }),
5293 cx,
5294 );
5295 registry.set_thread_summary_model(
5296 Some(ConfiguredModel {
5297 provider,
5298 model: model.clone(),
5299 }),
5300 cx,
5301 );
5302 })
5303 });
5304
5305 (workspace, thread_store, thread, context_store, model)
5306 }
5307
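    // Opens `path` as a buffer in the project and adds it to the context store, returning the buffer.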
5308 async fn add_file_to_context(
5309 project: &Entity<Project>,
5310 context_store: &Entity<ContextStore>,
5311 path: &str,
5312 cx: &mut TestAppContext,
5313 ) -> Result<Entity<language::Buffer>> {
5314 let buffer_path = project
5315 .read_with(cx, |project, cx| project.find_project_path(path, cx))
5316 .unwrap();
5317
5318 let buffer = project
5319 .update(cx, |project, cx| {
5320 project.open_buffer(buffer_path.clone(), cx)
5321 })
5322 .await
5323 .unwrap();
5324
5325 context_store.update(cx, |context_store, cx| {
5326 context_store.add_file_from_buffer(&buffer_path, buffer.clone(), false, cx);
5327 });
5328
5329 Ok(buffer)
5330 }
5331}