// thread.rs

   1use std::io::Write;
   2use std::ops::Range;
   3use std::sync::Arc;
   4use std::time::Instant;
   5
   6use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
   7use anyhow::{Result, anyhow};
   8use assistant_tool::{ActionLog, AnyToolCard, Tool, ToolWorkingSet};
   9use chrono::{DateTime, Utc};
  10use client::{ModelRequestUsage, RequestUsage};
  11use collections::{HashMap, HashSet};
  12use editor::display_map::CreaseMetadata;
  13use feature_flags::{self, FeatureFlagAppExt};
  14use futures::future::Shared;
  15use futures::{FutureExt, StreamExt as _};
  16use git::repository::DiffType;
  17use gpui::{
  18    AnyWindowHandle, App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task,
  19    WeakEntity,
  20};
  21use language_model::{
  22    ConfiguredModel, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
  23    LanguageModelId, LanguageModelKnownError, LanguageModelRegistry, LanguageModelRequest,
  24    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
  25    LanguageModelToolResultContent, LanguageModelToolUseId, MessageContent,
  26    ModelRequestLimitReachedError, PaymentRequiredError, Role, SelectedModel, StopReason,
  27    TokenUsage,
  28};
  29use postage::stream::Stream as _;
  30use project::Project;
  31use project::git_store::{GitStore, GitStoreCheckpoint, RepositoryState};
  32use prompt_store::{ModelContext, PromptBuilder};
  33use proto::Plan;
  34use schemars::JsonSchema;
  35use serde::{Deserialize, Serialize};
  36use settings::Settings;
  37use thiserror::Error;
  38use ui::Window;
  39use util::{ResultExt as _, post_inc};
  40
  41use uuid::Uuid;
  42use zed_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
  43
  44use crate::ThreadStore;
  45use crate::agent_profile::AgentProfile;
  46use crate::context::{AgentContext, AgentContextHandle, ContextLoadResult, LoadedContext};
  47use crate::thread_store::{
  48    SerializedCrease, SerializedLanguageModel, SerializedMessage, SerializedMessageSegment,
  49    SerializedThread, SerializedToolResult, SerializedToolUse, SharedProjectContext,
  50};
  51use crate::tool_use::{PendingToolUse, ToolUse, ToolUseMetadata, ToolUseState};
  52
/// Stable, globally unique identifier for a [`Thread`], backed by a UUID string.
#[derive(
    Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
)]
pub struct ThreadId(Arc<str>);
  57
  58impl ThreadId {
  59    pub fn new() -> Self {
  60        Self(Uuid::new_v4().to_string().into())
  61    }
  62}
  63
  64impl std::fmt::Display for ThreadId {
  65    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  66        write!(f, "{}", self.0)
  67    }
  68}
  69
  70impl From<&str> for ThreadId {
  71    fn from(value: &str) -> Self {
  72        Self(value.into())
  73    }
  74}
  75
/// The ID of the user prompt that initiated a request.
///
/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
/// Backed by a random UUID string; see [`PromptId::new`].
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct PromptId(Arc<str>);
  81
  82impl PromptId {
  83    pub fn new() -> Self {
  84        Self(Uuid::new_v4().to_string().into())
  85    }
  86}
  87
  88impl std::fmt::Display for PromptId {
  89    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
  90        write!(f, "{}", self.0)
  91    }
  92}
  93
/// Monotonically increasing identifier of a message within a single thread.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);
  96
  97impl MessageId {
  98    fn post_inc(&mut self) -> Self {
  99        Self(post_inc(&mut self.0))
 100    }
 101}
 102
/// Stored information that can be used to resurrect a context crease when creating an editor for a past message.
#[derive(Clone, Debug)]
pub struct MessageCrease {
    /// Range within the message's text where the crease appears.
    pub range: Range<usize>,
    /// Icon and label used to render the collapsed crease.
    pub metadata: CreaseMetadata,
    /// None for a deserialized message, Some otherwise.
    pub context: Option<AgentContextHandle>,
}
 111
/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    /// Ordered text/thinking segments making up the message body.
    pub segments: Vec<MessageSegment>,
    /// Context that was loaded and attached alongside this message.
    pub loaded_context: LoadedContext,
    /// Creases used to re-render collapsed context when editing past messages.
    pub creases: Vec<MessageCrease>,
    /// Hidden messages are skipped when detecting user turn boundaries (see `is_turn_end`);
    /// NOTE(review): presumably also excluded from display — confirm with UI code.
    pub is_hidden: bool,
}
 122
 123impl Message {
 124    /// Returns whether the message contains any meaningful text that should be displayed
 125    /// The model sometimes runs tool without producing any text or just a marker ([`USING_TOOL_MARKER`])
 126    pub fn should_display_content(&self) -> bool {
 127        self.segments.iter().all(|segment| segment.should_display())
 128    }
 129
 130    pub fn push_thinking(&mut self, text: &str, signature: Option<String>) {
 131        if let Some(MessageSegment::Thinking {
 132            text: segment,
 133            signature: current_signature,
 134        }) = self.segments.last_mut()
 135        {
 136            if let Some(signature) = signature {
 137                *current_signature = Some(signature);
 138            }
 139            segment.push_str(text);
 140        } else {
 141            self.segments.push(MessageSegment::Thinking {
 142                text: text.to_string(),
 143                signature,
 144            });
 145        }
 146    }
 147
 148    pub fn push_text(&mut self, text: &str) {
 149        if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
 150            segment.push_str(text);
 151        } else {
 152            self.segments.push(MessageSegment::Text(text.to_string()));
 153        }
 154    }
 155
 156    pub fn to_string(&self) -> String {
 157        let mut result = String::new();
 158
 159        if !self.loaded_context.text.is_empty() {
 160            result.push_str(&self.loaded_context.text);
 161        }
 162
 163        for segment in &self.segments {
 164            match segment {
 165                MessageSegment::Text(text) => result.push_str(text),
 166                MessageSegment::Thinking { text, .. } => {
 167                    result.push_str("<think>\n");
 168                    result.push_str(text);
 169                    result.push_str("\n</think>");
 170                }
 171                MessageSegment::RedactedThinking(_) => {}
 172            }
 173        }
 174
 175        result
 176    }
 177}
 178
/// One chunk of a message body: plain text, model "thinking", or redacted thinking.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MessageSegment {
    Text(String),
    Thinking {
        text: String,
        /// Signature for the thinking block, when the provider supplied one.
        signature: Option<String>,
    },
    /// Opaque thinking payload that cannot be rendered.
    RedactedThinking(Vec<u8>),
}
 188
 189impl MessageSegment {
 190    pub fn should_display(&self) -> bool {
 191        match self {
 192            Self::Text(text) => text.is_empty(),
 193            Self::Thinking { text, .. } => text.is_empty(),
 194            Self::RedactedThinking(_) => false,
 195        }
 196    }
 197}
 198
/// Snapshot of the project's worktrees (and their git state) captured alongside a thread.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ProjectSnapshot {
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    /// Paths of buffers that had unsaved changes at capture time.
    pub unsaved_buffer_paths: Vec<String>,
    pub timestamp: DateTime<Utc>,
}
 205
/// Per-worktree portion of a [`ProjectSnapshot`].
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct WorktreeSnapshot {
    pub worktree_path: String,
    /// Git metadata, if any was captured for this worktree.
    pub git_state: Option<GitState>,
}
 211
/// Git repository metadata captured in a [`WorktreeSnapshot`].
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GitState {
    pub remote_url: Option<String>,
    pub head_sha: Option<String>,
    pub current_branch: Option<String>,
    pub diff: Option<String>,
}
 219
/// Associates a message with the git checkpoint taken when it was sent.
#[derive(Clone, Debug)]
pub struct ThreadCheckpoint {
    message_id: MessageId,
    git_checkpoint: GitStoreCheckpoint,
}
 225
/// Thumbs-up / thumbs-down feedback on a thread or message.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}
 231
/// Outcome of the most recent checkpoint restore: still running, or failed.
pub enum LastRestoreCheckpoint {
    /// Restore is in flight for the given message.
    Pending {
        message_id: MessageId,
    },
    /// Restore failed; `error` holds the failure description.
    Error {
        message_id: MessageId,
        error: String,
    },
}
 241
 242impl LastRestoreCheckpoint {
 243    pub fn message_id(&self) -> MessageId {
 244        match self {
 245            LastRestoreCheckpoint::Pending { message_id } => *message_id,
 246            LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
 247        }
 248    }
 249}
 250
/// State machine for the lazily generated detailed thread summary.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub enum DetailedSummaryState {
    #[default]
    NotGenerated,
    /// Generation is in progress for the given message.
    Generating {
        message_id: MessageId,
    },
    /// A generated summary is available.
    Generated {
        text: SharedString,
        message_id: MessageId,
    },
}
 263
 264impl DetailedSummaryState {
 265    fn text(&self) -> Option<SharedString> {
 266        if let Self::Generated { text, .. } = self {
 267            Some(text.clone())
 268        } else {
 269            None
 270        }
 271    }
 272}
 273
/// Token usage totals relative to the model's maximum context size.
#[derive(Default, Debug)]
pub struct TotalTokenUsage {
    pub total: u64,
    /// Maximum context size; 0 when no model is selected (see [`TotalTokenUsage::ratio`]).
    pub max: u64,
}
 279
 280impl TotalTokenUsage {
 281    pub fn ratio(&self) -> TokenUsageRatio {
 282        #[cfg(debug_assertions)]
 283        let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
 284            .unwrap_or("0.8".to_string())
 285            .parse()
 286            .unwrap();
 287        #[cfg(not(debug_assertions))]
 288        let warning_threshold: f32 = 0.8;
 289
 290        // When the maximum is unknown because there is no selected model,
 291        // avoid showing the token limit warning.
 292        if self.max == 0 {
 293            TokenUsageRatio::Normal
 294        } else if self.total >= self.max {
 295            TokenUsageRatio::Exceeded
 296        } else if self.total as f32 / self.max as f32 >= warning_threshold {
 297            TokenUsageRatio::Warning
 298        } else {
 299            TokenUsageRatio::Normal
 300        }
 301    }
 302
 303    pub fn add(&self, tokens: u64) -> TotalTokenUsage {
 304        TotalTokenUsage {
 305            total: self.total + tokens,
 306            max: self.max,
 307        }
 308    }
 309}
 310
/// Severity bucket produced by [`TotalTokenUsage::ratio`].
#[derive(Debug, Default, PartialEq, Eq)]
pub enum TokenUsageRatio {
    #[default]
    Normal,
    /// Usage crossed the warning threshold (80% by default).
    Warning,
    /// Usage reached or exceeded the maximum.
    Exceeded,
}
 318
/// Where a pending completion stands before streaming starts.
#[derive(Debug, Clone, Copy)]
pub enum QueueState {
    Sending,
    /// Queued behind other requests at the given position.
    Queued { position: usize },
    Started,
}
 325
/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: ThreadSummary,
    pending_summary: Task<Option<()>>,
    detailed_summary_task: Task<Option<()>>,
    detailed_summary_tx: postage::watch::Sender<DetailedSummaryState>,
    detailed_summary_rx: postage::watch::Receiver<DetailedSummaryState>,
    completion_mode: agent_settings::CompletionMode,
    /// Messages ordered by id; `message()` relies on this ordering for binary search.
    messages: Vec<Message>,
    next_message_id: MessageId,
    last_prompt_id: PromptId,
    project_context: SharedProjectContext,
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Entity<ToolWorkingSet>,
    tool_use: ToolUseState,
    action_log: Entity<ActionLog>,
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    /// Checkpoint awaiting finalization once generation finishes
    /// (see `finalize_pending_checkpoint`).
    pending_checkpoint: Option<ThreadCheckpoint>,
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    request_token_usage: Vec<TokenUsage>,
    cumulative_token_usage: TokenUsage,
    exceeded_window_error: Option<ExceededWindowError>,
    tool_use_limit_reached: bool,
    feedback: Option<ThreadFeedback>,
    message_feedback: HashMap<MessageId, ThreadFeedback>,
    last_auto_capture_at: Option<Instant>,
    /// Timestamp of the most recently streamed chunk; drives `is_generation_stale`.
    last_received_chunk_at: Option<Instant>,
    request_callback: Option<
        Box<dyn FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>])>,
    >,
    /// Remaining agentic turns; initialized to `u32::MAX` (effectively unlimited).
    remaining_turns: u32,
    configured_model: Option<ConfiguredModel>,
    profile: AgentProfile,
}
 366
/// Lifecycle of a thread's one-line summary.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ThreadSummary {
    /// No summary generated yet.
    Pending,
    Generating,
    Ready(SharedString),
    /// Generation failed; callers fall back to [`ThreadSummary::DEFAULT`].
    Error,
}
 374
 375impl ThreadSummary {
 376    pub const DEFAULT: SharedString = SharedString::new_static("New Thread");
 377
 378    pub fn or_default(&self) -> SharedString {
 379        self.unwrap_or(Self::DEFAULT)
 380    }
 381
 382    pub fn unwrap_or(&self, message: impl Into<SharedString>) -> SharedString {
 383        self.ready().unwrap_or_else(|| message.into())
 384    }
 385
 386    pub fn ready(&self) -> Option<SharedString> {
 387        match self {
 388            ThreadSummary::Ready(summary) => Some(summary.clone()),
 389            ThreadSummary::Pending | ThreadSummary::Generating | ThreadSummary::Error => None,
 390        }
 391    }
 392}
 393
/// Recorded when a request exceeded the model's context window.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ExceededWindowError {
    /// Model used when last message exceeded context window
    model_id: LanguageModelId,
    /// Token count including last message
    token_count: u64,
}
 401
 402impl Thread {
    /// Creates an empty thread for `project`, using the globally configured
    /// default model and agent profile.
    pub fn new(
        project: Entity<Project>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        system_prompt: SharedProjectContext,
        cx: &mut Context<Self>,
    ) -> Self {
        let (detailed_summary_tx, detailed_summary_rx) = postage::watch::channel();
        let configured_model = LanguageModelRegistry::read_global(cx).default_model();
        let profile_id = AgentSettings::get_global(cx).default_profile.clone();

        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: ThreadSummary::Pending,
            pending_summary: Task::ready(None),
            detailed_summary_task: Task::ready(None),
            detailed_summary_tx,
            detailed_summary_rx,
            completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
            messages: Vec::new(),
            next_message_id: MessageId(0),
            last_prompt_id: PromptId::new(),
            project_context: system_prompt,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            tool_use: ToolUseState::new(tools.clone()),
            action_log: cx.new(|_| ActionLog::new(project.clone())),
            initial_project_snapshot: {
                // Kick off the snapshot capture immediately and share the task
                // so it only runs once no matter how many callers await it.
                let project_snapshot = Self::project_snapshot(project, cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            request_token_usage: Vec::new(),
            cumulative_token_usage: TokenUsage::default(),
            exceeded_window_error: None,
            tool_use_limit_reached: false,
            feedback: None,
            message_feedback: HashMap::default(),
            last_auto_capture_at: None,
            last_received_chunk_at: None,
            request_callback: None,
            remaining_turns: u32::MAX,
            configured_model,
            profile: AgentProfile::new(profile_id, tools),
        }
    }
 457
    /// Rebuilds a thread from its serialized form.
    ///
    /// Falls back to global settings for any field the serialized thread does
    /// not carry (model, completion mode, profile).
    pub fn deserialize(
        id: ThreadId,
        serialized: SerializedThread,
        project: Entity<Project>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        project_context: SharedProjectContext,
        window: Option<&mut Window>, // None in headless mode
        cx: &mut Context<Self>,
    ) -> Self {
        // Continue numbering after the highest message id we've seen.
        let next_message_id = MessageId(
            serialized
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use = ToolUseState::from_serialized_messages(
            tools.clone(),
            &serialized.messages,
            project.clone(),
            window,
            cx,
        );
        let (detailed_summary_tx, detailed_summary_rx) =
            postage::watch::channel_with(serialized.detailed_summary_state);

        // Prefer the model the thread was saved with; otherwise the registry default.
        let configured_model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
            serialized
                .model
                .and_then(|model| {
                    let model = SelectedModel {
                        provider: model.provider.clone().into(),
                        model: model.model.clone().into(),
                    };
                    registry.select_model(&model, cx)
                })
                .or_else(|| registry.default_model())
        });

        let completion_mode = serialized
            .completion_mode
            .unwrap_or_else(|| AgentSettings::get_global(cx).preferred_completion_mode);
        let profile_id = serialized
            .profile
            .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());

        Self {
            id,
            updated_at: serialized.updated_at,
            summary: ThreadSummary::Ready(serialized.summary),
            pending_summary: Task::ready(None),
            detailed_summary_task: Task::ready(None),
            detailed_summary_tx,
            detailed_summary_rx,
            completion_mode,
            messages: serialized
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    segments: message
                        .segments
                        .into_iter()
                        .map(|segment| match segment {
                            SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
                            SerializedMessageSegment::Thinking { text, signature } => {
                                MessageSegment::Thinking { text, signature }
                            }
                            SerializedMessageSegment::RedactedThinking { data } => {
                                MessageSegment::RedactedThinking(data)
                            }
                        })
                        .collect(),
                    // Context handles are not persisted; only the rendered text survives.
                    loaded_context: LoadedContext {
                        contexts: Vec::new(),
                        text: message.context,
                        images: Vec::new(),
                    },
                    creases: message
                        .creases
                        .into_iter()
                        .map(|crease| MessageCrease {
                            range: crease.start..crease.end,
                            metadata: CreaseMetadata {
                                icon_path: crease.icon_path,
                                label: crease.label,
                            },
                            context: None,
                        })
                        .collect(),
                    is_hidden: message.is_hidden,
                })
                .collect(),
            next_message_id,
            last_prompt_id: PromptId::new(),
            project_context,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            tool_use,
            action_log: cx.new(|_| ActionLog::new(project)),
            initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
            request_token_usage: serialized.request_token_usage,
            cumulative_token_usage: serialized.cumulative_token_usage,
            exceeded_window_error: None,
            tool_use_limit_reached: serialized.tool_use_limit_reached,
            feedback: None,
            message_feedback: HashMap::default(),
            last_auto_capture_at: None,
            last_received_chunk_at: None,
            request_callback: None,
            remaining_turns: u32::MAX,
            configured_model,
            profile: AgentProfile::new(profile_id, tools),
        }
    }
 581
    /// Installs a callback invoked with each outgoing request and the events
    /// streamed back for it.
    pub fn set_request_callback(
        &mut self,
        callback: impl 'static
        + FnMut(&LanguageModelRequest, &[Result<LanguageModelCompletionEvent, String>]),
    ) {
        self.request_callback = Some(Box::new(callback));
    }
 589
    /// The unique id of this thread.
    pub fn id(&self) -> &ThreadId {
        &self.id
    }
 593
    /// The agent profile currently in use.
    pub fn profile(&self) -> &AgentProfile {
        &self.profile
    }
 597
 598    pub fn set_profile(&mut self, id: AgentProfileId, cx: &mut Context<Self>) {
 599        if &id != self.profile.id() {
 600            self.profile = AgentProfile::new(id, self.tools.clone());
 601            cx.emit(ThreadEvent::ProfileChanged);
 602        }
 603    }
 604
    /// Whether the thread contains no messages yet.
    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }
 608
    /// When the thread was last touched.
    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }
 612
    /// Marks the thread as modified now.
    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }
 616
    /// Generates a fresh prompt id for the next user submission.
    pub fn advance_prompt_id(&mut self) {
        self.last_prompt_id = PromptId::new();
    }
 620
    /// Shared handle to the project context used for the system prompt.
    pub fn project_context(&self) -> SharedProjectContext {
        self.project_context.clone()
    }
 624
 625    pub fn get_or_init_configured_model(&mut self, cx: &App) -> Option<ConfiguredModel> {
 626        if self.configured_model.is_none() {
 627            self.configured_model = LanguageModelRegistry::read_global(cx).default_model();
 628        }
 629        self.configured_model.clone()
 630    }
 631
    /// The currently configured model, if any.
    pub fn configured_model(&self) -> Option<ConfiguredModel> {
        self.configured_model.clone()
    }
 635
    /// Replaces the configured model and notifies observers.
    pub fn set_configured_model(&mut self, model: Option<ConfiguredModel>, cx: &mut Context<Self>) {
        self.configured_model = model;
        cx.notify();
    }
 640
    /// Current state of the thread's one-line summary.
    pub fn summary(&self) -> &ThreadSummary {
        &self.summary
    }
 644
 645    pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
 646        let current_summary = match &self.summary {
 647            ThreadSummary::Pending | ThreadSummary::Generating => return,
 648            ThreadSummary::Ready(summary) => summary,
 649            ThreadSummary::Error => &ThreadSummary::DEFAULT,
 650        };
 651
 652        let mut new_summary = new_summary.into();
 653
 654        if new_summary.is_empty() {
 655            new_summary = ThreadSummary::DEFAULT;
 656        }
 657
 658        if current_summary != &new_summary {
 659            self.summary = ThreadSummary::Ready(new_summary);
 660            cx.emit(ThreadEvent::SummaryChanged);
 661        }
 662    }
 663
    /// The completion mode used for requests from this thread.
    pub fn completion_mode(&self) -> CompletionMode {
        self.completion_mode
    }
 667
    /// Sets the completion mode used for subsequent requests.
    pub fn set_completion_mode(&mut self, mode: CompletionMode) {
        self.completion_mode = mode;
    }
 671
 672    pub fn message(&self, id: MessageId) -> Option<&Message> {
 673        let index = self
 674            .messages
 675            .binary_search_by(|message| message.id.cmp(&id))
 676            .ok()?;
 677
 678        self.messages.get(index)
 679    }
 680
    /// Iterates over all messages in order.
    pub fn messages(&self) -> impl ExactSizeIterator<Item = &Message> {
        self.messages.iter()
    }
 684
 685    pub fn is_generating(&self) -> bool {
 686        !self.pending_completions.is_empty() || !self.all_tools_finished()
 687    }
 688
    /// Indicates whether streaming of language model events is stale, i.e. no
    /// chunk has arrived within the threshold (milliseconds).
    /// Returns `None` when no chunk timestamp has been recorded.
    pub fn is_generation_stale(&self) -> Option<bool> {
        const STALE_THRESHOLD: u128 = 250;

        self.last_received_chunk_at
            .map(|instant| instant.elapsed().as_millis() > STALE_THRESHOLD)
    }
 697
    /// Records that a streaming chunk just arrived (resets staleness).
    fn received_chunk(&mut self) {
        self.last_received_chunk_at = Some(Instant::now());
    }
 701
 702    pub fn queue_state(&self) -> Option<QueueState> {
 703        self.pending_completions
 704            .first()
 705            .map(|pending_completion| pending_completion.queue_state)
 706    }
 707
    /// The working set of tools available to this thread.
    pub fn tools(&self) -> &Entity<ToolWorkingSet> {
        &self.tools
    }
 711
 712    pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
 713        self.tool_use
 714            .pending_tool_uses()
 715            .into_iter()
 716            .find(|tool_use| &tool_use.id == id)
 717    }
 718
 719    pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
 720        self.tool_use
 721            .pending_tool_uses()
 722            .into_iter()
 723            .filter(|tool_use| tool_use.status.needs_confirmation())
 724    }
 725
    /// Whether any tool uses are still pending (including errored ones).
    pub fn has_pending_tool_uses(&self) -> bool {
        !self.tool_use.pending_tool_uses().is_empty()
    }
 729
    /// The git checkpoint recorded for `id`, if one exists.
    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }
 733
    /// Restores the project to `checkpoint`'s git state, truncating the thread
    /// back to the checkpointed message on success.
    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        // Mark the restore as in-flight so observers can reflect it immediately.
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let git_store = self.project().read(cx).git_store().clone();
        let restore = git_store.update(cx, |git_store, cx| {
            git_store.restore_checkpoint(checkpoint.git_checkpoint.clone(), cx)
        });

        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    // Keep the error so it can be surfaced for this message.
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                }
                this.pending_checkpoint = None;
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }
 769
 770    fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
 771        let pending_checkpoint = if self.is_generating() {
 772            return;
 773        } else if let Some(checkpoint) = self.pending_checkpoint.take() {
 774            checkpoint
 775        } else {
 776            return;
 777        };
 778
 779        self.finalize_checkpoint(pending_checkpoint, cx);
 780    }
 781
    /// Compares `pending_checkpoint` against the repository's current state and
    /// records it only when the two differ (i.e. the turn actually changed files).
    fn finalize_checkpoint(
        &mut self,
        pending_checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) {
        let git_store = self.project.read(cx).git_store().clone();
        let final_checkpoint = git_store.update(cx, |git_store, cx| git_store.checkpoint(cx));
        cx.spawn(async move |this, cx| match final_checkpoint.await {
            Ok(final_checkpoint) => {
                // Treat a failed comparison as "not equal" so the checkpoint is kept.
                let equal = git_store
                    .update(cx, |store, cx| {
                        store.compare_checkpoints(
                            pending_checkpoint.git_checkpoint.clone(),
                            final_checkpoint.clone(),
                            cx,
                        )
                    })?
                    .await
                    .unwrap_or(false);

                if !equal {
                    this.update(cx, |this, cx| {
                        this.insert_checkpoint(pending_checkpoint, cx)
                    })?;
                }

                Ok(())
            }
            // If taking the final checkpoint fails, keep the pending one rather
            // than silently losing it.
            Err(_) => this.update(cx, |this, cx| {
                this.insert_checkpoint(pending_checkpoint, cx)
            }),
        })
        .detach();
    }
 816
    /// Records `checkpoint` for its message and notifies observers.
    fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
        self.checkpoints_by_message
            .insert(checkpoint.message_id, checkpoint);
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();
    }
 823
    /// Status of the most recent checkpoint restore, if one happened.
    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }
 827
    /// Removes the message with `message_id` and everything after it,
    /// discarding the checkpoints of all removed messages.
    pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
        let Some(message_ix) = self
            .messages
            .iter()
            .rposition(|message| message.id == message_id)
        else {
            return;
        };
        // Drop checkpoints for every removed message so they can't be restored.
        for deleted_message in self.messages.drain(message_ix..) {
            self.checkpoints_by_message.remove(&deleted_message.id);
        }
        cx.notify();
    }
 841
    /// Iterates over the contexts attached to the message with `id`
    /// (empty if the message doesn't exist).
    pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AgentContext> {
        self.messages
            .iter()
            .find(|message| message.id == id)
            .into_iter()
            .flat_map(|message| message.loaded_context.contexts.iter())
    }
 849
 850    pub fn is_turn_end(&self, ix: usize) -> bool {
 851        if self.messages.is_empty() {
 852            return false;
 853        }
 854
 855        if !self.is_generating() && ix == self.messages.len() - 1 {
 856            return true;
 857        }
 858
 859        let Some(message) = self.messages.get(ix) else {
 860            return false;
 861        };
 862
 863        if message.role != Role::Assistant {
 864            return false;
 865        }
 866
 867        self.messages
 868            .get(ix + 1)
 869            .and_then(|message| {
 870                self.message(message.id)
 871                    .map(|next_message| next_message.role == Role::User && !next_message.is_hidden)
 872            })
 873            .unwrap_or(false)
 874    }
 875
    /// Whether the provider reported that the tool-use limit was reached.
    pub fn tool_use_limit_reached(&self) -> bool {
        self.tool_use_limit_reached
    }
 879
    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        self.tool_use
            .pending_tool_uses()
            .iter()
            .all(|pending_tool_use| pending_tool_use.status.is_error())
    }
 889
 890    /// Returns whether any pending tool uses may perform edits
 891    pub fn has_pending_edit_tool_uses(&self) -> bool {
 892        self.tool_use
 893            .pending_tool_uses()
 894            .iter()
 895            .filter(|pending_tool_use| !pending_tool_use.status.is_error())
 896            .any(|pending_tool_use| pending_tool_use.may_perform_edits)
 897    }
 898
    /// Returns the tool uses associated with the given message.
    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, cx)
    }
 902
    /// Returns the tool results produced for the given assistant message.
    pub fn tool_results_for_message(
        &self,
        assistant_message_id: MessageId,
    ) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(assistant_message_id)
    }
 909
    /// Returns the result for a specific tool use, if it has one.
    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }
 913
 914    pub fn output_for_tool(&self, id: &LanguageModelToolUseId) -> Option<&Arc<str>> {
 915        match &self.tool_use.tool_result(id)?.content {
 916            LanguageModelToolResultContent::Text(text) => Some(text),
 917            LanguageModelToolResultContent::Image(_) => {
 918                // TODO: We should display image
 919                None
 920            }
 921        }
 922    }
 923
    /// Returns a clone of the UI card recorded for a tool result, if any.
    pub fn card_for_tool(&self, id: &LanguageModelToolUseId) -> Option<AnyToolCard> {
        self.tool_use.tool_result_card(id).cloned()
    }
 927
 928    /// Return tools that are both enabled and supported by the model
 929    pub fn available_tools(
 930        &self,
 931        cx: &App,
 932        model: Arc<dyn LanguageModel>,
 933    ) -> Vec<LanguageModelRequestTool> {
 934        if model.supports_tools() {
 935            resolve_tool_name_conflicts(self.profile.enabled_tools(cx).as_slice())
 936                .into_iter()
 937                .filter_map(|(name, tool)| {
 938                    // Skip tools that cannot be supported
 939                    let input_schema = tool.input_schema(model.tool_input_format()).ok()?;
 940                    Some(LanguageModelRequestTool {
 941                        name,
 942                        description: tool.description(),
 943                        input_schema,
 944                    })
 945                })
 946                .collect()
 947        } else {
 948            Vec::default()
 949        }
 950    }
 951
 952    pub fn insert_user_message(
 953        &mut self,
 954        text: impl Into<String>,
 955        loaded_context: ContextLoadResult,
 956        git_checkpoint: Option<GitStoreCheckpoint>,
 957        creases: Vec<MessageCrease>,
 958        cx: &mut Context<Self>,
 959    ) -> MessageId {
 960        if !loaded_context.referenced_buffers.is_empty() {
 961            self.action_log.update(cx, |log, cx| {
 962                for buffer in loaded_context.referenced_buffers {
 963                    log.buffer_read(buffer, cx);
 964                }
 965            });
 966        }
 967
 968        let message_id = self.insert_message(
 969            Role::User,
 970            vec![MessageSegment::Text(text.into())],
 971            loaded_context.loaded_context,
 972            creases,
 973            false,
 974            cx,
 975        );
 976
 977        if let Some(git_checkpoint) = git_checkpoint {
 978            self.pending_checkpoint = Some(ThreadCheckpoint {
 979                message_id,
 980                git_checkpoint,
 981            });
 982        }
 983
 984        self.auto_capture_telemetry(cx);
 985
 986        message_id
 987    }
 988
 989    pub fn insert_invisible_continue_message(&mut self, cx: &mut Context<Self>) -> MessageId {
 990        let id = self.insert_message(
 991            Role::User,
 992            vec![MessageSegment::Text("Continue where you left off".into())],
 993            LoadedContext::default(),
 994            vec![],
 995            true,
 996            cx,
 997        );
 998        self.pending_checkpoint = None;
 999
1000        id
1001    }
1002
1003    pub fn insert_assistant_message(
1004        &mut self,
1005        segments: Vec<MessageSegment>,
1006        cx: &mut Context<Self>,
1007    ) -> MessageId {
1008        self.insert_message(
1009            Role::Assistant,
1010            segments,
1011            LoadedContext::default(),
1012            Vec::new(),
1013            false,
1014            cx,
1015        )
1016    }
1017
1018    pub fn insert_message(
1019        &mut self,
1020        role: Role,
1021        segments: Vec<MessageSegment>,
1022        loaded_context: LoadedContext,
1023        creases: Vec<MessageCrease>,
1024        is_hidden: bool,
1025        cx: &mut Context<Self>,
1026    ) -> MessageId {
1027        let id = self.next_message_id.post_inc();
1028        self.messages.push(Message {
1029            id,
1030            role,
1031            segments,
1032            loaded_context,
1033            creases,
1034            is_hidden,
1035        });
1036        self.touch_updated_at();
1037        cx.emit(ThreadEvent::MessageAdded(id));
1038        id
1039    }
1040
1041    pub fn edit_message(
1042        &mut self,
1043        id: MessageId,
1044        new_role: Role,
1045        new_segments: Vec<MessageSegment>,
1046        creases: Vec<MessageCrease>,
1047        loaded_context: Option<LoadedContext>,
1048        checkpoint: Option<GitStoreCheckpoint>,
1049        cx: &mut Context<Self>,
1050    ) -> bool {
1051        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
1052            return false;
1053        };
1054        message.role = new_role;
1055        message.segments = new_segments;
1056        message.creases = creases;
1057        if let Some(context) = loaded_context {
1058            message.loaded_context = context;
1059        }
1060        if let Some(git_checkpoint) = checkpoint {
1061            self.checkpoints_by_message.insert(
1062                id,
1063                ThreadCheckpoint {
1064                    message_id: id,
1065                    git_checkpoint,
1066                },
1067            );
1068        }
1069        self.touch_updated_at();
1070        cx.emit(ThreadEvent::MessageEdited(id));
1071        true
1072    }
1073
1074    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
1075        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
1076            return false;
1077        };
1078        self.messages.remove(index);
1079        self.touch_updated_at();
1080        cx.emit(ThreadEvent::MessageDeleted(id));
1081        true
1082    }
1083
1084    /// Returns the representation of this [`Thread`] in a textual form.
1085    ///
1086    /// This is the representation we use when attaching a thread as context to another thread.
1087    pub fn text(&self) -> String {
1088        let mut text = String::new();
1089
1090        for message in &self.messages {
1091            text.push_str(match message.role {
1092                language_model::Role::User => "User:",
1093                language_model::Role::Assistant => "Agent:",
1094                language_model::Role::System => "System:",
1095            });
1096            text.push('\n');
1097
1098            for segment in &message.segments {
1099                match segment {
1100                    MessageSegment::Text(content) => text.push_str(content),
1101                    MessageSegment::Thinking { text: content, .. } => {
1102                        text.push_str(&format!("<think>{}</think>", content))
1103                    }
1104                    MessageSegment::RedactedThinking(_) => {}
1105                }
1106            }
1107            text.push('\n');
1108        }
1109
1110        text
1111    }
1112
    /// Serializes this thread into a format for storage or telemetry.
    ///
    /// Waits for the initial project snapshot to resolve, then captures the
    /// messages (segments, tool uses/results, context text, creases), token
    /// usage, configured model, completion mode, and profile.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            // The snapshot future may still be in flight; await it off the
            // main read below.
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary().or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        // Each in-memory segment kind maps 1:1 to a
                        // serialized segment variant.
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking { text, signature } => {
                                    SerializedMessageSegment::Thinking {
                                        text: text.clone(),
                                        signature: signature.clone(),
                                    }
                                }
                                MessageSegment::RedactedThinking(data) => {
                                    SerializedMessageSegment::RedactedThinking {
                                        data: data.clone(),
                                    }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                                output: tool_result.output.clone(),
                            })
                            .collect(),
                        context: message.loaded_context.text.clone(),
                        // Creases are stored as their range plus display
                        // metadata (icon and label).
                        creases: message
                            .creases
                            .iter()
                            .map(|crease| SerializedCrease {
                                start: crease.range.start,
                                end: crease.range.end,
                                icon_path: crease.metadata.icon_path.clone(),
                                label: crease.metadata.label.clone(),
                            })
                            .collect(),
                        is_hidden: message.is_hidden,
                    })
                    .collect(),
                initial_project_snapshot,
                cumulative_token_usage: this.cumulative_token_usage,
                request_token_usage: this.request_token_usage.clone(),
                detailed_summary_state: this.detailed_summary_rx.borrow().clone(),
                exceeded_window_error: this.exceeded_window_error.clone(),
                model: this
                    .configured_model
                    .as_ref()
                    .map(|model| SerializedLanguageModel {
                        provider: model.provider.id().0.to_string(),
                        model: model.model.id().0.to_string(),
                    }),
                completion_mode: Some(this.completion_mode),
                tool_use_limit_reached: this.tool_use_limit_reached,
                profile: Some(this.profile.id().clone()),
            })
        })
    }
1198
    /// Returns how many model turns this thread may still take.
    pub fn remaining_turns(&self) -> u32 {
        self.remaining_turns
    }
1202
    /// Sets how many model turns this thread may still take.
    pub fn set_remaining_turns(&mut self, remaining_turns: u32) {
        self.remaining_turns = remaining_turns;
    }
1206
1207    pub fn send_to_model(
1208        &mut self,
1209        model: Arc<dyn LanguageModel>,
1210        intent: CompletionIntent,
1211        window: Option<AnyWindowHandle>,
1212        cx: &mut Context<Self>,
1213    ) {
1214        if self.remaining_turns == 0 {
1215            return;
1216        }
1217
1218        self.remaining_turns -= 1;
1219
1220        let request = self.to_completion_request(model.clone(), intent, cx);
1221
1222        self.stream_completion(request, model, window, cx);
1223    }
1224
1225    pub fn used_tools_since_last_user_message(&self) -> bool {
1226        for message in self.messages.iter().rev() {
1227            if self.tool_use.message_has_tool_results(message.id) {
1228                return true;
1229            } else if message.role == Role::User {
1230                return false;
1231            }
1232        }
1233
1234        false
1235    }
1236
    /// Builds the [`LanguageModelRequest`] for this thread: system prompt,
    /// conversation messages, interleaved tool uses/results, available
    /// tools, and completion mode.
    ///
    /// Emits a `ThreadEvent::ShowError` (but still returns a request) when
    /// the system prompt cannot be generated.
    pub fn to_completion_request(
        &self,
        model: Arc<dyn LanguageModel>,
        intent: CompletionIntent,
        cx: &mut Context<Self>,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            thread_id: Some(self.id.to_string()),
            prompt_id: Some(self.last_prompt_id.to_string()),
            intent: Some(intent),
            mode: None,
            messages: vec![],
            tools: Vec::new(),
            tool_choice: None,
            stop: Vec::new(),
            temperature: AgentSettings::temperature_for_model(&model, cx),
        };

        // The system prompt tells the model which tools exist by name.
        let available_tools = self.available_tools(cx, model.clone());
        let available_tool_names = available_tools
            .iter()
            .map(|tool| tool.name.clone())
            .collect();

        let model_context = &ModelContext {
            available_tools: available_tool_names,
        };

        // Generate and prepend the system prompt; failures are surfaced to
        // the user but do not abort request construction.
        if let Some(project_context) = self.project_context.borrow().as_ref() {
            match self
                .prompt_builder
                .generate_assistant_system_prompt(project_context, model_context)
            {
                Err(err) => {
                    let message = format!("{err:?}").into();
                    log::error!("{message}");
                    cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                        header: "Error generating system prompt".into(),
                        message,
                    }));
                }
                Ok(system_prompt) => {
                    request.messages.push(LanguageModelRequestMessage {
                        role: Role::System,
                        content: vec![MessageContent::Text(system_prompt)],
                        cache: true,
                    });
                }
            }
        } else {
            let message = "Context for system prompt unexpectedly not ready.".into();
            log::error!("{message}");
            cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                header: "Error generating system prompt".into(),
                message,
            }));
        }

        // Tracks the last request-message index that is safe to mark as a
        // prompt-cache boundary (i.e. no pending tool uses before it).
        let mut message_ix_to_cache = None;
        for message in &self.messages {
            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            message
                .loaded_context
                .add_to_request_message(&mut request_message);

            // Convert each stored segment into request content, skipping
            // empty text/thinking chunks.
            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => {
                        if !text.is_empty() {
                            request_message
                                .content
                                .push(MessageContent::Text(text.into()));
                        }
                    }
                    MessageSegment::Thinking { text, signature } => {
                        if !text.is_empty() {
                            request_message.content.push(MessageContent::Thinking {
                                text: text.into(),
                                signature: signature.clone(),
                            });
                        }
                    }
                    MessageSegment::RedactedThinking(data) => {
                        request_message
                            .content
                            .push(MessageContent::RedactedThinking(data.clone()));
                    }
                };
            }

            // Tool results are sent back as a separate user-role message
            // following the assistant message that issued the tool uses.
            let mut cache_message = true;
            let mut tool_results_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };
            for (tool_use, tool_result) in self.tool_use.tool_results(message.id) {
                if let Some(tool_result) = tool_result {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                    tool_results_message
                        .content
                        .push(MessageContent::ToolResult(LanguageModelToolResult {
                            tool_use_id: tool_use.id.clone(),
                            tool_name: tool_result.tool_name.clone(),
                            is_error: tool_result.is_error,
                            content: if tool_result.content.is_empty() {
                                // Surprisingly, the API fails if we return an empty string here.
                                // It thinks we are sending a tool use without a tool result.
                                "<Tool returned an empty string>".into()
                            } else {
                                tool_result.content.clone()
                            },
                            output: None,
                        }));
                } else {
                    // A still-pending tool use makes this message unsafe to
                    // cache and is omitted from the request.
                    cache_message = false;
                    log::debug!(
                        "skipped tool use {:?} because it is still pending",
                        tool_use
                    );
                }
            }

            if cache_message {
                message_ix_to_cache = Some(request.messages.len());
            }
            request.messages.push(request_message);

            if !tool_results_message.content.is_empty() {
                if cache_message {
                    message_ix_to_cache = Some(request.messages.len());
                }
                request.messages.push(tool_results_message);
            }
        }

        // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
        if let Some(message_ix_to_cache) = message_ix_to_cache {
            request.messages[message_ix_to_cache].cache = true;
        }

        request.tools = available_tools;
        request.mode = if model.supports_max_mode() {
            Some(self.completion_mode.into())
        } else {
            // Models without max-mode support always get normal mode.
            Some(CompletionMode::Normal.into())
        };

        request
    }
1394
1395    fn to_summarize_request(
1396        &self,
1397        model: &Arc<dyn LanguageModel>,
1398        intent: CompletionIntent,
1399        added_user_message: String,
1400        cx: &App,
1401    ) -> LanguageModelRequest {
1402        let mut request = LanguageModelRequest {
1403            thread_id: None,
1404            prompt_id: None,
1405            intent: Some(intent),
1406            mode: None,
1407            messages: vec![],
1408            tools: Vec::new(),
1409            tool_choice: None,
1410            stop: Vec::new(),
1411            temperature: AgentSettings::temperature_for_model(model, cx),
1412        };
1413
1414        for message in &self.messages {
1415            let mut request_message = LanguageModelRequestMessage {
1416                role: message.role,
1417                content: Vec::new(),
1418                cache: false,
1419            };
1420
1421            for segment in &message.segments {
1422                match segment {
1423                    MessageSegment::Text(text) => request_message
1424                        .content
1425                        .push(MessageContent::Text(text.clone())),
1426                    MessageSegment::Thinking { .. } => {}
1427                    MessageSegment::RedactedThinking(_) => {}
1428                }
1429            }
1430
1431            if request_message.content.is_empty() {
1432                continue;
1433            }
1434
1435            request.messages.push(request_message);
1436        }
1437
1438        request.messages.push(LanguageModelRequestMessage {
1439            role: Role::User,
1440            content: vec![MessageContent::Text(added_user_message)],
1441            cache: false,
1442        });
1443
1444        request
1445    }
1446
1447    pub fn stream_completion(
1448        &mut self,
1449        request: LanguageModelRequest,
1450        model: Arc<dyn LanguageModel>,
1451        window: Option<AnyWindowHandle>,
1452        cx: &mut Context<Self>,
1453    ) {
1454        self.tool_use_limit_reached = false;
1455
1456        let pending_completion_id = post_inc(&mut self.completion_count);
1457        let mut request_callback_parameters = if self.request_callback.is_some() {
1458            Some((request.clone(), Vec::new()))
1459        } else {
1460            None
1461        };
1462        let prompt_id = self.last_prompt_id.clone();
1463        let tool_use_metadata = ToolUseMetadata {
1464            model: model.clone(),
1465            thread_id: self.id.clone(),
1466            prompt_id: prompt_id.clone(),
1467        };
1468
1469        self.last_received_chunk_at = Some(Instant::now());
1470
1471        let task = cx.spawn(async move |thread, cx| {
1472            let stream_completion_future = model.stream_completion(request, &cx);
1473            let initial_token_usage =
1474                thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage);
1475            let stream_completion = async {
1476                let mut events = stream_completion_future.await?;
1477
1478                let mut stop_reason = StopReason::EndTurn;
1479                let mut current_token_usage = TokenUsage::default();
1480
1481                thread
1482                    .update(cx, |_thread, cx| {
1483                        cx.emit(ThreadEvent::NewRequest);
1484                    })
1485                    .ok();
1486
1487                let mut request_assistant_message_id = None;
1488
1489                while let Some(event) = events.next().await {
1490                    if let Some((_, response_events)) = request_callback_parameters.as_mut() {
1491                        response_events
1492                            .push(event.as_ref().map_err(|error| error.to_string()).cloned());
1493                    }
1494
1495                    thread.update(cx, |thread, cx| {
1496                        let event = match event {
1497                            Ok(event) => event,
1498                            Err(LanguageModelCompletionError::BadInputJson {
1499                                id,
1500                                tool_name,
1501                                raw_input: invalid_input_json,
1502                                json_parse_error,
1503                            }) => {
1504                                thread.receive_invalid_tool_json(
1505                                    id,
1506                                    tool_name,
1507                                    invalid_input_json,
1508                                    json_parse_error,
1509                                    window,
1510                                    cx,
1511                                );
1512                                return Ok(());
1513                            }
1514                            Err(LanguageModelCompletionError::Other(error)) => {
1515                                return Err(error);
1516                            }
1517                            Err(err @ LanguageModelCompletionError::RateLimit(..)) => {
1518                                return Err(err.into());
1519                            }
1520                        };
1521
1522                        match event {
1523                            LanguageModelCompletionEvent::StartMessage { .. } => {
1524                                request_assistant_message_id =
1525                                    Some(thread.insert_assistant_message(
1526                                        vec![MessageSegment::Text(String::new())],
1527                                        cx,
1528                                    ));
1529                            }
1530                            LanguageModelCompletionEvent::Stop(reason) => {
1531                                stop_reason = reason;
1532                            }
1533                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1534                                thread.update_token_usage_at_last_message(token_usage);
1535                                thread.cumulative_token_usage = thread.cumulative_token_usage
1536                                    + token_usage
1537                                    - current_token_usage;
1538                                current_token_usage = token_usage;
1539                            }
1540                            LanguageModelCompletionEvent::Text(chunk) => {
1541                                thread.received_chunk();
1542
1543                                cx.emit(ThreadEvent::ReceivedTextChunk);
1544                                if let Some(last_message) = thread.messages.last_mut() {
1545                                    if last_message.role == Role::Assistant
1546                                        && !thread.tool_use.has_tool_results(last_message.id)
1547                                    {
1548                                        last_message.push_text(&chunk);
1549                                        cx.emit(ThreadEvent::StreamedAssistantText(
1550                                            last_message.id,
1551                                            chunk,
1552                                        ));
1553                                    } else {
1554                                        // If we won't have an Assistant message yet, assume this chunk marks the beginning
1555                                        // of a new Assistant response.
1556                                        //
1557                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1558                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1559                                        request_assistant_message_id =
1560                                            Some(thread.insert_assistant_message(
1561                                                vec![MessageSegment::Text(chunk.to_string())],
1562                                                cx,
1563                                            ));
1564                                    };
1565                                }
1566                            }
1567                            LanguageModelCompletionEvent::Thinking {
1568                                text: chunk,
1569                                signature,
1570                            } => {
1571                                thread.received_chunk();
1572
1573                                if let Some(last_message) = thread.messages.last_mut() {
1574                                    if last_message.role == Role::Assistant
1575                                        && !thread.tool_use.has_tool_results(last_message.id)
1576                                    {
1577                                        last_message.push_thinking(&chunk, signature);
1578                                        cx.emit(ThreadEvent::StreamedAssistantThinking(
1579                                            last_message.id,
1580                                            chunk,
1581                                        ));
1582                                    } else {
1583                                        // If we won't have an Assistant message yet, assume this chunk marks the beginning
1584                                        // of a new Assistant response.
1585                                        //
1586                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1587                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1588                                        request_assistant_message_id =
1589                                            Some(thread.insert_assistant_message(
1590                                                vec![MessageSegment::Thinking {
1591                                                    text: chunk.to_string(),
1592                                                    signature,
1593                                                }],
1594                                                cx,
1595                                            ));
1596                                    };
1597                                }
1598                            }
1599                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
1600                                let last_assistant_message_id = request_assistant_message_id
1601                                    .unwrap_or_else(|| {
1602                                        let new_assistant_message_id =
1603                                            thread.insert_assistant_message(vec![], cx);
1604                                        request_assistant_message_id =
1605                                            Some(new_assistant_message_id);
1606                                        new_assistant_message_id
1607                                    });
1608
1609                                let tool_use_id = tool_use.id.clone();
1610                                let streamed_input = if tool_use.is_input_complete {
1611                                    None
1612                                } else {
1613                                    Some((&tool_use.input).clone())
1614                                };
1615
1616                                let ui_text = thread.tool_use.request_tool_use(
1617                                    last_assistant_message_id,
1618                                    tool_use,
1619                                    tool_use_metadata.clone(),
1620                                    cx,
1621                                );
1622
1623                                if let Some(input) = streamed_input {
1624                                    cx.emit(ThreadEvent::StreamedToolUse {
1625                                        tool_use_id,
1626                                        ui_text,
1627                                        input,
1628                                    });
1629                                }
1630                            }
1631                            LanguageModelCompletionEvent::StatusUpdate(status_update) => {
1632                                if let Some(completion) = thread
1633                                    .pending_completions
1634                                    .iter_mut()
1635                                    .find(|completion| completion.id == pending_completion_id)
1636                                {
1637                                    match status_update {
1638                                        CompletionRequestStatus::Queued {
1639                                            position,
1640                                        } => {
1641                                            completion.queue_state = QueueState::Queued { position };
1642                                        }
1643                                        CompletionRequestStatus::Started => {
1644                                            completion.queue_state =  QueueState::Started;
1645                                        }
1646                                        CompletionRequestStatus::Failed {
1647                                            code, message, request_id
1648                                        } => {
1649                                            anyhow::bail!("completion request failed. request_id: {request_id}, code: {code}, message: {message}");
1650                                        }
1651                                        CompletionRequestStatus::UsageUpdated {
1652                                            amount, limit
1653                                        } => {
1654                                            thread.update_model_request_usage(amount as u32, limit, cx);
1655                                        }
1656                                        CompletionRequestStatus::ToolUseLimitReached => {
1657                                            thread.tool_use_limit_reached = true;
1658                                            cx.emit(ThreadEvent::ToolUseLimitReached);
1659                                        }
1660                                    }
1661                                }
1662                            }
1663                        }
1664
1665                        thread.touch_updated_at();
1666                        cx.emit(ThreadEvent::StreamedCompletion);
1667                        cx.notify();
1668
1669                        thread.auto_capture_telemetry(cx);
1670                        Ok(())
1671                    })??;
1672
1673                    smol::future::yield_now().await;
1674                }
1675
1676                thread.update(cx, |thread, cx| {
1677                    thread.last_received_chunk_at = None;
1678                    thread
1679                        .pending_completions
1680                        .retain(|completion| completion.id != pending_completion_id);
1681
1682                    // If there is a response without tool use, summarize the message. Otherwise,
1683                    // allow two tool uses before summarizing.
1684                    if matches!(thread.summary, ThreadSummary::Pending)
1685                        && thread.messages.len() >= 2
1686                        && (!thread.has_pending_tool_uses() || thread.messages.len() >= 6)
1687                    {
1688                        thread.summarize(cx);
1689                    }
1690                })?;
1691
1692                anyhow::Ok(stop_reason)
1693            };
1694
1695            let result = stream_completion.await;
1696
1697            thread
1698                .update(cx, |thread, cx| {
1699                    thread.finalize_pending_checkpoint(cx);
1700                    match result.as_ref() {
1701                        Ok(stop_reason) => match stop_reason {
1702                            StopReason::ToolUse => {
1703                                let tool_uses = thread.use_pending_tools(window, cx, model.clone());
1704                                cx.emit(ThreadEvent::UsePendingTools { tool_uses });
1705                            }
1706                            StopReason::EndTurn | StopReason::MaxTokens  => {
1707                                thread.project.update(cx, |project, cx| {
1708                                    project.set_agent_location(None, cx);
1709                                });
1710                            }
1711                            StopReason::Refusal => {
1712                                thread.project.update(cx, |project, cx| {
1713                                    project.set_agent_location(None, cx);
1714                                });
1715
1716                                // Remove the turn that was refused.
1717                                //
1718                                // https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/handle-streaming-refusals#reset-context-after-refusal
1719                                {
1720                                    let mut messages_to_remove = Vec::new();
1721
1722                                    for (ix, message) in thread.messages.iter().enumerate().rev() {
1723                                        messages_to_remove.push(message.id);
1724
1725                                        if message.role == Role::User {
1726                                            if ix == 0 {
1727                                                break;
1728                                            }
1729
1730                                            if let Some(prev_message) = thread.messages.get(ix - 1) {
1731                                                if prev_message.role == Role::Assistant {
1732                                                    break;
1733                                                }
1734                                            }
1735                                        }
1736                                    }
1737
1738                                    for message_id in messages_to_remove {
1739                                        thread.delete_message(message_id, cx);
1740                                    }
1741                                }
1742
1743                                cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1744                                    header: "Language model refusal".into(),
1745                                    message: "Model refused to generate content for safety reasons.".into(),
1746                                }));
1747                            }
1748                        },
1749                        Err(error) => {
1750                            thread.project.update(cx, |project, cx| {
1751                                project.set_agent_location(None, cx);
1752                            });
1753
1754                            if error.is::<PaymentRequiredError>() {
1755                                cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1756                            } else if let Some(error) =
1757                                error.downcast_ref::<ModelRequestLimitReachedError>()
1758                            {
1759                                cx.emit(ThreadEvent::ShowError(
1760                                    ThreadError::ModelRequestLimitReached { plan: error.plan },
1761                                ));
1762                            } else if let Some(known_error) =
1763                                error.downcast_ref::<LanguageModelKnownError>()
1764                            {
1765                                match known_error {
1766                                    LanguageModelKnownError::ContextWindowLimitExceeded {
1767                                        tokens,
1768                                    } => {
1769                                        thread.exceeded_window_error = Some(ExceededWindowError {
1770                                            model_id: model.id(),
1771                                            token_count: *tokens,
1772                                        });
1773                                        cx.notify();
1774                                    }
1775                                }
1776                            } else {
1777                                let error_message = error
1778                                    .chain()
1779                                    .map(|err| err.to_string())
1780                                    .collect::<Vec<_>>()
1781                                    .join("\n");
1782                                cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1783                                    header: "Error interacting with language model".into(),
1784                                    message: SharedString::from(error_message.clone()),
1785                                }));
1786                            }
1787
1788                            thread.cancel_last_completion(window, cx);
1789                        }
1790                    }
1791
1792                    cx.emit(ThreadEvent::Stopped(result.map_err(Arc::new)));
1793
1794                    if let Some((request_callback, (request, response_events))) = thread
1795                        .request_callback
1796                        .as_mut()
1797                        .zip(request_callback_parameters.as_ref())
1798                    {
1799                        request_callback(request, response_events);
1800                    }
1801
1802                    thread.auto_capture_telemetry(cx);
1803
1804                    if let Ok(initial_usage) = initial_token_usage {
1805                        let usage = thread.cumulative_token_usage - initial_usage;
1806
1807                        telemetry::event!(
1808                            "Assistant Thread Completion",
1809                            thread_id = thread.id().to_string(),
1810                            prompt_id = prompt_id,
1811                            model = model.telemetry_id(),
1812                            model_provider = model.provider_id().to_string(),
1813                            input_tokens = usage.input_tokens,
1814                            output_tokens = usage.output_tokens,
1815                            cache_creation_input_tokens = usage.cache_creation_input_tokens,
1816                            cache_read_input_tokens = usage.cache_read_input_tokens,
1817                        );
1818                    }
1819                })
1820                .ok();
1821        });
1822
1823        self.pending_completions.push(PendingCompletion {
1824            id: pending_completion_id,
1825            queue_state: QueueState::Sending,
1826            _task: task,
1827        });
1828    }
1829
1830    pub fn summarize(&mut self, cx: &mut Context<Self>) {
1831        let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
1832            println!("No thread summary model");
1833            return;
1834        };
1835
1836        if !model.provider.is_authenticated(cx) {
1837            return;
1838        }
1839
1840        let added_user_message = include_str!("./prompts/summarize_thread_prompt.txt");
1841
1842        let request = self.to_summarize_request(
1843            &model.model,
1844            CompletionIntent::ThreadSummarization,
1845            added_user_message.into(),
1846            cx,
1847        );
1848
1849        self.summary = ThreadSummary::Generating;
1850
1851        self.pending_summary = cx.spawn(async move |this, cx| {
1852            let result = async {
1853                let mut messages = model.model.stream_completion(request, &cx).await?;
1854
1855                let mut new_summary = String::new();
1856                while let Some(event) = messages.next().await {
1857                    let Ok(event) = event else {
1858                        continue;
1859                    };
1860                    let text = match event {
1861                        LanguageModelCompletionEvent::Text(text) => text,
1862                        LanguageModelCompletionEvent::StatusUpdate(
1863                            CompletionRequestStatus::UsageUpdated { amount, limit },
1864                        ) => {
1865                            this.update(cx, |thread, cx| {
1866                                thread.update_model_request_usage(amount as u32, limit, cx);
1867                            })?;
1868                            continue;
1869                        }
1870                        _ => continue,
1871                    };
1872
1873                    let mut lines = text.lines();
1874                    new_summary.extend(lines.next());
1875
1876                    // Stop if the LLM generated multiple lines.
1877                    if lines.next().is_some() {
1878                        break;
1879                    }
1880                }
1881
1882                anyhow::Ok(new_summary)
1883            }
1884            .await;
1885
1886            this.update(cx, |this, cx| {
1887                match result {
1888                    Ok(new_summary) => {
1889                        if new_summary.is_empty() {
1890                            this.summary = ThreadSummary::Error;
1891                        } else {
1892                            this.summary = ThreadSummary::Ready(new_summary.into());
1893                        }
1894                    }
1895                    Err(err) => {
1896                        this.summary = ThreadSummary::Error;
1897                        log::error!("Failed to generate thread summary: {}", err);
1898                    }
1899                }
1900                cx.emit(ThreadEvent::SummaryGenerated);
1901            })
1902            .log_err()?;
1903
1904            Some(())
1905        });
1906    }
1907
    /// Kicks off generation of a detailed thread summary, unless one is already
    /// generated (or generating) for the latest message.
    ///
    /// Progress is published through the `detailed_summary_tx`/`detailed_summary_rx`
    /// watch channel, and the thread is saved afterwards so the summary can be
    /// reused later.
    pub fn start_generating_detailed_summary_if_needed(
        &mut self,
        thread_store: WeakEntity<ThreadStore>,
        cx: &mut Context<Self>,
    ) {
        // Nothing to summarize if the thread has no messages.
        let Some(last_message_id) = self.messages.last().map(|message| message.id) else {
            return;
        };

        match &*self.detailed_summary_rx.borrow() {
            DetailedSummaryState::Generating { message_id, .. }
            | DetailedSummaryState::Generated { message_id, .. }
                if *message_id == last_message_id =>
            {
                // Already up-to-date
                return;
            }
            _ => {}
        }

        // Detailed summaries use the dedicated summary model, not the thread's
        // main completion model.
        let Some(ConfiguredModel { model, provider }) =
            LanguageModelRegistry::read_global(cx).thread_summary_model()
        else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let added_user_message = include_str!("./prompts/summarize_thread_detailed_prompt.txt");

        let request = self.to_summarize_request(
            &model,
            CompletionIntent::ThreadContextSummarization,
            added_user_message.into(),
            cx,
        );

        // Publish the `Generating` state before spawning so observers see it
        // immediately, even if the task is slow to start.
        *self.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generating {
            message_id: last_message_id,
        };

        // Replace the detailed summarization task if there is one, cancelling it. It would probably
        // be better to allow the old task to complete, but this would require logic for choosing
        // which result to prefer (the old task could complete after the new one, resulting in a
        // stale summary).
        self.detailed_summary_task = cx.spawn(async move |thread, cx| {
            let stream = model.stream_completion_text(request, &cx);
            // If the request itself fails, roll the state back to `NotGenerated`
            // so a later call can retry.
            let Some(mut messages) = stream.await.log_err() else {
                thread
                    .update(cx, |thread, _cx| {
                        *thread.detailed_summary_tx.borrow_mut() =
                            DetailedSummaryState::NotGenerated;
                    })
                    .ok()?;
                return None;
            };

            let mut new_detailed_summary = String::new();

            // Failed chunks are logged and skipped rather than aborting the
            // whole summary.
            while let Some(chunk) = messages.stream.next().await {
                if let Some(chunk) = chunk.log_err() {
                    new_detailed_summary.push_str(&chunk);
                }
            }

            thread
                .update(cx, |thread, _cx| {
                    *thread.detailed_summary_tx.borrow_mut() = DetailedSummaryState::Generated {
                        text: new_detailed_summary.into(),
                        message_id: last_message_id,
                    };
                })
                .ok()?;

            // Save thread so its summary can be reused later
            if let Some(thread) = thread.upgrade() {
                if let Ok(Ok(save_task)) = cx.update(|cx| {
                    thread_store
                        .update(cx, |thread_store, cx| thread_store.save_thread(&thread, cx))
                }) {
                    save_task.await.log_err();
                }
            }

            Some(())
        });
    }
1997
1998    pub async fn wait_for_detailed_summary_or_text(
1999        this: &Entity<Self>,
2000        cx: &mut AsyncApp,
2001    ) -> Option<SharedString> {
2002        let mut detailed_summary_rx = this
2003            .read_with(cx, |this, _cx| this.detailed_summary_rx.clone())
2004            .ok()?;
2005        loop {
2006            match detailed_summary_rx.recv().await? {
2007                DetailedSummaryState::Generating { .. } => {}
2008                DetailedSummaryState::NotGenerated => {
2009                    return this.read_with(cx, |this, _cx| this.text().into()).ok();
2010                }
2011                DetailedSummaryState::Generated { text, .. } => return Some(text),
2012            }
2013        }
2014    }
2015
2016    pub fn latest_detailed_summary_or_text(&self) -> SharedString {
2017        self.detailed_summary_rx
2018            .borrow()
2019            .text()
2020            .unwrap_or_else(|| self.text().into())
2021    }
2022
2023    pub fn is_generating_detailed_summary(&self) -> bool {
2024        matches!(
2025            &*self.detailed_summary_rx.borrow(),
2026            DetailedSummaryState::Generating { .. }
2027        )
2028    }
2029
2030    pub fn use_pending_tools(
2031        &mut self,
2032        window: Option<AnyWindowHandle>,
2033        cx: &mut Context<Self>,
2034        model: Arc<dyn LanguageModel>,
2035    ) -> Vec<PendingToolUse> {
2036        self.auto_capture_telemetry(cx);
2037        let request =
2038            Arc::new(self.to_completion_request(model.clone(), CompletionIntent::ToolResults, cx));
2039        let pending_tool_uses = self
2040            .tool_use
2041            .pending_tool_uses()
2042            .into_iter()
2043            .filter(|tool_use| tool_use.status.is_idle())
2044            .cloned()
2045            .collect::<Vec<_>>();
2046
2047        for tool_use in pending_tool_uses.iter() {
2048            if let Some(tool) = self.tools.read(cx).tool(&tool_use.name, cx) {
2049                if tool.needs_confirmation(&tool_use.input, cx)
2050                    && !AgentSettings::get_global(cx).always_allow_tool_actions
2051                {
2052                    self.tool_use.confirm_tool_use(
2053                        tool_use.id.clone(),
2054                        tool_use.ui_text.clone(),
2055                        tool_use.input.clone(),
2056                        request.clone(),
2057                        tool,
2058                    );
2059                    cx.emit(ThreadEvent::ToolConfirmationNeeded);
2060                } else {
2061                    self.run_tool(
2062                        tool_use.id.clone(),
2063                        tool_use.ui_text.clone(),
2064                        tool_use.input.clone(),
2065                        request.clone(),
2066                        tool,
2067                        model.clone(),
2068                        window,
2069                        cx,
2070                    );
2071                }
2072            } else {
2073                self.handle_hallucinated_tool_use(
2074                    tool_use.id.clone(),
2075                    tool_use.name.clone(),
2076                    window,
2077                    cx,
2078                );
2079            }
2080        }
2081
2082        pending_tool_uses
2083    }
2084
2085    pub fn handle_hallucinated_tool_use(
2086        &mut self,
2087        tool_use_id: LanguageModelToolUseId,
2088        hallucinated_tool_name: Arc<str>,
2089        window: Option<AnyWindowHandle>,
2090        cx: &mut Context<Thread>,
2091    ) {
2092        let available_tools = self.profile.enabled_tools(cx);
2093
2094        let tool_list = available_tools
2095            .iter()
2096            .map(|tool| format!("- {}: {}", tool.name(), tool.description()))
2097            .collect::<Vec<_>>()
2098            .join("\n");
2099
2100        let error_message = format!(
2101            "The tool '{}' doesn't exist or is not enabled. Available tools:\n{}",
2102            hallucinated_tool_name, tool_list
2103        );
2104
2105        let pending_tool_use = self.tool_use.insert_tool_output(
2106            tool_use_id.clone(),
2107            hallucinated_tool_name,
2108            Err(anyhow!("Missing tool call: {error_message}")),
2109            self.configured_model.as_ref(),
2110        );
2111
2112        cx.emit(ThreadEvent::MissingToolUse {
2113            tool_use_id: tool_use_id.clone(),
2114            ui_text: error_message.into(),
2115        });
2116
2117        self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2118    }
2119
2120    pub fn receive_invalid_tool_json(
2121        &mut self,
2122        tool_use_id: LanguageModelToolUseId,
2123        tool_name: Arc<str>,
2124        invalid_json: Arc<str>,
2125        error: String,
2126        window: Option<AnyWindowHandle>,
2127        cx: &mut Context<Thread>,
2128    ) {
2129        log::error!("The model returned invalid input JSON: {invalid_json}");
2130
2131        let pending_tool_use = self.tool_use.insert_tool_output(
2132            tool_use_id.clone(),
2133            tool_name,
2134            Err(anyhow!("Error parsing input JSON: {error}")),
2135            self.configured_model.as_ref(),
2136        );
2137        let ui_text = if let Some(pending_tool_use) = &pending_tool_use {
2138            pending_tool_use.ui_text.clone()
2139        } else {
2140            log::error!(
2141                "There was no pending tool use for tool use {tool_use_id}, even though it finished (with invalid input JSON)."
2142            );
2143            format!("Unknown tool {}", tool_use_id).into()
2144        };
2145
2146        cx.emit(ThreadEvent::InvalidToolInput {
2147            tool_use_id: tool_use_id.clone(),
2148            ui_text,
2149            invalid_input_json: invalid_json,
2150        });
2151
2152        self.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2153    }
2154
2155    pub fn run_tool(
2156        &mut self,
2157        tool_use_id: LanguageModelToolUseId,
2158        ui_text: impl Into<SharedString>,
2159        input: serde_json::Value,
2160        request: Arc<LanguageModelRequest>,
2161        tool: Arc<dyn Tool>,
2162        model: Arc<dyn LanguageModel>,
2163        window: Option<AnyWindowHandle>,
2164        cx: &mut Context<Thread>,
2165    ) {
2166        let task =
2167            self.spawn_tool_use(tool_use_id.clone(), request, input, tool, model, window, cx);
2168        self.tool_use
2169            .run_pending_tool(tool_use_id, ui_text.into(), task);
2170    }
2171
2172    fn spawn_tool_use(
2173        &mut self,
2174        tool_use_id: LanguageModelToolUseId,
2175        request: Arc<LanguageModelRequest>,
2176        input: serde_json::Value,
2177        tool: Arc<dyn Tool>,
2178        model: Arc<dyn LanguageModel>,
2179        window: Option<AnyWindowHandle>,
2180        cx: &mut Context<Thread>,
2181    ) -> Task<()> {
2182        let tool_name: Arc<str> = tool.name().into();
2183
2184        let tool_result = tool.run(
2185            input,
2186            request,
2187            self.project.clone(),
2188            self.action_log.clone(),
2189            model,
2190            window,
2191            cx,
2192        );
2193
2194        // Store the card separately if it exists
2195        if let Some(card) = tool_result.card.clone() {
2196            self.tool_use
2197                .insert_tool_result_card(tool_use_id.clone(), card);
2198        }
2199
2200        cx.spawn({
2201            async move |thread: WeakEntity<Thread>, cx| {
2202                let output = tool_result.output.await;
2203
2204                thread
2205                    .update(cx, |thread, cx| {
2206                        let pending_tool_use = thread.tool_use.insert_tool_output(
2207                            tool_use_id.clone(),
2208                            tool_name,
2209                            output,
2210                            thread.configured_model.as_ref(),
2211                        );
2212                        thread.tool_finished(tool_use_id, pending_tool_use, false, window, cx);
2213                    })
2214                    .ok();
2215            }
2216        })
2217    }
2218
2219    fn tool_finished(
2220        &mut self,
2221        tool_use_id: LanguageModelToolUseId,
2222        pending_tool_use: Option<PendingToolUse>,
2223        canceled: bool,
2224        window: Option<AnyWindowHandle>,
2225        cx: &mut Context<Self>,
2226    ) {
2227        if self.all_tools_finished() {
2228            if let Some(ConfiguredModel { model, .. }) = self.configured_model.as_ref() {
2229                if !canceled {
2230                    self.send_to_model(model.clone(), CompletionIntent::ToolResults, window, cx);
2231                }
2232                self.auto_capture_telemetry(cx);
2233            }
2234        }
2235
2236        cx.emit(ThreadEvent::ToolFinished {
2237            tool_use_id,
2238            pending_tool_use,
2239        });
2240    }
2241
2242    /// Cancels the last pending completion, if there are any pending.
2243    ///
2244    /// Returns whether a completion was canceled.
2245    pub fn cancel_last_completion(
2246        &mut self,
2247        window: Option<AnyWindowHandle>,
2248        cx: &mut Context<Self>,
2249    ) -> bool {
2250        let mut canceled = self.pending_completions.pop().is_some();
2251
2252        for pending_tool_use in self.tool_use.cancel_pending() {
2253            canceled = true;
2254            self.tool_finished(
2255                pending_tool_use.id.clone(),
2256                Some(pending_tool_use),
2257                true,
2258                window,
2259                cx,
2260            );
2261        }
2262
2263        if canceled {
2264            cx.emit(ThreadEvent::CompletionCanceled);
2265
2266            // When canceled, we always want to insert the checkpoint.
2267            // (We skip over finalize_pending_checkpoint, because it
2268            // would conclude we didn't have anything to insert here.)
2269            if let Some(checkpoint) = self.pending_checkpoint.take() {
2270                self.insert_checkpoint(checkpoint, cx);
2271            }
2272        } else {
2273            self.finalize_pending_checkpoint(cx);
2274        }
2275
2276        canceled
2277    }
2278
    /// Signals that any in-progress editing should be canceled.
    ///
    /// This method is used to notify listeners (like ActiveThread) that
    /// they should cancel any editing operations.
    pub fn cancel_editing(&mut self, cx: &mut Context<Self>) {
        // Pure notification: no thread state is changed here.
        cx.emit(ThreadEvent::CancelEditing);
    }
2286
    /// Returns the thread-level feedback rating, if one has been recorded.
    pub fn feedback(&self) -> Option<ThreadFeedback> {
        self.feedback
    }
2290
    /// Returns the feedback recorded for a specific message, if any.
    pub fn message_feedback(&self, message_id: MessageId) -> Option<ThreadFeedback> {
        self.message_feedback.get(&message_id).copied()
    }
2294
    /// Records thumbs-up/down feedback for a single message and reports it,
    /// together with a serialized thread and project snapshot, to telemetry.
    ///
    /// Returns a background task that completes when the event has been sent.
    /// NOTE: the bare identifiers passed to `telemetry::event!` become the
    /// telemetry field names — do not rename these locals.
    pub fn report_message_feedback(
        &mut self,
        message_id: MessageId,
        feedback: ThreadFeedback,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        // No-op if identical feedback was already recorded for this message.
        if self.message_feedback.get(&message_id) == Some(&feedback) {
            return Task::ready(Ok(()));
        }

        // Capture everything that needs `cx` up front; the report itself runs
        // on a background task.
        let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
        let serialized_thread = self.serialize(cx);
        let thread_id = self.id().clone();
        let client = self.project.read(cx).client();

        let enabled_tool_names: Vec<String> = self
            .profile
            .enabled_tools(cx)
            .iter()
            .map(|tool| tool.name())
            .collect();

        self.message_feedback.insert(message_id, feedback);

        cx.notify();

        let message_content = self
            .message(message_id)
            .map(|msg| msg.to_string())
            .unwrap_or_default();

        cx.background_spawn(async move {
            let final_project_snapshot = final_project_snapshot.await;
            let serialized_thread = serialized_thread.await?;
            // A serialization failure degrades to a null payload rather than
            // dropping the event.
            let thread_data =
                serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);

            let rating = match feedback {
                ThreadFeedback::Positive => "positive",
                ThreadFeedback::Negative => "negative",
            };
            telemetry::event!(
                "Assistant Thread Rated",
                rating,
                thread_id,
                enabled_tool_names,
                message_id = message_id.0,
                message_content,
                thread_data,
                final_project_snapshot
            );
            // Flush immediately so the rating isn't lost if the app quits.
            client.telemetry().flush_events().await;

            Ok(())
        })
    }
2351
    /// Records thread-level feedback. If the thread contains an assistant
    /// message, the feedback is attributed to the most recent one; otherwise
    /// it is reported against the thread as a whole.
    ///
    /// NOTE: the bare identifiers passed to `telemetry::event!` become the
    /// telemetry field names — do not rename these locals.
    pub fn report_feedback(
        &mut self,
        feedback: ThreadFeedback,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        // Prefer attributing the feedback to the latest assistant message.
        let last_assistant_message_id = self
            .messages
            .iter()
            .rev()
            .find(|msg| msg.role == Role::Assistant)
            .map(|msg| msg.id);

        if let Some(message_id) = last_assistant_message_id {
            self.report_message_feedback(message_id, feedback, cx)
        } else {
            // No assistant message yet: record thread-level feedback and report
            // a reduced event (no per-message fields).
            let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
            let serialized_thread = self.serialize(cx);
            let thread_id = self.id().clone();
            let client = self.project.read(cx).client();
            self.feedback = Some(feedback);
            cx.notify();

            cx.background_spawn(async move {
                let final_project_snapshot = final_project_snapshot.await;
                let serialized_thread = serialized_thread.await?;
                // A serialization failure degrades to a null payload rather
                // than dropping the event.
                let thread_data = serde_json::to_value(serialized_thread)
                    .unwrap_or_else(|_| serde_json::Value::Null);

                let rating = match feedback {
                    ThreadFeedback::Positive => "positive",
                    ThreadFeedback::Negative => "negative",
                };
                telemetry::event!(
                    "Assistant Thread Rated",
                    rating,
                    thread_id,
                    thread_data,
                    final_project_snapshot
                );
                // Flush immediately so the rating isn't lost if the app quits.
                client.telemetry().flush_events().await;

                Ok(())
            })
        }
    }
2397
2398    /// Create a snapshot of the current project state including git information and unsaved buffers.
2399    fn project_snapshot(
2400        project: Entity<Project>,
2401        cx: &mut Context<Self>,
2402    ) -> Task<Arc<ProjectSnapshot>> {
2403        let git_store = project.read(cx).git_store().clone();
2404        let worktree_snapshots: Vec<_> = project
2405            .read(cx)
2406            .visible_worktrees(cx)
2407            .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
2408            .collect();
2409
2410        cx.spawn(async move |_, cx| {
2411            let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
2412
2413            let mut unsaved_buffers = Vec::new();
2414            cx.update(|app_cx| {
2415                let buffer_store = project.read(app_cx).buffer_store();
2416                for buffer_handle in buffer_store.read(app_cx).buffers() {
2417                    let buffer = buffer_handle.read(app_cx);
2418                    if buffer.is_dirty() {
2419                        if let Some(file) = buffer.file() {
2420                            let path = file.path().to_string_lossy().to_string();
2421                            unsaved_buffers.push(path);
2422                        }
2423                    }
2424                }
2425            })
2426            .ok();
2427
2428            Arc::new(ProjectSnapshot {
2429                worktree_snapshots,
2430                unsaved_buffer_paths: unsaved_buffers,
2431                timestamp: Utc::now(),
2432            })
2433        })
2434    }
2435
    /// Captures a [`WorktreeSnapshot`] for a single worktree: its absolute
    /// path plus, when the worktree lies inside a local git repository, the
    /// remote URL, HEAD SHA, current branch, and a HEAD-to-worktree diff.
    fn worktree_snapshot(
        worktree: Entity<project::Worktree>,
        git_store: Entity<GitStore>,
        cx: &App,
    ) -> Task<WorktreeSnapshot> {
        cx.spawn(async move |cx| {
            // Get worktree path and snapshot
            let worktree_info = cx.update(|app_cx| {
                let worktree = worktree.read(app_cx);
                let path = worktree.abs_path().to_string_lossy().to_string();
                let snapshot = worktree.snapshot();
                (path, snapshot)
            });

            // If the app context is already gone, return an empty snapshot.
            let Ok((worktree_path, _snapshot)) = worktree_info else {
                return WorktreeSnapshot {
                    worktree_path: String::new(),
                    git_state: None,
                };
            };

            // Locate the repository (if any) whose root contains this
            // worktree's absolute path.
            let git_state = git_store
                .update(cx, |git_store, cx| {
                    git_store
                        .repositories()
                        .values()
                        .find(|repo| {
                            repo.read(cx)
                                .abs_path_to_repo_path(&worktree.read(cx).abs_path())
                                .is_some()
                        })
                        .cloned()
                })
                .ok()
                .flatten()
                .map(|repo| {
                    repo.update(cx, |repo, _| {
                        let current_branch =
                            repo.branch.as_ref().map(|branch| branch.name().to_owned());
                        // Query git details on the repository's job queue; only
                        // a `RepositoryState::Local` exposes a backend to query.
                        repo.send_job(None, |state, _| async move {
                            let RepositoryState::Local { backend, .. } = state else {
                                return GitState {
                                    remote_url: None,
                                    head_sha: None,
                                    current_branch,
                                    diff: None,
                                };
                            };

                            let remote_url = backend.remote_url("origin");
                            let head_sha = backend.head_sha().await;
                            let diff = backend.diff(DiffType::HeadToWorktree).await.ok();

                            GitState {
                                remote_url,
                                head_sha,
                                current_branch,
                                diff,
                            }
                        })
                    })
                });

            // Unwrap the nested Option/Result/Future layers; any failure along
            // the way degrades to `git_state: None` instead of propagating.
            let git_state = match git_state {
                Some(git_state) => match git_state.ok() {
                    Some(git_state) => git_state.await.ok(),
                    None => None,
                },
                None => None,
            };

            WorktreeSnapshot {
                worktree_path,
                git_state,
            }
        })
    }
2513
2514    pub fn to_markdown(&self, cx: &App) -> Result<String> {
2515        let mut markdown = Vec::new();
2516
2517        let summary = self.summary().or_default();
2518        writeln!(markdown, "# {summary}\n")?;
2519
2520        for message in self.messages() {
2521            writeln!(
2522                markdown,
2523                "## {role}\n",
2524                role = match message.role {
2525                    Role::User => "User",
2526                    Role::Assistant => "Agent",
2527                    Role::System => "System",
2528                }
2529            )?;
2530
2531            if !message.loaded_context.text.is_empty() {
2532                writeln!(markdown, "{}", message.loaded_context.text)?;
2533            }
2534
2535            if !message.loaded_context.images.is_empty() {
2536                writeln!(
2537                    markdown,
2538                    "\n{} images attached as context.\n",
2539                    message.loaded_context.images.len()
2540                )?;
2541            }
2542
2543            for segment in &message.segments {
2544                match segment {
2545                    MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
2546                    MessageSegment::Thinking { text, .. } => {
2547                        writeln!(markdown, "<think>\n{}\n</think>\n", text)?
2548                    }
2549                    MessageSegment::RedactedThinking(_) => {}
2550                }
2551            }
2552
2553            for tool_use in self.tool_uses_for_message(message.id, cx) {
2554                writeln!(
2555                    markdown,
2556                    "**Use Tool: {} ({})**",
2557                    tool_use.name, tool_use.id
2558                )?;
2559                writeln!(markdown, "```json")?;
2560                writeln!(
2561                    markdown,
2562                    "{}",
2563                    serde_json::to_string_pretty(&tool_use.input)?
2564                )?;
2565                writeln!(markdown, "```")?;
2566            }
2567
2568            for tool_result in self.tool_results_for_message(message.id) {
2569                write!(markdown, "\n**Tool Results: {}", tool_result.tool_use_id)?;
2570                if tool_result.is_error {
2571                    write!(markdown, " (Error)")?;
2572                }
2573
2574                writeln!(markdown, "**\n")?;
2575                match &tool_result.content {
2576                    LanguageModelToolResultContent::Text(text) => {
2577                        writeln!(markdown, "{text}")?;
2578                    }
2579                    LanguageModelToolResultContent::Image(image) => {
2580                        writeln!(markdown, "![Image](data:base64,{})", image.source)?;
2581                    }
2582                }
2583
2584                if let Some(output) = tool_result.output.as_ref() {
2585                    writeln!(
2586                        markdown,
2587                        "\n\nDebug Output:\n\n```json\n{}\n```\n",
2588                        serde_json::to_string_pretty(output)?
2589                    )?;
2590                }
2591            }
2592        }
2593
2594        Ok(String::from_utf8_lossy(&markdown).to_string())
2595    }
2596
2597    pub fn keep_edits_in_range(
2598        &mut self,
2599        buffer: Entity<language::Buffer>,
2600        buffer_range: Range<language::Anchor>,
2601        cx: &mut Context<Self>,
2602    ) {
2603        self.action_log.update(cx, |action_log, cx| {
2604            action_log.keep_edits_in_range(buffer, buffer_range, cx)
2605        });
2606    }
2607
2608    pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
2609        self.action_log
2610            .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
2611    }
2612
2613    pub fn reject_edits_in_ranges(
2614        &mut self,
2615        buffer: Entity<language::Buffer>,
2616        buffer_ranges: Vec<Range<language::Anchor>>,
2617        cx: &mut Context<Self>,
2618    ) -> Task<Result<()>> {
2619        self.action_log.update(cx, |action_log, cx| {
2620            action_log.reject_edits_in_ranges(buffer, buffer_ranges, cx)
2621        })
2622    }
2623
    /// The [`ActionLog`] tracking edits made by this thread.
    pub fn action_log(&self) -> &Entity<ActionLog> {
        &self.action_log
    }
2627
    /// The project this thread operates on.
    pub fn project(&self) -> &Entity<Project> {
        &self.project
    }
2631
2632    pub fn auto_capture_telemetry(&mut self, cx: &mut Context<Self>) {
2633        if !cx.has_flag::<feature_flags::ThreadAutoCaptureFeatureFlag>() {
2634            return;
2635        }
2636
2637        let now = Instant::now();
2638        if let Some(last) = self.last_auto_capture_at {
2639            if now.duration_since(last).as_secs() < 10 {
2640                return;
2641            }
2642        }
2643
2644        self.last_auto_capture_at = Some(now);
2645
2646        let thread_id = self.id().clone();
2647        let github_login = self
2648            .project
2649            .read(cx)
2650            .user_store()
2651            .read(cx)
2652            .current_user()
2653            .map(|user| user.github_login.clone());
2654        let client = self.project.read(cx).client();
2655        let serialize_task = self.serialize(cx);
2656
2657        cx.background_executor()
2658            .spawn(async move {
2659                if let Ok(serialized_thread) = serialize_task.await {
2660                    if let Ok(thread_data) = serde_json::to_value(serialized_thread) {
2661                        telemetry::event!(
2662                            "Agent Thread Auto-Captured",
2663                            thread_id = thread_id.to_string(),
2664                            thread_data = thread_data,
2665                            auto_capture_reason = "tracked_user",
2666                            github_login = github_login
2667                        );
2668
2669                        client.telemetry().flush_events().await;
2670                    }
2671                }
2672            })
2673            .detach();
2674    }
2675
    /// Total token usage accumulated across all requests in this thread.
    pub fn cumulative_token_usage(&self) -> TokenUsage {
        self.cumulative_token_usage
    }
2679
2680    pub fn token_usage_up_to_message(&self, message_id: MessageId) -> TotalTokenUsage {
2681        let Some(model) = self.configured_model.as_ref() else {
2682            return TotalTokenUsage::default();
2683        };
2684
2685        let max = model.model.max_token_count();
2686
2687        let index = self
2688            .messages
2689            .iter()
2690            .position(|msg| msg.id == message_id)
2691            .unwrap_or(0);
2692
2693        if index == 0 {
2694            return TotalTokenUsage { total: 0, max };
2695        }
2696
2697        let token_usage = &self
2698            .request_token_usage
2699            .get(index - 1)
2700            .cloned()
2701            .unwrap_or_default();
2702
2703        TotalTokenUsage {
2704            total: token_usage.total_tokens(),
2705            max,
2706        }
2707    }
2708
2709    pub fn total_token_usage(&self) -> Option<TotalTokenUsage> {
2710        let model = self.configured_model.as_ref()?;
2711
2712        let max = model.model.max_token_count();
2713
2714        if let Some(exceeded_error) = &self.exceeded_window_error {
2715            if model.model.id() == exceeded_error.model_id {
2716                return Some(TotalTokenUsage {
2717                    total: exceeded_error.token_count,
2718                    max,
2719                });
2720            }
2721        }
2722
2723        let total = self
2724            .token_usage_at_last_message()
2725            .unwrap_or_default()
2726            .total_tokens();
2727
2728        Some(TotalTokenUsage { total, max })
2729    }
2730
2731    fn token_usage_at_last_message(&self) -> Option<TokenUsage> {
2732        self.request_token_usage
2733            .get(self.messages.len().saturating_sub(1))
2734            .or_else(|| self.request_token_usage.last())
2735            .cloned()
2736    }
2737
2738    fn update_token_usage_at_last_message(&mut self, token_usage: TokenUsage) {
2739        let placeholder = self.token_usage_at_last_message().unwrap_or_default();
2740        self.request_token_usage
2741            .resize(self.messages.len(), placeholder);
2742
2743        if let Some(last) = self.request_token_usage.last_mut() {
2744            *last = token_usage;
2745        }
2746    }
2747
2748    fn update_model_request_usage(&self, amount: u32, limit: UsageLimit, cx: &mut Context<Self>) {
2749        self.project.update(cx, |project, cx| {
2750            project.user_store().update(cx, |user_store, cx| {
2751                user_store.update_model_request_usage(
2752                    ModelRequestUsage(RequestUsage {
2753                        amount: amount as i32,
2754                        limit,
2755                    }),
2756                    cx,
2757                )
2758            })
2759        });
2760    }
2761
2762    pub fn deny_tool_use(
2763        &mut self,
2764        tool_use_id: LanguageModelToolUseId,
2765        tool_name: Arc<str>,
2766        window: Option<AnyWindowHandle>,
2767        cx: &mut Context<Self>,
2768    ) {
2769        let err = Err(anyhow::anyhow!(
2770            "Permission to run tool action denied by user"
2771        ));
2772
2773        self.tool_use.insert_tool_output(
2774            tool_use_id.clone(),
2775            tool_name,
2776            err,
2777            self.configured_model.as_ref(),
2778        );
2779        self.tool_finished(tool_use_id.clone(), None, true, window, cx);
2780    }
2781}
2782
/// User-facing errors surfaced while running a thread.
#[derive(Debug, Clone, Error)]
pub enum ThreadError {
    /// Payment is required before further model requests are accepted.
    #[error("Payment required")]
    PaymentRequired,
    /// The model request limit for the user's current plan was reached.
    #[error("Model request limit reached")]
    ModelRequestLimitReached { plan: Plan },
    /// A generic error carrying a header and body for display.
    #[error("Message {header}: {message}")]
    Message {
        header: SharedString,
        message: SharedString,
    },
}
2795
/// Events emitted by a [`Thread`] (see the `EventEmitter<ThreadEvent>` impl)
/// so observers can react to completion progress and state changes.
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    /// An error that should be surfaced to the user.
    ShowError(ThreadError),
    StreamedCompletion,
    ReceivedTextChunk,
    NewRequest,
    /// A chunk of streamed assistant text for the given message.
    StreamedAssistantText(MessageId, String),
    /// A chunk of streamed assistant thinking text for the given message.
    StreamedAssistantThinking(MessageId, String),
    StreamedToolUse {
        tool_use_id: LanguageModelToolUseId,
        ui_text: Arc<str>,
        input: serde_json::Value,
    },
    MissingToolUse {
        tool_use_id: LanguageModelToolUseId,
        ui_text: Arc<str>,
    },
    /// The model produced tool input that failed to parse; the raw JSON is
    /// kept for display/debugging.
    InvalidToolInput {
        tool_use_id: LanguageModelToolUseId,
        ui_text: Arc<str>,
        invalid_input_json: Arc<str>,
    },
    /// The completion finished, either with a stop reason or an error.
    Stopped(Result<StopReason, Arc<anyhow::Error>>),
    MessageAdded(MessageId),
    MessageEdited(MessageId),
    MessageDeleted(MessageId),
    SummaryGenerated,
    SummaryChanged,
    UsePendingTools {
        tool_uses: Vec<PendingToolUse>,
    },
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
    },
    CheckpointChanged,
    ToolConfirmationNeeded,
    ToolUseLimitReached,
    CancelEditing,
    CompletionCanceled,
    ProfileChanged,
}
2840
// Lets observers subscribe to the `ThreadEvent`s emitted by `Thread`.
impl EventEmitter<ThreadEvent> for Thread {}
2842
/// State for a completion request that is currently in flight.
struct PendingCompletion {
    // Identifier distinguishing this completion from others in the thread.
    id: usize,
    // Queueing status of the request (type defined elsewhere in this file).
    queue_state: QueueState,
    // Keeps the underlying task alive; presumably dropping it cancels the
    // completion (gpui `Task` semantics) — confirm.
    _task: Task<()>,
}
2848
2849/// Resolves tool name conflicts by ensuring all tool names are unique.
2850///
2851/// When multiple tools have the same name, this function applies the following rules:
2852/// 1. Native tools always keep their original name
2853/// 2. Context server tools get prefixed with their server ID and an underscore
2854/// 3. All tool names are truncated to MAX_TOOL_NAME_LENGTH (64 characters)
2855/// 4. If conflicts still exist after prefixing, the conflicting tools are filtered out
2856///
2857/// Note: This function assumes that built-in tools occur before MCP tools in the tools list.
2858fn resolve_tool_name_conflicts(tools: &[Arc<dyn Tool>]) -> Vec<(String, Arc<dyn Tool>)> {
2859    fn resolve_tool_name(tool: &Arc<dyn Tool>) -> String {
2860        let mut tool_name = tool.name();
2861        tool_name.truncate(MAX_TOOL_NAME_LENGTH);
2862        tool_name
2863    }
2864
2865    const MAX_TOOL_NAME_LENGTH: usize = 64;
2866
2867    let mut duplicated_tool_names = HashSet::default();
2868    let mut seen_tool_names = HashSet::default();
2869    for tool in tools {
2870        let tool_name = resolve_tool_name(tool);
2871        if seen_tool_names.contains(&tool_name) {
2872            debug_assert!(
2873                tool.source() != assistant_tool::ToolSource::Native,
2874                "There are two built-in tools with the same name: {}",
2875                tool_name
2876            );
2877            duplicated_tool_names.insert(tool_name);
2878        } else {
2879            seen_tool_names.insert(tool_name);
2880        }
2881    }
2882
2883    if duplicated_tool_names.is_empty() {
2884        return tools
2885            .into_iter()
2886            .map(|tool| (resolve_tool_name(tool), tool.clone()))
2887            .collect();
2888    }
2889
2890    tools
2891        .into_iter()
2892        .filter_map(|tool| {
2893            let mut tool_name = resolve_tool_name(tool);
2894            if !duplicated_tool_names.contains(&tool_name) {
2895                return Some((tool_name, tool.clone()));
2896            }
2897            match tool.source() {
2898                assistant_tool::ToolSource::Native => {
2899                    // Built-in tools always keep their original name
2900                    Some((tool_name, tool.clone()))
2901                }
2902                assistant_tool::ToolSource::ContextServer { id } => {
2903                    // Context server tools are prefixed with the context server ID, and truncated if necessary
2904                    tool_name.insert(0, '_');
2905                    if tool_name.len() + id.len() > MAX_TOOL_NAME_LENGTH {
2906                        let len = MAX_TOOL_NAME_LENGTH - tool_name.len();
2907                        let mut id = id.to_string();
2908                        id.truncate(len);
2909                        tool_name.insert_str(0, &id);
2910                    } else {
2911                        tool_name.insert_str(0, &id);
2912                    }
2913
2914                    tool_name.truncate(MAX_TOOL_NAME_LENGTH);
2915
2916                    if seen_tool_names.contains(&tool_name) {
2917                        log::error!("Cannot resolve tool name conflict for tool {}", tool.name());
2918                        None
2919                    } else {
2920                        Some((tool_name, tool.clone()))
2921                    }
2922                }
2923            }
2924        })
2925        .collect()
2926}
2927
2928#[cfg(test)]
2929mod tests {
2930    use super::*;
2931    use crate::{ThreadStore, context::load_context, context_store::ContextStore, thread_store};
2932    use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
2933    use assistant_tool::ToolRegistry;
2934    use editor::EditorSettings;
2935    use gpui::TestAppContext;
2936    use language_model::fake_provider::{FakeLanguageModel, FakeLanguageModelProvider};
2937    use project::{FakeFs, Project};
2938    use prompt_store::PromptBuilder;
2939    use serde_json::json;
2940    use settings::{Settings, SettingsStore};
2941    use std::sync::Arc;
2942    use theme::ThemeSettings;
2943    use ui::IconName;
2944    use util::path;
2945    use workspace::Workspace;
2946
    // Verifies that a user message with attached file context stores the
    // rendered <context> block on the message and prepends it to the message
    // text in the completion request.
    #[gpui::test]
    async fn test_message_with_context(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
        )
        .await;

        let (_workspace, _thread_store, thread, context_store, model) =
            setup_test_environment(cx, project.clone()).await;

        // Attach code.rs to the context store and load it.
        add_file_to_context(&project, &context_store, "test/code.rs", cx)
            .await
            .unwrap();

        let context =
            context_store.read_with(cx, |store, _| store.context().next().cloned().unwrap());
        let loaded_context = cx
            .update(|cx| load_context(vec![context], &project, &None, cx))
            .await;

        // Insert user message with context
        let message_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message(
                "Please explain this code",
                loaded_context,
                None,
                Vec::new(),
                cx,
            )
        });

        // Check content and context in message object
        let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());

        // Use different path format strings based on platform for the test
        #[cfg(windows)]
        let path_part = r"test\code.rs";
        #[cfg(not(windows))]
        let path_part = "test/code.rs";

        let expected_context = format!(
            r#"
<context>
The following items were attached by the user. They are up-to-date and don't need to be re-read.

<files>
```rs {path_part}
fn main() {{
    println!("Hello, world!");
}}
```
</files>
</context>
"#
        );

        assert_eq!(message.role, Role::User);
        assert_eq!(message.segments.len(), 1);
        assert_eq!(
            message.segments[0],
            MessageSegment::Text("Please explain this code".to_string())
        );
        assert_eq!(message.loaded_context.text, expected_context);

        // Check message in request
        let request = thread.update(cx, |thread, cx| {
            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
        });

        // Messages: [system, user]; the user message carries context + text.
        assert_eq!(request.messages.len(), 2);
        let expected_full_message = format!("{}Please explain this code", expected_context);
        assert_eq!(request.messages[1].string_contents(), expected_full_message);
    }
3023
    // Verifies that each new user message only attaches context not already
    // included in earlier messages, and that `new_context_for_thread` with an
    // exclusion point re-offers context from that message onward.
    #[gpui::test]
    async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({
                "file1.rs": "fn function1() {}\n",
                "file2.rs": "fn function2() {}\n",
                "file3.rs": "fn function3() {}\n",
                "file4.rs": "fn function4() {}\n",
            }),
        )
        .await;

        let (_, _thread_store, thread, context_store, model) =
            setup_test_environment(cx, project.clone()).await;

        // First message with context 1
        add_file_to_context(&project, &context_store, "test/file1.rs", cx)
            .await
            .unwrap();
        let new_contexts = context_store.update(cx, |store, cx| {
            store.new_context_for_thread(thread.read(cx), None)
        });
        assert_eq!(new_contexts.len(), 1);
        let loaded_context = cx
            .update(|cx| load_context(new_contexts, &project, &None, cx))
            .await;
        let message1_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Message 1", loaded_context, None, Vec::new(), cx)
        });

        // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
        add_file_to_context(&project, &context_store, "test/file2.rs", cx)
            .await
            .unwrap();
        let new_contexts = context_store.update(cx, |store, cx| {
            store.new_context_for_thread(thread.read(cx), None)
        });
        assert_eq!(new_contexts.len(), 1);
        let loaded_context = cx
            .update(|cx| load_context(new_contexts, &project, &None, cx))
            .await;
        let message2_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Message 2", loaded_context, None, Vec::new(), cx)
        });

        // Third message with all three contexts (contexts 1 and 2 should be skipped)
        add_file_to_context(&project, &context_store, "test/file3.rs", cx)
            .await
            .unwrap();
        let new_contexts = context_store.update(cx, |store, cx| {
            store.new_context_for_thread(thread.read(cx), None)
        });
        assert_eq!(new_contexts.len(), 1);
        let loaded_context = cx
            .update(|cx| load_context(new_contexts, &project, &None, cx))
            .await;
        let message3_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Message 3", loaded_context, None, Vec::new(), cx)
        });

        // Check what contexts are included in each message
        let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
            (
                thread.message(message1_id).unwrap().clone(),
                thread.message(message2_id).unwrap().clone(),
                thread.message(message3_id).unwrap().clone(),
            )
        });

        // First message should include context 1
        assert!(message1.loaded_context.text.contains("file1.rs"));

        // Second message should include only context 2 (not 1)
        assert!(!message2.loaded_context.text.contains("file1.rs"));
        assert!(message2.loaded_context.text.contains("file2.rs"));

        // Third message should include only context 3 (not 1 or 2)
        assert!(!message3.loaded_context.text.contains("file1.rs"));
        assert!(!message3.loaded_context.text.contains("file2.rs"));
        assert!(message3.loaded_context.text.contains("file3.rs"));

        // Check entire request to make sure all contexts are properly included
        let request = thread.update(cx, |thread, cx| {
            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
        });

        // The request should contain all 3 messages
        assert_eq!(request.messages.len(), 4);

        // Check that the contexts are properly formatted in each message
        assert!(request.messages[1].string_contents().contains("file1.rs"));
        assert!(!request.messages[1].string_contents().contains("file2.rs"));
        assert!(!request.messages[1].string_contents().contains("file3.rs"));

        assert!(!request.messages[2].string_contents().contains("file1.rs"));
        assert!(request.messages[2].string_contents().contains("file2.rs"));
        assert!(!request.messages[2].string_contents().contains("file3.rs"));

        assert!(!request.messages[3].string_contents().contains("file1.rs"));
        assert!(!request.messages[3].string_contents().contains("file2.rs"));
        assert!(request.messages[3].string_contents().contains("file3.rs"));

        // Excluding messages from message2 onward should re-offer contexts 2
        // and 3 plus the newly added context 4 (but never context 1).
        add_file_to_context(&project, &context_store, "test/file4.rs", cx)
            .await
            .unwrap();
        let new_contexts = context_store.update(cx, |store, cx| {
            store.new_context_for_thread(thread.read(cx), Some(message2_id))
        });
        assert_eq!(new_contexts.len(), 3);
        let loaded_context = cx
            .update(|cx| load_context(new_contexts, &project, &None, cx))
            .await
            .loaded_context;

        assert!(!loaded_context.text.contains("file1.rs"));
        assert!(loaded_context.text.contains("file2.rs"));
        assert!(loaded_context.text.contains("file3.rs"));
        assert!(loaded_context.text.contains("file4.rs"));

        let new_contexts = context_store.update(cx, |store, cx| {
            // Remove file4.rs
            store.remove_context(&loaded_context.contexts[2].handle(), cx);
            store.new_context_for_thread(thread.read(cx), Some(message2_id))
        });
        assert_eq!(new_contexts.len(), 2);
        let loaded_context = cx
            .update(|cx| load_context(new_contexts, &project, &None, cx))
            .await
            .loaded_context;

        assert!(!loaded_context.text.contains("file1.rs"));
        assert!(loaded_context.text.contains("file2.rs"));
        assert!(loaded_context.text.contains("file3.rs"));
        assert!(!loaded_context.text.contains("file4.rs"));

        let new_contexts = context_store.update(cx, |store, cx| {
            // Remove file3.rs
            store.remove_context(&loaded_context.contexts[1].handle(), cx);
            store.new_context_for_thread(thread.read(cx), Some(message2_id))
        });
        assert_eq!(new_contexts.len(), 1);
        let loaded_context = cx
            .update(|cx| load_context(new_contexts, &project, &None, cx))
            .await
            .loaded_context;

        assert!(!loaded_context.text.contains("file1.rs"));
        assert!(loaded_context.text.contains("file2.rs"));
        assert!(!loaded_context.text.contains("file3.rs"));
        assert!(!loaded_context.text.contains("file4.rs"));
    }
3179
3180    #[gpui::test]
3181    async fn test_message_without_files(cx: &mut TestAppContext) {
3182        init_test_settings(cx);
3183
3184        let project = create_test_project(
3185            cx,
3186            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
3187        )
3188        .await;
3189
3190        let (_, _thread_store, thread, _context_store, model) =
3191            setup_test_environment(cx, project.clone()).await;
3192
3193        // Insert user message without any context (empty context vector)
3194        let message_id = thread.update(cx, |thread, cx| {
3195            thread.insert_user_message(
3196                "What is the best way to learn Rust?",
3197                ContextLoadResult::default(),
3198                None,
3199                Vec::new(),
3200                cx,
3201            )
3202        });
3203
3204        // Check content and context in message object
3205        let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
3206
3207        // Context should be empty when no files are included
3208        assert_eq!(message.role, Role::User);
3209        assert_eq!(message.segments.len(), 1);
3210        assert_eq!(
3211            message.segments[0],
3212            MessageSegment::Text("What is the best way to learn Rust?".to_string())
3213        );
3214        assert_eq!(message.loaded_context.text, "");
3215
3216        // Check message in request
3217        let request = thread.update(cx, |thread, cx| {
3218            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3219        });
3220
3221        assert_eq!(request.messages.len(), 2);
3222        assert_eq!(
3223            request.messages[1].string_contents(),
3224            "What is the best way to learn Rust?"
3225        );
3226
3227        // Add second message, also without context
3228        let message2_id = thread.update(cx, |thread, cx| {
3229            thread.insert_user_message(
3230                "Are there any good books?",
3231                ContextLoadResult::default(),
3232                None,
3233                Vec::new(),
3234                cx,
3235            )
3236        });
3237
3238        let message2 =
3239            thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
3240        assert_eq!(message2.loaded_context.text, "");
3241
3242        // Check that both messages appear in the request
3243        let request = thread.update(cx, |thread, cx| {
3244            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3245        });
3246
3247        assert_eq!(request.messages.len(), 3);
3248        assert_eq!(
3249            request.messages[1].string_contents(),
3250            "What is the best way to learn Rust?"
3251        );
3252        assert_eq!(
3253            request.messages[2].string_contents(),
3254            "Are there any good books?"
3255        );
3256    }
3257
3258    #[gpui::test]
3259    async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
3260        init_test_settings(cx);
3261
3262        let project = create_test_project(
3263            cx,
3264            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
3265        )
3266        .await;
3267
3268        let (_workspace, thread_store, thread, _context_store, _model) =
3269            setup_test_environment(cx, project.clone()).await;
3270
3271        // Check that we are starting with the default profile
3272        let profile = cx.read(|cx| thread.read(cx).profile.clone());
3273        let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3274        assert_eq!(
3275            profile,
3276            AgentProfile::new(AgentProfileId::default(), tool_set)
3277        );
3278    }
3279
3280    #[gpui::test]
3281    async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
3282        init_test_settings(cx);
3283
3284        let project = create_test_project(
3285            cx,
3286            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
3287        )
3288        .await;
3289
3290        let (_workspace, thread_store, thread, _context_store, _model) =
3291            setup_test_environment(cx, project.clone()).await;
3292
3293        // Profile gets serialized with default values
3294        let serialized = thread
3295            .update(cx, |thread, cx| thread.serialize(cx))
3296            .await
3297            .unwrap();
3298
3299        assert_eq!(serialized.profile, Some(AgentProfileId::default()));
3300
3301        let deserialized = cx.update(|cx| {
3302            thread.update(cx, |thread, cx| {
3303                Thread::deserialize(
3304                    thread.id.clone(),
3305                    serialized,
3306                    thread.project.clone(),
3307                    thread.tools.clone(),
3308                    thread.prompt_builder.clone(),
3309                    thread.project_context.clone(),
3310                    None,
3311                    cx,
3312                )
3313            })
3314        });
3315        let tool_set = cx.read(|cx| thread_store.read(cx).tools());
3316
3317        assert_eq!(
3318            deserialized.profile,
3319            AgentProfile::new(AgentProfileId::default(), tool_set)
3320        );
3321    }
3322
3323    #[gpui::test]
3324    async fn test_temperature_setting(cx: &mut TestAppContext) {
3325        init_test_settings(cx);
3326
3327        let project = create_test_project(
3328            cx,
3329            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
3330        )
3331        .await;
3332
3333        let (_workspace, _thread_store, thread, _context_store, model) =
3334            setup_test_environment(cx, project.clone()).await;
3335
3336        // Both model and provider
3337        cx.update(|cx| {
3338            AgentSettings::override_global(
3339                AgentSettings {
3340                    model_parameters: vec![LanguageModelParameters {
3341                        provider: Some(model.provider_id().0.to_string().into()),
3342                        model: Some(model.id().0.clone()),
3343                        temperature: Some(0.66),
3344                    }],
3345                    ..AgentSettings::get_global(cx).clone()
3346                },
3347                cx,
3348            );
3349        });
3350
3351        let request = thread.update(cx, |thread, cx| {
3352            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3353        });
3354        assert_eq!(request.temperature, Some(0.66));
3355
3356        // Only model
3357        cx.update(|cx| {
3358            AgentSettings::override_global(
3359                AgentSettings {
3360                    model_parameters: vec![LanguageModelParameters {
3361                        provider: None,
3362                        model: Some(model.id().0.clone()),
3363                        temperature: Some(0.66),
3364                    }],
3365                    ..AgentSettings::get_global(cx).clone()
3366                },
3367                cx,
3368            );
3369        });
3370
3371        let request = thread.update(cx, |thread, cx| {
3372            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3373        });
3374        assert_eq!(request.temperature, Some(0.66));
3375
3376        // Only provider
3377        cx.update(|cx| {
3378            AgentSettings::override_global(
3379                AgentSettings {
3380                    model_parameters: vec![LanguageModelParameters {
3381                        provider: Some(model.provider_id().0.to_string().into()),
3382                        model: None,
3383                        temperature: Some(0.66),
3384                    }],
3385                    ..AgentSettings::get_global(cx).clone()
3386                },
3387                cx,
3388            );
3389        });
3390
3391        let request = thread.update(cx, |thread, cx| {
3392            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3393        });
3394        assert_eq!(request.temperature, Some(0.66));
3395
3396        // Same model name, different provider
3397        cx.update(|cx| {
3398            AgentSettings::override_global(
3399                AgentSettings {
3400                    model_parameters: vec![LanguageModelParameters {
3401                        provider: Some("anthropic".into()),
3402                        model: Some(model.id().0.clone()),
3403                        temperature: Some(0.66),
3404                    }],
3405                    ..AgentSettings::get_global(cx).clone()
3406                },
3407                cx,
3408            );
3409        });
3410
3411        let request = thread.update(cx, |thread, cx| {
3412            thread.to_completion_request(model.clone(), CompletionIntent::UserPrompt, cx)
3413        });
3414        assert_eq!(request.temperature, None);
3415    }
3416
    /// Walks the summary through its full lifecycle: Pending while empty,
    /// Generating once a response has streamed, Ready after the summary text
    /// arrives — verifying `set_summary` is rejected until the Ready state.
    #[gpui::test]
    async fn test_thread_summary(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(cx, json!({})).await;

        let (_, _thread_store, thread, _context_store, model) =
            setup_test_environment(cx, project.clone()).await;

        // Initial state should be pending
        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Pending));
            assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
        });

        // Manually setting the summary should not be allowed in this state
        thread.update(cx, |thread, cx| {
            thread.set_summary("This should not work", cx);
        });

        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Pending));
        });

        // Send a message
        thread.update(cx, |thread, cx| {
            thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
            thread.send_to_model(
                model.clone(),
                CompletionIntent::ThreadSummarization,
                None,
                cx,
            );
        });

        // Complete the main assistant response before probing summary state.
        let fake_model = model.as_fake();
        simulate_successful_response(&fake_model, cx);

        // Should start generating summary when there are >= 2 messages
        thread.read_with(cx, |thread, _| {
            assert_eq!(*thread.summary(), ThreadSummary::Generating);
        });

        // Should not be able to set the summary while generating
        thread.update(cx, |thread, cx| {
            thread.set_summary("This should not work either", cx);
        });

        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Generating));
            assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
        });

        // Stream the summary text in two chunks, then close the stream.
        cx.run_until_parked();
        fake_model.stream_last_completion_response("Brief");
        fake_model.stream_last_completion_response(" Introduction");
        fake_model.end_last_completion_stream();
        cx.run_until_parked();

        // Summary should be set
        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
            assert_eq!(thread.summary().or_default(), "Brief Introduction");
        });

        // Now we should be able to set a summary
        thread.update(cx, |thread, cx| {
            thread.set_summary("Brief Intro", cx);
        });

        thread.read_with(cx, |thread, _| {
            assert_eq!(thread.summary().or_default(), "Brief Intro");
        });

        // Test setting an empty summary (should default to DEFAULT)
        thread.update(cx, |thread, cx| {
            thread.set_summary("", cx);
        });

        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
            assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
        });
    }
3501
3502    #[gpui::test]
3503    async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
3504        init_test_settings(cx);
3505
3506        let project = create_test_project(cx, json!({})).await;
3507
3508        let (_, _thread_store, thread, _context_store, model) =
3509            setup_test_environment(cx, project.clone()).await;
3510
3511        test_summarize_error(&model, &thread, cx);
3512
3513        // Now we should be able to set a summary
3514        thread.update(cx, |thread, cx| {
3515            thread.set_summary("Brief Intro", cx);
3516        });
3517
3518        thread.read_with(cx, |thread, _| {
3519            assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
3520            assert_eq!(thread.summary().or_default(), "Brief Intro");
3521        });
3522    }
3523
    /// A failed summarization is not retried automatically when further
    /// messages are sent; an explicit `summarize` call does retry and can
    /// then succeed.
    #[gpui::test]
    async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(cx, json!({})).await;

        let (_, _thread_store, thread, _context_store, model) =
            setup_test_environment(cx, project.clone()).await;

        // Put the thread into the summary Error state first.
        test_summarize_error(&model, &thread, cx);

        // Sending another message should not trigger another summarize request
        thread.update(cx, |thread, cx| {
            thread.insert_user_message(
                "How are you?",
                ContextLoadResult::default(),
                None,
                vec![],
                cx,
            );
            thread.send_to_model(model.clone(), CompletionIntent::UserPrompt, None, cx);
        });

        let fake_model = model.as_fake();
        simulate_successful_response(&fake_model, cx);

        thread.read_with(cx, |thread, _| {
            // State is still Error, not Generating
            assert!(matches!(thread.summary(), ThreadSummary::Error));
        });

        // But the summarize request can be invoked manually
        thread.update(cx, |thread, cx| {
            thread.summarize(cx);
        });

        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Generating));
        });

        // Stream a successful summary for the manual retry.
        cx.run_until_parked();
        fake_model.stream_last_completion_response("A successful summary");
        fake_model.end_last_completion_stream();
        cx.run_until_parked();

        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Ready(_)));
            assert_eq!(thread.summary().or_default(), "A successful summary");
        });
    }
3574
    /// Verifies `resolve_tool_name_conflicts`: native tools keep their names,
    /// duplicate names contributed by context (MCP) servers are prefixed with
    /// the server id, and over-long names are truncated to a maximum length.
    #[gpui::test]
    fn test_resolve_tool_name_conflicts() {
        use assistant_tool::{Tool, ToolSource};

        // No conflicts: every name passes through unchanged.
        assert_resolve_tool_name_conflicts(
            vec![
                TestTool::new("tool1", ToolSource::Native),
                TestTool::new("tool2", ToolSource::Native),
                TestTool::new("tool3", ToolSource::ContextServer { id: "mcp-1".into() }),
            ],
            vec!["tool1", "tool2", "tool3"],
        );

        // Two servers expose the same tool name: both get a server-id prefix.
        assert_resolve_tool_name_conflicts(
            vec![
                TestTool::new("tool1", ToolSource::Native),
                TestTool::new("tool2", ToolSource::Native),
                TestTool::new("tool3", ToolSource::ContextServer { id: "mcp-1".into() }),
                TestTool::new("tool3", ToolSource::ContextServer { id: "mcp-2".into() }),
            ],
            vec!["tool1", "tool2", "mcp-1_tool3", "mcp-2_tool3"],
        );

        // A native tool also named "tool3" keeps its bare name; only the
        // context-server duplicates are prefixed.
        assert_resolve_tool_name_conflicts(
            vec![
                TestTool::new("tool1", ToolSource::Native),
                TestTool::new("tool2", ToolSource::Native),
                TestTool::new("tool3", ToolSource::Native),
                TestTool::new("tool3", ToolSource::ContextServer { id: "mcp-1".into() }),
                TestTool::new("tool3", ToolSource::ContextServer { id: "mcp-2".into() }),
            ],
            vec!["tool1", "tool2", "tool3", "mcp-1_tool3", "mcp-2_tool3"],
        );

        // Test that tool with very long name is always truncated
        assert_resolve_tool_name_conflicts(
            vec![TestTool::new(
                "tool-with-more-then-64-characters-blah-blah-blah-blah-blah-blah-blah-blah",
                ToolSource::Native,
            )],
            vec!["tool-with-more-then-64-characters-blah-blah-blah-blah-blah-blah-"],
        );

        // Test deduplication of tools with very long names, in this case the mcp server name should be truncated
        assert_resolve_tool_name_conflicts(
            vec![
                TestTool::new("tool-with-very-very-very-long-name", ToolSource::Native),
                TestTool::new(
                    "tool-with-very-very-very-long-name",
                    ToolSource::ContextServer {
                        id: "mcp-with-very-very-very-long-name".into(),
                    },
                ),
            ],
            vec![
                "tool-with-very-very-very-long-name",
                "mcp-with-very-very-very-long-_tool-with-very-very-very-long-name",
            ],
        );

        // Runs `resolve_tool_name_conflicts` on `tools` and asserts the
        // resulting names equal `expected`, in order.
        fn assert_resolve_tool_name_conflicts(
            tools: Vec<TestTool>,
            expected: Vec<impl Into<String>>,
        ) {
            let tools: Vec<Arc<dyn Tool>> = tools
                .into_iter()
                .map(|t| Arc::new(t) as Arc<dyn Tool>)
                .collect();
            let tools = resolve_tool_name_conflicts(&tools);
            assert_eq!(tools.len(), expected.len());
            for (i, expected_name) in expected.into_iter().enumerate() {
                let expected_name = expected_name.into();
                let actual_name = &tools[i].0;
                assert_eq!(
                    actual_name, &expected_name,
                    "Expected '{}' got '{}' at index {}",
                    expected_name, actual_name, i
                );
            }
        }

        // Minimal `Tool` implementation with a configurable name and source;
        // its `run` method is never exercised by this test.
        struct TestTool {
            name: String,
            source: ToolSource,
        }

        impl TestTool {
            fn new(name: impl Into<String>, source: ToolSource) -> Self {
                Self {
                    name: name.into(),
                    source,
                }
            }
        }

        impl Tool for TestTool {
            fn name(&self) -> String {
                self.name.clone()
            }

            fn icon(&self) -> IconName {
                IconName::Ai
            }

            fn may_perform_edits(&self) -> bool {
                false
            }

            fn needs_confirmation(&self, _input: &serde_json::Value, _cx: &App) -> bool {
                true
            }

            fn source(&self) -> ToolSource {
                self.source.clone()
            }

            fn description(&self) -> String {
                "Test tool".to_string()
            }

            fn ui_text(&self, _input: &serde_json::Value) -> String {
                "Test tool".to_string()
            }

            // Stub: immediately resolves to an error; the resolver only looks
            // at names and sources, never executes tools.
            fn run(
                self: Arc<Self>,
                _input: serde_json::Value,
                _request: Arc<LanguageModelRequest>,
                _project: Entity<Project>,
                _action_log: Entity<ActionLog>,
                _model: Arc<dyn LanguageModel>,
                _window: Option<AnyWindowHandle>,
                _cx: &mut App,
            ) -> assistant_tool::ToolResult {
                assistant_tool::ToolResult {
                    output: Task::ready(Err(anyhow::anyhow!("No content"))),
                    card: None,
                }
            }
        }
    }
3716
    /// Drives `thread` through a summarization attempt that fails: the main
    /// completion succeeds, but the summary stream is ended without producing
    /// any text, after which the thread reports `ThreadSummary::Error`.
    fn test_summarize_error(
        model: &Arc<dyn LanguageModel>,
        thread: &Entity<Thread>,
        cx: &mut TestAppContext,
    ) {
        thread.update(cx, |thread, cx| {
            thread.insert_user_message("Hi!", ContextLoadResult::default(), None, vec![], cx);
            thread.send_to_model(
                model.clone(),
                CompletionIntent::ThreadSummarization,
                None,
                cx,
            );
        });

        let fake_model = model.as_fake();
        simulate_successful_response(&fake_model, cx);

        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Generating));
            assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
        });

        // Simulate summary request ending
        // (closing the stream without having streamed any summary text)
        cx.run_until_parked();
        fake_model.end_last_completion_stream();
        cx.run_until_parked();

        // State is set to Error and default message
        thread.read_with(cx, |thread, _| {
            assert!(matches!(thread.summary(), ThreadSummary::Error));
            assert_eq!(thread.summary().or_default(), ThreadSummary::DEFAULT);
        });
    }
3751
    /// Completes the pending request on `fake_model` with a single assistant
    /// message, parking the executor before and after so spawned tasks observe
    /// the streamed response.
    fn simulate_successful_response(fake_model: &FakeLanguageModel, cx: &mut TestAppContext) {
        cx.run_until_parked();
        fake_model.stream_last_completion_response("Assistant response");
        fake_model.end_last_completion_stream();
        cx.run_until_parked();
    }
3758
    /// Installs the global settings store and registers every settings/init
    /// hook the tests in this module rely on (project, agent, prompt store,
    /// thread store, workspace, language model, theme, editor, tool registry).
    fn init_test_settings(cx: &mut TestAppContext) {
        cx.update(|cx| {
            // The settings store must exist before the register/init calls below.
            let settings_store = SettingsStore::test(cx);
            cx.set_global(settings_store);
            language::init(cx);
            Project::init_settings(cx);
            AgentSettings::register(cx);
            prompt_store::init(cx);
            thread_store::init(cx);
            workspace::init_settings(cx);
            language_model::init_settings(cx);
            ThemeSettings::register(cx);
            EditorSettings::register(cx);
            ToolRegistry::default_global(cx);
        });
    }
3775
3776    // Helper to create a test project with test files
3777    async fn create_test_project(
3778        cx: &mut TestAppContext,
3779        files: serde_json::Value,
3780    ) -> Entity<Project> {
3781        let fs = FakeFs::new(cx.executor());
3782        fs.insert_tree(path!("/test"), files).await;
3783        Project::test(fs, [path!("/test").as_ref()], cx).await
3784    }
3785
    /// Spins up the shared test fixture: a `Workspace`, a `ThreadStore` with a
    /// freshly-created `Thread`, an empty `ContextStore`, and a fake language
    /// model registered as both the default and thread-summary model.
    async fn setup_test_environment(
        cx: &mut TestAppContext,
        project: Entity<Project>,
    ) -> (
        Entity<Workspace>,
        Entity<ThreadStore>,
        Entity<Thread>,
        Entity<ContextStore>,
        Arc<dyn LanguageModel>,
    ) {
        let (workspace, cx) =
            cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));

        // The thread store must be loaded before a thread can be created.
        let thread_store = cx
            .update(|_, cx| {
                ThreadStore::load(
                    project.clone(),
                    cx.new(|_| ToolWorkingSet::default()),
                    None,
                    Arc::new(PromptBuilder::new(None).unwrap()),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
        let context_store = cx.new(|_cx| ContextStore::new(project.downgrade(), None));

        let provider = Arc::new(FakeLanguageModelProvider);
        let model = provider.test_model();
        let model: Arc<dyn LanguageModel> = Arc::new(model);

        // Register the same fake model for ordinary completions and for
        // thread summarization so tests can drive both via `model.as_fake()`.
        cx.update(|_, cx| {
            LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
                registry.set_default_model(
                    Some(ConfiguredModel {
                        provider: provider.clone(),
                        model: model.clone(),
                    }),
                    cx,
                );
                registry.set_thread_summary_model(
                    Some(ConfiguredModel {
                        provider,
                        model: model.clone(),
                    }),
                    cx,
                );
            })
        });

        (workspace, thread_store, thread, context_store, model)
    }
3840
3841    async fn add_file_to_context(
3842        project: &Entity<Project>,
3843        context_store: &Entity<ContextStore>,
3844        path: &str,
3845        cx: &mut TestAppContext,
3846    ) -> Result<Entity<language::Buffer>> {
3847        let buffer_path = project
3848            .read_with(cx, |project, cx| project.find_project_path(path, cx))
3849            .unwrap();
3850
3851        let buffer = project
3852            .update(cx, |project, cx| {
3853                project.open_buffer(buffer_path.clone(), cx)
3854            })
3855            .await
3856            .unwrap();
3857
3858        context_store.update(cx, |context_store, cx| {
3859            context_store.add_file_from_buffer(&buffer_path, buffer.clone(), false, cx);
3860        });
3861
3862        Ok(buffer)
3863    }
3864}