1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SubagentTool, SystemPromptTemplate, Template, Templates,
6 TerminalTool, ThinkingTool, ToolPermissionDecision, WebSearchTool,
7 decide_permission_from_settings,
8};
9use acp_thread::{MentionUri, UserMessageId};
10use action_log::ActionLog;
11use feature_flags::{FeatureFlagAppExt as _, SubagentsFeatureFlag};
12
13use agent_client_protocol as acp;
14use agent_settings::{
15 AgentProfileId, AgentProfileSettings, AgentSettings, SUMMARIZE_THREAD_DETAILED_PROMPT,
16 SUMMARIZE_THREAD_PROMPT,
17};
18use anyhow::{Context as _, Result, anyhow};
19use chrono::{DateTime, Utc};
20use client::UserStore;
21use cloud_llm_client::{CompletionIntent, Plan};
22use collections::{HashMap, HashSet, IndexMap};
23use fs::Fs;
24use futures::stream;
25use futures::{
26 FutureExt,
27 channel::{mpsc, oneshot},
28 future::Shared,
29 stream::FuturesUnordered,
30};
31use gpui::{
32 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
33};
34use language::Buffer;
35use language_model::{
36 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId,
37 LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
38 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
39 LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
40 LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage, ZED_CLOUD_PROVIDER_ID,
41};
42use project::Project;
43use prompt_store::ProjectContext;
44use schemars::{JsonSchema, Schema};
45use serde::{Deserialize, Serialize};
46use settings::{LanguageModelSelection, Settings, ToolPermissionMode, update_settings_file};
47use smol::stream::StreamExt;
48use std::{
49 collections::BTreeMap,
50 ops::RangeInclusive,
51 path::Path,
52 rc::Rc,
53 sync::Arc,
54 time::{Duration, Instant},
55};
56use std::{fmt::Write, path::PathBuf};
57use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
58use uuid::Uuid;
59
60const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
61pub const MAX_TOOL_NAME_LENGTH: usize = 64;
62pub const MAX_SUBAGENT_DEPTH: u8 = 4;
63pub const MAX_PARALLEL_SUBAGENTS: usize = 8;
64
65/// Context passed to a subagent thread for lifecycle management
66#[derive(Clone)]
67pub struct SubagentContext {
68 /// ID of the parent thread
69 pub parent_thread_id: acp::SessionId,
70
71 /// ID of the tool call that spawned this subagent
72 pub tool_use_id: LanguageModelToolUseId,
73
74 /// Current depth level (0 = root agent, 1 = first-level subagent, etc.)
75 pub depth: u8,
76
77 /// Prompt to send when subagent completes successfully
78 pub summary_prompt: String,
79
80 /// Prompt to send when context is running low (≤25% remaining)
81 pub context_low_prompt: String,
82}
83
84/// The ID of the user prompt that initiated a request.
85///
86/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
87#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
88pub struct PromptId(Arc<str>);
89
90impl PromptId {
91 pub fn new() -> Self {
92 Self(Uuid::new_v4().to_string().into())
93 }
94}
95
96impl std::fmt::Display for PromptId {
97 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
98 write!(f, "{}", self.0)
99 }
100}
101
102pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
103pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
104
105#[derive(Debug, Clone)]
106enum RetryStrategy {
107 ExponentialBackoff {
108 initial_delay: Duration,
109 max_attempts: u8,
110 },
111 Fixed {
112 delay: Duration,
113 max_attempts: u8,
114 },
115}
116
117#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
118pub enum Message {
119 User(UserMessage),
120 Agent(AgentMessage),
121 Resume,
122}
123
124impl Message {
125 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
126 match self {
127 Message::Agent(agent_message) => Some(agent_message),
128 _ => None,
129 }
130 }
131
132 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
133 match self {
134 Message::User(message) => {
135 if message.content.is_empty() {
136 vec![]
137 } else {
138 vec![message.to_request()]
139 }
140 }
141 Message::Agent(message) => message.to_request(),
142 Message::Resume => vec![LanguageModelRequestMessage {
143 role: Role::User,
144 content: vec!["Continue where you left off".into()],
145 cache: false,
146 reasoning_details: None,
147 }],
148 }
149 }
150
151 pub fn to_markdown(&self) -> String {
152 match self {
153 Message::User(message) => message.to_markdown(),
154 Message::Agent(message) => message.to_markdown(),
155 Message::Resume => "[resume]\n".into(),
156 }
157 }
158
159 pub fn role(&self) -> Role {
160 match self {
161 Message::User(_) | Message::Resume => Role::User,
162 Message::Agent(_) => Role::Assistant,
163 }
164 }
165}
166
167#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
168pub struct UserMessage {
169 pub id: UserMessageId,
170 pub content: Vec<UserMessageContent>,
171}
172
173#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
174pub enum UserMessageContent {
175 Text(String),
176 Mention { uri: MentionUri, content: String },
177 Image(LanguageModelImage),
178}
179
180impl UserMessage {
181 pub fn to_markdown(&self) -> String {
182 let mut markdown = String::from("## User\n\n");
183
184 for content in &self.content {
185 match content {
186 UserMessageContent::Text(text) => {
187 markdown.push_str(text);
188 markdown.push('\n');
189 }
190 UserMessageContent::Image(_) => {
191 markdown.push_str("<image />\n");
192 }
193 UserMessageContent::Mention { uri, content } => {
194 if !content.is_empty() {
195 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
196 } else {
197 let _ = writeln!(&mut markdown, "{}", uri.as_link());
198 }
199 }
200 }
201 }
202
203 markdown
204 }
205
206 fn to_request(&self) -> LanguageModelRequestMessage {
207 let mut message = LanguageModelRequestMessage {
208 role: Role::User,
209 content: Vec::with_capacity(self.content.len()),
210 cache: false,
211 reasoning_details: None,
212 };
213
214 const OPEN_CONTEXT: &str = "<context>\n\
215 The following items were attached by the user. \
216 They are up-to-date and don't need to be re-read.\n\n";
217
218 const OPEN_FILES_TAG: &str = "<files>";
219 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
220 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
221 const OPEN_SELECTIONS_TAG: &str = "<selections>";
222 const OPEN_THREADS_TAG: &str = "<threads>";
223 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
224 const OPEN_RULES_TAG: &str =
225 "<rules>\nThe user has specified the following rules that should be applied:\n";
226
227 let mut file_context = OPEN_FILES_TAG.to_string();
228 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
229 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
230 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
231 let mut thread_context = OPEN_THREADS_TAG.to_string();
232 let mut fetch_context = OPEN_FETCH_TAG.to_string();
233 let mut rules_context = OPEN_RULES_TAG.to_string();
234
235 for chunk in &self.content {
236 let chunk = match chunk {
237 UserMessageContent::Text(text) => {
238 language_model::MessageContent::Text(text.clone())
239 }
240 UserMessageContent::Image(value) => {
241 language_model::MessageContent::Image(value.clone())
242 }
243 UserMessageContent::Mention { uri, content } => {
244 match uri {
245 MentionUri::File { abs_path } => {
246 write!(
247 &mut file_context,
248 "\n{}",
249 MarkdownCodeBlock {
250 tag: &codeblock_tag(abs_path, None),
251 text: &content.to_string(),
252 }
253 )
254 .ok();
255 }
256 MentionUri::PastedImage => {
257 debug_panic!("pasted image URI should not be used in mention content")
258 }
259 MentionUri::Directory { .. } => {
260 write!(&mut directory_context, "\n{}\n", content).ok();
261 }
262 MentionUri::Symbol {
263 abs_path: path,
264 line_range,
265 ..
266 } => {
267 write!(
268 &mut symbol_context,
269 "\n{}",
270 MarkdownCodeBlock {
271 tag: &codeblock_tag(path, Some(line_range)),
272 text: content
273 }
274 )
275 .ok();
276 }
277 MentionUri::Selection {
278 abs_path: path,
279 line_range,
280 ..
281 } => {
282 write!(
283 &mut selection_context,
284 "\n{}",
285 MarkdownCodeBlock {
286 tag: &codeblock_tag(
287 path.as_deref().unwrap_or("Untitled".as_ref()),
288 Some(line_range)
289 ),
290 text: content
291 }
292 )
293 .ok();
294 }
295 MentionUri::Thread { .. } => {
296 write!(&mut thread_context, "\n{}\n", content).ok();
297 }
298 MentionUri::TextThread { .. } => {
299 write!(&mut thread_context, "\n{}\n", content).ok();
300 }
301 MentionUri::Rule { .. } => {
302 write!(
303 &mut rules_context,
304 "\n{}",
305 MarkdownCodeBlock {
306 tag: "",
307 text: content
308 }
309 )
310 .ok();
311 }
312 MentionUri::Fetch { url } => {
313 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
314 }
315 }
316
317 language_model::MessageContent::Text(uri.as_link().to_string())
318 }
319 };
320
321 message.content.push(chunk);
322 }
323
324 let len_before_context = message.content.len();
325
326 if file_context.len() > OPEN_FILES_TAG.len() {
327 file_context.push_str("</files>\n");
328 message
329 .content
330 .push(language_model::MessageContent::Text(file_context));
331 }
332
333 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
334 directory_context.push_str("</directories>\n");
335 message
336 .content
337 .push(language_model::MessageContent::Text(directory_context));
338 }
339
340 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
341 symbol_context.push_str("</symbols>\n");
342 message
343 .content
344 .push(language_model::MessageContent::Text(symbol_context));
345 }
346
347 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
348 selection_context.push_str("</selections>\n");
349 message
350 .content
351 .push(language_model::MessageContent::Text(selection_context));
352 }
353
354 if thread_context.len() > OPEN_THREADS_TAG.len() {
355 thread_context.push_str("</threads>\n");
356 message
357 .content
358 .push(language_model::MessageContent::Text(thread_context));
359 }
360
361 if fetch_context.len() > OPEN_FETCH_TAG.len() {
362 fetch_context.push_str("</fetched_urls>\n");
363 message
364 .content
365 .push(language_model::MessageContent::Text(fetch_context));
366 }
367
368 if rules_context.len() > OPEN_RULES_TAG.len() {
369 rules_context.push_str("</user_rules>\n");
370 message
371 .content
372 .push(language_model::MessageContent::Text(rules_context));
373 }
374
375 if message.content.len() > len_before_context {
376 message.content.insert(
377 len_before_context,
378 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
379 );
380 message
381 .content
382 .push(language_model::MessageContent::Text("</context>".into()));
383 }
384
385 message
386 }
387}
388
389fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
390 let mut result = String::new();
391
392 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
393 let _ = write!(result, "{} ", extension);
394 }
395
396 let _ = write!(result, "{}", full_path.display());
397
398 if let Some(range) = line_range {
399 if range.start() == range.end() {
400 let _ = write!(result, ":{}", range.start() + 1);
401 } else {
402 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
403 }
404 }
405
406 result
407}
408
409impl AgentMessage {
410 pub fn to_markdown(&self) -> String {
411 let mut markdown = String::from("## Assistant\n\n");
412
413 for content in &self.content {
414 match content {
415 AgentMessageContent::Text(text) => {
416 markdown.push_str(text);
417 markdown.push('\n');
418 }
419 AgentMessageContent::Thinking { text, .. } => {
420 markdown.push_str("<think>");
421 markdown.push_str(text);
422 markdown.push_str("</think>\n");
423 }
424 AgentMessageContent::RedactedThinking(_) => {
425 markdown.push_str("<redacted_thinking />\n")
426 }
427 AgentMessageContent::ToolUse(tool_use) => {
428 markdown.push_str(&format!(
429 "**Tool Use**: {} (ID: {})\n",
430 tool_use.name, tool_use.id
431 ));
432 markdown.push_str(&format!(
433 "{}\n",
434 MarkdownCodeBlock {
435 tag: "json",
436 text: &format!("{:#}", tool_use.input)
437 }
438 ));
439 }
440 }
441 }
442
443 for tool_result in self.tool_results.values() {
444 markdown.push_str(&format!(
445 "**Tool Result**: {} (ID: {})\n\n",
446 tool_result.tool_name, tool_result.tool_use_id
447 ));
448 if tool_result.is_error {
449 markdown.push_str("**ERROR:**\n");
450 }
451
452 match &tool_result.content {
453 LanguageModelToolResultContent::Text(text) => {
454 writeln!(markdown, "{text}\n").ok();
455 }
456 LanguageModelToolResultContent::Image(_) => {
457 writeln!(markdown, "<image />\n").ok();
458 }
459 }
460
461 if let Some(output) = tool_result.output.as_ref() {
462 writeln!(
463 markdown,
464 "**Debug Output**:\n\n```json\n{}\n```\n",
465 serde_json::to_string_pretty(output).unwrap()
466 )
467 .unwrap();
468 }
469 }
470
471 markdown
472 }
473
474 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
475 let mut assistant_message = LanguageModelRequestMessage {
476 role: Role::Assistant,
477 content: Vec::with_capacity(self.content.len()),
478 cache: false,
479 reasoning_details: self.reasoning_details.clone(),
480 };
481 for chunk in &self.content {
482 match chunk {
483 AgentMessageContent::Text(text) => {
484 assistant_message
485 .content
486 .push(language_model::MessageContent::Text(text.clone()));
487 }
488 AgentMessageContent::Thinking { text, signature } => {
489 assistant_message
490 .content
491 .push(language_model::MessageContent::Thinking {
492 text: text.clone(),
493 signature: signature.clone(),
494 });
495 }
496 AgentMessageContent::RedactedThinking(value) => {
497 assistant_message.content.push(
498 language_model::MessageContent::RedactedThinking(value.clone()),
499 );
500 }
501 AgentMessageContent::ToolUse(tool_use) => {
502 if self.tool_results.contains_key(&tool_use.id) {
503 assistant_message
504 .content
505 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
506 }
507 }
508 };
509 }
510
511 let mut user_message = LanguageModelRequestMessage {
512 role: Role::User,
513 content: Vec::new(),
514 cache: false,
515 reasoning_details: None,
516 };
517
518 for tool_result in self.tool_results.values() {
519 let mut tool_result = tool_result.clone();
520 // Surprisingly, the API fails if we return an empty string here.
521 // It thinks we are sending a tool use without a tool result.
522 if tool_result.content.is_empty() {
523 tool_result.content = "<Tool returned an empty string>".into();
524 }
525 user_message
526 .content
527 .push(language_model::MessageContent::ToolResult(tool_result));
528 }
529
530 let mut messages = Vec::new();
531 if !assistant_message.content.is_empty() {
532 messages.push(assistant_message);
533 }
534 if !user_message.content.is_empty() {
535 messages.push(user_message);
536 }
537 messages
538 }
539}
540
541#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
542pub struct AgentMessage {
543 pub content: Vec<AgentMessageContent>,
544 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
545 pub reasoning_details: Option<serde_json::Value>,
546}
547
548#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
549pub enum AgentMessageContent {
550 Text(String),
551 Thinking {
552 text: String,
553 signature: Option<String>,
554 },
555 RedactedThinking(String),
556 ToolUse(LanguageModelToolUse),
557}
558
559pub trait TerminalHandle {
560 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
561 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
562 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
563 fn kill(&self, cx: &AsyncApp) -> Result<()>;
564 fn was_stopped_by_user(&self, cx: &AsyncApp) -> Result<bool>;
565}
566
567pub trait ThreadEnvironment {
568 fn create_terminal(
569 &self,
570 command: String,
571 cwd: Option<PathBuf>,
572 output_byte_limit: Option<u64>,
573 cx: &mut AsyncApp,
574 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
575}
576
577#[derive(Debug)]
578pub enum ThreadEvent {
579 UserMessage(UserMessage),
580 AgentText(String),
581 AgentThinking(String),
582 ToolCall(acp::ToolCall),
583 ToolCallUpdate(acp_thread::ToolCallUpdate),
584 ToolCallAuthorization(ToolCallAuthorization),
585 Retry(acp_thread::RetryStatus),
586 Stop(acp::StopReason),
587}
588
589#[derive(Debug)]
590pub struct NewTerminal {
591 pub command: String,
592 pub output_byte_limit: Option<u64>,
593 pub cwd: Option<PathBuf>,
594 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
595}
596
597#[derive(Debug, Clone)]
598pub struct ToolPermissionContext {
599 pub tool_name: String,
600 pub input_value: String,
601}
602
603impl ToolPermissionContext {
604 pub fn new(tool_name: impl Into<String>, input_value: impl Into<String>) -> Self {
605 Self {
606 tool_name: tool_name.into(),
607 input_value: input_value.into(),
608 }
609 }
610
611 /// Builds the permission options for this tool context.
612 ///
613 /// This is the canonical source for permission option generation.
614 /// Tests should use this function rather than manually constructing options.
615 pub fn build_permission_options(&self) -> Vec<acp::PermissionOption> {
616 use crate::pattern_extraction::*;
617
618 let tool_name = &self.tool_name;
619 let input_value = &self.input_value;
620
621 let (pattern, pattern_display) = match tool_name.as_str() {
622 "terminal" => (
623 extract_terminal_pattern(input_value),
624 extract_terminal_pattern_display(input_value),
625 ),
626 "edit_file" | "delete_path" | "move_path" | "create_directory" | "save_file" => (
627 extract_path_pattern(input_value),
628 extract_path_pattern_display(input_value),
629 ),
630 "fetch" => (
631 extract_url_pattern(input_value),
632 extract_url_pattern_display(input_value),
633 ),
634 _ => (None, None),
635 };
636
637 let mut options = vec![acp::PermissionOption::new(
638 acp::PermissionOptionId::new(format!("always:{}", tool_name)),
639 format!("Always for {}", tool_name.replace('_', " ")),
640 acp::PermissionOptionKind::AllowAlways,
641 )];
642
643 if let (Some(pattern), Some(display)) = (pattern, pattern_display) {
644 let button_text = match tool_name.as_str() {
645 "terminal" => format!("Always for `{}` commands", display),
646 "fetch" => format!("Always for `{}`", display),
647 _ => format!("Always for `{}`", display),
648 };
649 options.push(acp::PermissionOption::new(
650 acp::PermissionOptionId::new(format!("always_pattern:{}:{}", tool_name, pattern)),
651 button_text,
652 acp::PermissionOptionKind::AllowAlways,
653 ));
654 }
655
656 options.push(acp::PermissionOption::new(
657 acp::PermissionOptionId::new("once"),
658 "Only this time",
659 acp::PermissionOptionKind::AllowOnce,
660 ));
661
662 options
663 }
664}
665
666#[derive(Debug)]
667pub struct ToolCallAuthorization {
668 pub tool_call: acp::ToolCallUpdate,
669 pub options: Vec<acp::PermissionOption>,
670 pub response: oneshot::Sender<acp::PermissionOptionId>,
671 pub context: Option<ToolPermissionContext>,
672}
673
674#[derive(Debug, thiserror::Error)]
675enum CompletionError {
676 #[error("max tokens")]
677 MaxTokens,
678 #[error("refusal")]
679 Refusal,
680 #[error(transparent)]
681 Other(#[from] anyhow::Error),
682}
683
684pub struct QueuedMessage {
685 pub content: Vec<acp::ContentBlock>,
686 pub tracked_buffers: Vec<Entity<Buffer>>,
687}
688
689pub struct Thread {
690 id: acp::SessionId,
691 prompt_id: PromptId,
692 updated_at: DateTime<Utc>,
693 title: Option<SharedString>,
694 pending_title_generation: Option<Task<()>>,
695 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
696 summary: Option<SharedString>,
697 messages: Vec<Message>,
698 user_store: Entity<UserStore>,
699 /// Holds the task that handles agent interaction until the end of the turn.
700 /// Survives across multiple requests as the model performs tool calls and
701 /// we run tools, report their results.
702 running_turn: Option<RunningTurn>,
703 queued_messages: Vec<QueuedMessage>,
704 pending_message: Option<AgentMessage>,
705 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
706 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
707 #[allow(unused)]
708 cumulative_token_usage: TokenUsage,
709 #[allow(unused)]
710 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
711 context_server_registry: Entity<ContextServerRegistry>,
712 profile_id: AgentProfileId,
713 project_context: Entity<ProjectContext>,
714 templates: Arc<Templates>,
715 model: Option<Arc<dyn LanguageModel>>,
716 summarization_model: Option<Arc<dyn LanguageModel>>,
717 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
718 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
719 pub(crate) project: Entity<Project>,
720 pub(crate) action_log: Entity<ActionLog>,
721 /// Tracks the last time files were read by the agent, to detect external modifications
722 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
723 /// True if this thread was imported from a shared thread and can be synced.
724 imported: bool,
725 /// If this is a subagent thread, contains context about the parent
726 subagent_context: Option<SubagentContext>,
727 /// Weak references to running subagent threads for cancellation propagation
728 running_subagents: Vec<WeakEntity<Thread>>,
729}
730
731impl Thread {
732 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
733 let image = model.map_or(true, |model| model.supports_images());
734 acp::PromptCapabilities::new()
735 .image(image)
736 .embedded_context(true)
737 }
738
739 pub fn new(
740 project: Entity<Project>,
741 project_context: Entity<ProjectContext>,
742 context_server_registry: Entity<ContextServerRegistry>,
743 templates: Arc<Templates>,
744 model: Option<Arc<dyn LanguageModel>>,
745 cx: &mut Context<Self>,
746 ) -> Self {
747 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
748 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
749 let (prompt_capabilities_tx, prompt_capabilities_rx) =
750 watch::channel(Self::prompt_capabilities(model.as_deref()));
751 Self {
752 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
753 prompt_id: PromptId::new(),
754 updated_at: Utc::now(),
755 title: None,
756 pending_title_generation: None,
757 pending_summary_generation: None,
758 summary: None,
759 messages: Vec::new(),
760 user_store: project.read(cx).user_store(),
761 running_turn: None,
762 queued_messages: Vec::new(),
763 pending_message: None,
764 tools: BTreeMap::default(),
765 request_token_usage: HashMap::default(),
766 cumulative_token_usage: TokenUsage::default(),
767 initial_project_snapshot: {
768 let project_snapshot = Self::project_snapshot(project.clone(), cx);
769 cx.foreground_executor()
770 .spawn(async move { Some(project_snapshot.await) })
771 .shared()
772 },
773 context_server_registry,
774 profile_id,
775 project_context,
776 templates,
777 model,
778 summarization_model: None,
779 prompt_capabilities_tx,
780 prompt_capabilities_rx,
781 project,
782 action_log,
783 file_read_times: HashMap::default(),
784 imported: false,
785 subagent_context: None,
786 running_subagents: Vec::new(),
787 }
788 }
789
790 pub fn new_subagent(
791 project: Entity<Project>,
792 project_context: Entity<ProjectContext>,
793 context_server_registry: Entity<ContextServerRegistry>,
794 templates: Arc<Templates>,
795 model: Arc<dyn LanguageModel>,
796 subagent_context: SubagentContext,
797 parent_tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
798 cx: &mut Context<Self>,
799 ) -> Self {
800 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
801 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
802 let (prompt_capabilities_tx, prompt_capabilities_rx) =
803 watch::channel(Self::prompt_capabilities(Some(model.as_ref())));
804 Self {
805 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
806 prompt_id: PromptId::new(),
807 updated_at: Utc::now(),
808 title: None,
809 pending_title_generation: None,
810 pending_summary_generation: None,
811 summary: None,
812 messages: Vec::new(),
813 user_store: project.read(cx).user_store(),
814 running_turn: None,
815 queued_messages: Vec::new(),
816 pending_message: None,
817 tools: parent_tools,
818 request_token_usage: HashMap::default(),
819 cumulative_token_usage: TokenUsage::default(),
820 initial_project_snapshot: Task::ready(None).shared(),
821 context_server_registry,
822 profile_id,
823 project_context,
824 templates,
825 model: Some(model),
826 summarization_model: None,
827 prompt_capabilities_tx,
828 prompt_capabilities_rx,
829 project,
830 action_log,
831 file_read_times: HashMap::default(),
832 imported: false,
833 subagent_context: Some(subagent_context),
834 running_subagents: Vec::new(),
835 }
836 }
837
838 pub fn id(&self) -> &acp::SessionId {
839 &self.id
840 }
841
842 /// Returns true if this thread was imported from a shared thread.
843 pub fn is_imported(&self) -> bool {
844 self.imported
845 }
846
847 pub fn replay(
848 &mut self,
849 cx: &mut Context<Self>,
850 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
851 let (tx, rx) = mpsc::unbounded();
852 let stream = ThreadEventStream(tx);
853 for message in &self.messages {
854 match message {
855 Message::User(user_message) => stream.send_user_message(user_message),
856 Message::Agent(assistant_message) => {
857 for content in &assistant_message.content {
858 match content {
859 AgentMessageContent::Text(text) => stream.send_text(text),
860 AgentMessageContent::Thinking { text, .. } => {
861 stream.send_thinking(text)
862 }
863 AgentMessageContent::RedactedThinking(_) => {}
864 AgentMessageContent::ToolUse(tool_use) => {
865 self.replay_tool_call(
866 tool_use,
867 assistant_message.tool_results.get(&tool_use.id),
868 &stream,
869 cx,
870 );
871 }
872 }
873 }
874 }
875 Message::Resume => {}
876 }
877 }
878 rx
879 }
880
881 fn replay_tool_call(
882 &self,
883 tool_use: &LanguageModelToolUse,
884 tool_result: Option<&LanguageModelToolResult>,
885 stream: &ThreadEventStream,
886 cx: &mut Context<Self>,
887 ) {
888 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
889 self.context_server_registry
890 .read(cx)
891 .servers()
892 .find_map(|(_, tools)| {
893 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
894 Some(tool.clone())
895 } else {
896 None
897 }
898 })
899 });
900
901 let Some(tool) = tool else {
902 stream
903 .0
904 .unbounded_send(Ok(ThreadEvent::ToolCall(
905 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
906 .status(acp::ToolCallStatus::Failed)
907 .raw_input(tool_use.input.clone()),
908 )))
909 .ok();
910 return;
911 };
912
913 let title = tool.initial_title(tool_use.input.clone(), cx);
914 let kind = tool.kind();
915 stream.send_tool_call(
916 &tool_use.id,
917 &tool_use.name,
918 title,
919 kind,
920 tool_use.input.clone(),
921 );
922
923 let output = tool_result
924 .as_ref()
925 .and_then(|result| result.output.clone());
926 if let Some(output) = output.clone() {
927 // For replay, we use a dummy cancellation receiver since the tool already completed
928 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
929 let tool_event_stream = ToolCallEventStream::new(
930 tool_use.id.clone(),
931 stream.clone(),
932 Some(self.project.read(cx).fs().clone()),
933 cancellation_rx,
934 );
935 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
936 .log_err();
937 }
938
939 stream.update_tool_call_fields(
940 &tool_use.id,
941 acp::ToolCallUpdateFields::new()
942 .status(
943 tool_result
944 .as_ref()
945 .map_or(acp::ToolCallStatus::Failed, |result| {
946 if result.is_error {
947 acp::ToolCallStatus::Failed
948 } else {
949 acp::ToolCallStatus::Completed
950 }
951 }),
952 )
953 .raw_output(output),
954 );
955 }
956
957 pub fn from_db(
958 id: acp::SessionId,
959 db_thread: DbThread,
960 project: Entity<Project>,
961 project_context: Entity<ProjectContext>,
962 context_server_registry: Entity<ContextServerRegistry>,
963 templates: Arc<Templates>,
964 cx: &mut Context<Self>,
965 ) -> Self {
966 let profile_id = db_thread
967 .profile
968 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
969
970 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
971 db_thread
972 .model
973 .and_then(|model| {
974 let model = SelectedModel {
975 provider: model.provider.clone().into(),
976 model: model.model.into(),
977 };
978 registry.select_model(&model, cx)
979 })
980 .or_else(|| registry.default_model())
981 .map(|model| model.model)
982 });
983
984 if model.is_none() {
985 model = Self::resolve_profile_model(&profile_id, cx);
986 }
987 if model.is_none() {
988 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
989 registry.default_model().map(|model| model.model)
990 });
991 }
992
993 let (prompt_capabilities_tx, prompt_capabilities_rx) =
994 watch::channel(Self::prompt_capabilities(model.as_deref()));
995
996 let action_log = cx.new(|_| ActionLog::new(project.clone()));
997
998 Self {
999 id,
1000 prompt_id: PromptId::new(),
1001 title: if db_thread.title.is_empty() {
1002 None
1003 } else {
1004 Some(db_thread.title.clone())
1005 },
1006 pending_title_generation: None,
1007 pending_summary_generation: None,
1008 summary: db_thread.detailed_summary,
1009 messages: db_thread.messages,
1010 user_store: project.read(cx).user_store(),
1011 running_turn: None,
1012 queued_messages: Vec::new(),
1013 pending_message: None,
1014 tools: BTreeMap::default(),
1015 request_token_usage: db_thread.request_token_usage.clone(),
1016 cumulative_token_usage: db_thread.cumulative_token_usage,
1017 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
1018 context_server_registry,
1019 profile_id,
1020 project_context,
1021 templates,
1022 model,
1023 summarization_model: None,
1024 project,
1025 action_log,
1026 updated_at: db_thread.updated_at,
1027 prompt_capabilities_tx,
1028 prompt_capabilities_rx,
1029 file_read_times: HashMap::default(),
1030 imported: db_thread.imported,
1031 subagent_context: None,
1032 running_subagents: Vec::new(),
1033 }
1034 }
1035
1036 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
1037 let initial_project_snapshot = self.initial_project_snapshot.clone();
1038 let mut thread = DbThread {
1039 title: self.title(),
1040 messages: self.messages.clone(),
1041 updated_at: self.updated_at,
1042 detailed_summary: self.summary.clone(),
1043 initial_project_snapshot: None,
1044 cumulative_token_usage: self.cumulative_token_usage,
1045 request_token_usage: self.request_token_usage.clone(),
1046 model: self.model.as_ref().map(|model| DbLanguageModel {
1047 provider: model.provider_id().to_string(),
1048 model: model.name().0.to_string(),
1049 }),
1050 profile: Some(self.profile_id.clone()),
1051 imported: self.imported,
1052 };
1053
1054 cx.background_spawn(async move {
1055 let initial_project_snapshot = initial_project_snapshot.await;
1056 thread.initial_project_snapshot = initial_project_snapshot;
1057 thread
1058 })
1059 }
1060
1061 /// Create a snapshot of the current project state including git information and unsaved buffers.
1062 fn project_snapshot(
1063 project: Entity<Project>,
1064 cx: &mut Context<Self>,
1065 ) -> Task<Arc<ProjectSnapshot>> {
1066 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
1067 cx.spawn(async move |_, _| {
1068 let snapshot = task.await;
1069
1070 Arc::new(ProjectSnapshot {
1071 worktree_snapshots: snapshot.worktree_snapshots,
1072 timestamp: Utc::now(),
1073 })
1074 })
1075 }
1076
1077 pub fn project_context(&self) -> &Entity<ProjectContext> {
1078 &self.project_context
1079 }
1080
1081 pub fn project(&self) -> &Entity<Project> {
1082 &self.project
1083 }
1084
1085 pub fn action_log(&self) -> &Entity<ActionLog> {
1086 &self.action_log
1087 }
1088
1089 pub fn is_empty(&self) -> bool {
1090 self.messages.is_empty() && self.title.is_none()
1091 }
1092
1093 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
1094 self.model.as_ref()
1095 }
1096
1097 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
1098 let old_usage = self.latest_token_usage();
1099 self.model = Some(model);
1100 let new_caps = Self::prompt_capabilities(self.model.as_deref());
1101 let new_usage = self.latest_token_usage();
1102 if old_usage != new_usage {
1103 cx.emit(TokenUsageUpdated(new_usage));
1104 }
1105 self.prompt_capabilities_tx.send(new_caps).log_err();
1106 cx.notify()
1107 }
1108
1109 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
1110 self.summarization_model.as_ref()
1111 }
1112
1113 pub fn set_summarization_model(
1114 &mut self,
1115 model: Option<Arc<dyn LanguageModel>>,
1116 cx: &mut Context<Self>,
1117 ) {
1118 self.summarization_model = model;
1119 cx.notify()
1120 }
1121
1122 pub fn last_message(&self) -> Option<Message> {
1123 if let Some(message) = self.pending_message.clone() {
1124 Some(Message::Agent(message))
1125 } else {
1126 self.messages.last().cloned()
1127 }
1128 }
1129
1130 pub fn add_default_tools(
1131 &mut self,
1132 environment: Rc<dyn ThreadEnvironment>,
1133 cx: &mut Context<Self>,
1134 ) {
1135 let language_registry = self.project.read(cx).languages().clone();
1136 self.add_tool(CopyPathTool::new(self.project.clone()));
1137 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1138 self.add_tool(DeletePathTool::new(
1139 self.project.clone(),
1140 self.action_log.clone(),
1141 ));
1142 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1143 self.add_tool(EditFileTool::new(
1144 self.project.clone(),
1145 cx.weak_entity(),
1146 language_registry,
1147 Templates::new(),
1148 ));
1149 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1150 self.add_tool(FindPathTool::new(self.project.clone()));
1151 self.add_tool(GrepTool::new(self.project.clone()));
1152 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1153 self.add_tool(MovePathTool::new(self.project.clone()));
1154 self.add_tool(NowTool);
1155 self.add_tool(OpenTool::new(self.project.clone()));
1156 self.add_tool(ReadFileTool::new(
1157 cx.weak_entity(),
1158 self.project.clone(),
1159 self.action_log.clone(),
1160 ));
1161 self.add_tool(SaveFileTool::new(self.project.clone()));
1162 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1163 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1164 self.add_tool(ThinkingTool);
1165 self.add_tool(WebSearchTool);
1166
1167 if cx.has_flag::<SubagentsFeatureFlag>() && self.depth() < MAX_SUBAGENT_DEPTH {
1168 let parent_tools = self.tools.clone();
1169 self.add_tool(SubagentTool::new(
1170 cx.weak_entity(),
1171 self.project.clone(),
1172 self.project_context.clone(),
1173 self.context_server_registry.clone(),
1174 self.templates.clone(),
1175 self.depth(),
1176 parent_tools,
1177 ));
1178 }
1179 }
1180
1181 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1182 self.tools.insert(T::name().into(), tool.erase());
1183 }
1184
1185 pub fn remove_tool(&mut self, name: &str) -> bool {
1186 self.tools.remove(name).is_some()
1187 }
1188
1189 pub fn restrict_tools(&mut self, allowed: &collections::HashSet<SharedString>) {
1190 self.tools.retain(|name, _| allowed.contains(name));
1191 }
1192
1193 pub fn profile(&self) -> &AgentProfileId {
1194 &self.profile_id
1195 }
1196
1197 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1198 if self.profile_id == profile_id {
1199 return;
1200 }
1201
1202 self.profile_id = profile_id;
1203
1204 // Swap to the profile's preferred model when available.
1205 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1206 self.set_model(model, cx);
1207 }
1208 }
1209
1210 pub fn cancel(&mut self, cx: &mut Context<Self>) -> Task<()> {
1211 for subagent in self.running_subagents.drain(..) {
1212 if let Some(subagent) = subagent.upgrade() {
1213 subagent.update(cx, |thread, cx| thread.cancel(cx)).detach();
1214 }
1215 }
1216
1217 let Some(running_turn) = self.running_turn.take() else {
1218 self.flush_pending_message(cx);
1219 return Task::ready(());
1220 };
1221
1222 let turn_task = running_turn.cancel();
1223
1224 cx.spawn(async move |this, cx| {
1225 turn_task.await;
1226 this.update(cx, |this, cx| {
1227 this.flush_pending_message(cx);
1228 })
1229 .ok();
1230 })
1231 }
1232
1233 pub fn queue_message(
1234 &mut self,
1235 content: Vec<acp::ContentBlock>,
1236 tracked_buffers: Vec<Entity<Buffer>>,
1237 ) {
1238 self.queued_messages.push(QueuedMessage {
1239 content,
1240 tracked_buffers,
1241 });
1242 }
1243
1244 pub fn queued_messages(&self) -> &[QueuedMessage] {
1245 &self.queued_messages
1246 }
1247
1248 pub fn remove_queued_message(&mut self, index: usize) -> Option<QueuedMessage> {
1249 if index < self.queued_messages.len() {
1250 Some(self.queued_messages.remove(index))
1251 } else {
1252 None
1253 }
1254 }
1255
1256 pub fn clear_queued_messages(&mut self) {
1257 self.queued_messages.clear();
1258 }
1259
1260 fn has_queued_messages(&self) -> bool {
1261 !self.queued_messages.is_empty()
1262 }
1263
1264 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1265 let Some(last_user_message) = self.last_user_message() else {
1266 return;
1267 };
1268
1269 self.request_token_usage
1270 .insert(last_user_message.id.clone(), update);
1271 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1272 cx.notify();
1273 }
1274
1275 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1276 self.cancel(cx).detach();
1277 // Clear pending message since cancel will try to flush it asynchronously,
1278 // and we don't want that content to be added after we truncate
1279 self.pending_message.take();
1280 let Some(position) = self.messages.iter().position(
1281 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1282 ) else {
1283 return Err(anyhow!("Message not found"));
1284 };
1285
1286 for message in self.messages.drain(position..) {
1287 match message {
1288 Message::User(message) => {
1289 self.request_token_usage.remove(&message.id);
1290 }
1291 Message::Agent(_) | Message::Resume => {}
1292 }
1293 }
1294 self.clear_summary();
1295 cx.notify();
1296 Ok(())
1297 }
1298
1299 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1300 let last_user_message = self.last_user_message()?;
1301 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1302 Some(*tokens)
1303 }
1304
1305 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1306 let usage = self.latest_request_token_usage()?;
1307 let model = self.model.clone()?;
1308 Some(acp_thread::TokenUsage {
1309 max_tokens: model.max_token_count(),
1310 used_tokens: usage.total_tokens(),
1311 input_tokens: usage.input_tokens,
1312 output_tokens: usage.output_tokens,
1313 })
1314 }
1315
1316 /// Get the total input token count as of the message before the given message.
1317 ///
1318 /// Returns `None` if:
1319 /// - `target_id` is the first message (no previous message)
1320 /// - The previous message hasn't received a response yet (no usage data)
1321 /// - `target_id` is not found in the messages
1322 pub fn tokens_before_message(&self, target_id: &UserMessageId) -> Option<u64> {
1323 let mut previous_user_message_id: Option<&UserMessageId> = None;
1324
1325 for message in &self.messages {
1326 if let Message::User(user_msg) = message {
1327 if &user_msg.id == target_id {
1328 let prev_id = previous_user_message_id?;
1329 let usage = self.request_token_usage.get(prev_id)?;
1330 return Some(usage.input_tokens);
1331 }
1332 previous_user_message_id = Some(&user_msg.id);
1333 }
1334 }
1335 None
1336 }
1337
1338 /// Look up the active profile and resolve its preferred model if one is configured.
1339 fn resolve_profile_model(
1340 profile_id: &AgentProfileId,
1341 cx: &mut Context<Self>,
1342 ) -> Option<Arc<dyn LanguageModel>> {
1343 let selection = AgentSettings::get_global(cx)
1344 .profiles
1345 .get(profile_id)?
1346 .default_model
1347 .clone()?;
1348 Self::resolve_model_from_selection(&selection, cx)
1349 }
1350
1351 /// Translate a stored model selection into the configured model from the registry.
1352 fn resolve_model_from_selection(
1353 selection: &LanguageModelSelection,
1354 cx: &mut Context<Self>,
1355 ) -> Option<Arc<dyn LanguageModel>> {
1356 let selected = SelectedModel {
1357 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1358 model: LanguageModelId::from(selection.model.clone()),
1359 };
1360 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1361 registry
1362 .select_model(&selected, cx)
1363 .map(|configured| configured.model)
1364 })
1365 }
1366
1367 pub fn resume(
1368 &mut self,
1369 cx: &mut Context<Self>,
1370 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1371 self.messages.push(Message::Resume);
1372 cx.notify();
1373
1374 log::debug!("Total messages in thread: {}", self.messages.len());
1375 self.run_turn(cx)
1376 }
1377
1378 /// Sending a message results in the model streaming a response, which could include tool calls.
1379 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1380 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1381 pub fn send<T>(
1382 &mut self,
1383 id: UserMessageId,
1384 content: impl IntoIterator<Item = T>,
1385 cx: &mut Context<Self>,
1386 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1387 where
1388 T: Into<UserMessageContent>,
1389 {
1390 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1391 log::debug!("Thread::send content: {:?}", content);
1392
1393 self.messages
1394 .push(Message::User(UserMessage { id, content }));
1395 cx.notify();
1396
1397 self.send_existing(cx)
1398 }
1399
1400 pub fn send_existing(
1401 &mut self,
1402 cx: &mut Context<Self>,
1403 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1404 let model = self.model().context("No language model configured")?;
1405
1406 log::info!("Thread::send called with model: {}", model.name().0);
1407 self.advance_prompt_id();
1408
1409 log::debug!("Total messages in thread: {}", self.messages.len());
1410 self.run_turn(cx)
1411 }
1412
1413 pub fn push_acp_user_block(
1414 &mut self,
1415 id: UserMessageId,
1416 blocks: impl IntoIterator<Item = acp::ContentBlock>,
1417 path_style: PathStyle,
1418 cx: &mut Context<Self>,
1419 ) {
1420 let content = blocks
1421 .into_iter()
1422 .map(|block| UserMessageContent::from_content_block(block, path_style))
1423 .collect::<Vec<_>>();
1424 self.messages
1425 .push(Message::User(UserMessage { id, content }));
1426 cx.notify();
1427 }
1428
1429 pub fn push_acp_agent_block(&mut self, block: acp::ContentBlock, cx: &mut Context<Self>) {
1430 let text = match block {
1431 acp::ContentBlock::Text(text_content) => text_content.text,
1432 acp::ContentBlock::Image(_) => "[image]".to_string(),
1433 acp::ContentBlock::Audio(_) => "[audio]".to_string(),
1434 acp::ContentBlock::ResourceLink(resource_link) => resource_link.uri,
1435 acp::ContentBlock::Resource(resource) => match resource.resource {
1436 acp::EmbeddedResourceResource::TextResourceContents(resource) => resource.uri,
1437 acp::EmbeddedResourceResource::BlobResourceContents(resource) => resource.uri,
1438 _ => "[resource]".to_string(),
1439 },
1440 _ => "[unknown]".to_string(),
1441 };
1442
1443 self.messages.push(Message::Agent(AgentMessage {
1444 content: vec![AgentMessageContent::Text(text)],
1445 ..Default::default()
1446 }));
1447 cx.notify();
1448 }
1449
1450 #[cfg(feature = "eval")]
1451 pub fn proceed(
1452 &mut self,
1453 cx: &mut Context<Self>,
1454 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1455 self.run_turn(cx)
1456 }
1457
1458 fn run_turn(
1459 &mut self,
1460 cx: &mut Context<Self>,
1461 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1462 // Flush the old pending message synchronously before cancelling,
1463 // to avoid a race where the detached cancel task might flush the NEW
1464 // turn's pending message instead of the old one.
1465 self.flush_pending_message(cx);
1466 self.cancel(cx).detach();
1467
1468 let model = self.model.clone().context("No language model configured")?;
1469 let profile = AgentSettings::get_global(cx)
1470 .profiles
1471 .get(&self.profile_id)
1472 .context("Profile not found")?;
1473 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1474 let event_stream = ThreadEventStream(events_tx);
1475 let message_ix = self.messages.len().saturating_sub(1);
1476 self.clear_summary();
1477 let (cancellation_tx, mut cancellation_rx) = watch::channel(false);
1478 self.running_turn = Some(RunningTurn {
1479 event_stream: event_stream.clone(),
1480 tools: self.enabled_tools(profile, &model, cx),
1481 cancellation_tx,
1482 _task: cx.spawn(async move |this, cx| {
1483 log::debug!("Starting agent turn execution");
1484
1485 let turn_result = Self::run_turn_internal(
1486 &this,
1487 model,
1488 &event_stream,
1489 cancellation_rx.clone(),
1490 cx,
1491 )
1492 .await;
1493
1494 // Check if we were cancelled - if so, cancel() already took running_turn
1495 // and we shouldn't touch it (it might be a NEW turn now)
1496 let was_cancelled = *cancellation_rx.borrow();
1497 if was_cancelled {
1498 log::debug!("Turn was cancelled, skipping cleanup");
1499 return;
1500 }
1501
1502 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1503
1504 match turn_result {
1505 Ok(()) => {
1506 log::debug!("Turn execution completed");
1507 event_stream.send_stop(acp::StopReason::EndTurn);
1508 }
1509 Err(error) => {
1510 log::error!("Turn execution failed: {:?}", error);
1511 match error.downcast::<CompletionError>() {
1512 Ok(CompletionError::Refusal) => {
1513 event_stream.send_stop(acp::StopReason::Refusal);
1514 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1515 }
1516 Ok(CompletionError::MaxTokens) => {
1517 event_stream.send_stop(acp::StopReason::MaxTokens);
1518 }
1519 Ok(CompletionError::Other(error)) | Err(error) => {
1520 event_stream.send_error(error);
1521 }
1522 }
1523 }
1524 }
1525
1526 _ = this.update(cx, |this, _| this.running_turn.take());
1527 }),
1528 });
1529 Ok(events_rx)
1530 }
1531
1532 async fn run_turn_internal(
1533 this: &WeakEntity<Self>,
1534 model: Arc<dyn LanguageModel>,
1535 event_stream: &ThreadEventStream,
1536 mut cancellation_rx: watch::Receiver<bool>,
1537 cx: &mut AsyncApp,
1538 ) -> Result<()> {
1539 let mut attempt = 0;
1540 let mut intent = CompletionIntent::UserPrompt;
1541 loop {
1542 let request =
1543 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1544
1545 telemetry::event!(
1546 "Agent Thread Completion",
1547 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1548 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1549 model = model.telemetry_id(),
1550 model_provider = model.provider_id().to_string(),
1551 attempt
1552 );
1553
1554 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1555
1556 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1557 Ok(events) => (events.fuse(), None),
1558 Err(err) => (stream::empty().boxed().fuse(), Some(err)),
1559 };
1560 let mut tool_results = FuturesUnordered::new();
1561 let mut cancelled = false;
1562 loop {
1563 // Race between getting the first event and cancellation
1564 let first_event = futures::select! {
1565 event = events.next().fuse() => event,
1566 _ = cancellation_rx.changed().fuse() => {
1567 if *cancellation_rx.borrow() {
1568 cancelled = true;
1569 break;
1570 }
1571 continue;
1572 }
1573 };
1574 let Some(first_event) = first_event else {
1575 break;
1576 };
1577
1578 // Collect all immediately available events to process as a batch
1579 let mut batch = vec![first_event];
1580 while let Some(event) = events.next().now_or_never().flatten() {
1581 batch.push(event);
1582 }
1583
1584 // Process the batch in a single update
1585 let batch_result = this.update(cx, |this, cx| {
1586 let mut batch_tool_results = Vec::new();
1587 let mut batch_error = None;
1588
1589 for event in batch {
1590 log::trace!("Received completion event: {:?}", event);
1591 match event {
1592 Ok(event) => {
1593 match this.handle_completion_event(
1594 event,
1595 event_stream,
1596 cancellation_rx.clone(),
1597 cx,
1598 ) {
1599 Ok(Some(task)) => batch_tool_results.push(task),
1600 Ok(None) => {}
1601 Err(err) => {
1602 batch_error = Some(err);
1603 break;
1604 }
1605 }
1606 }
1607 Err(err) => {
1608 batch_error = Some(err.into());
1609 break;
1610 }
1611 }
1612 }
1613
1614 cx.notify();
1615 (batch_tool_results, batch_error)
1616 })?;
1617
1618 tool_results.extend(batch_result.0);
1619 if let Some(err) = batch_result.1 {
1620 error = Some(err.downcast()?);
1621 break;
1622 }
1623 }
1624
1625 let end_turn = tool_results.is_empty();
1626 while let Some(tool_result) = tool_results.next().await {
1627 log::debug!("Tool finished {:?}", tool_result);
1628
1629 event_stream.update_tool_call_fields(
1630 &tool_result.tool_use_id,
1631 acp::ToolCallUpdateFields::new()
1632 .status(if tool_result.is_error {
1633 acp::ToolCallStatus::Failed
1634 } else {
1635 acp::ToolCallStatus::Completed
1636 })
1637 .raw_output(tool_result.output.clone()),
1638 );
1639 this.update(cx, |this, _cx| {
1640 this.pending_message()
1641 .tool_results
1642 .insert(tool_result.tool_use_id.clone(), tool_result);
1643 })?;
1644 }
1645
1646 this.update(cx, |this, cx| {
1647 this.flush_pending_message(cx);
1648 if this.title.is_none() && this.pending_title_generation.is_none() {
1649 this.generate_title(cx);
1650 }
1651 })?;
1652
1653 if cancelled {
1654 log::debug!("Turn cancelled by user, exiting");
1655 return Ok(());
1656 }
1657
1658 if let Some(error) = error {
1659 attempt += 1;
1660 let retry = this.update(cx, |this, cx| {
1661 let user_store = this.user_store.read(cx);
1662 this.handle_completion_error(error, attempt, user_store.plan())
1663 })??;
1664 let timer = cx.background_executor().timer(retry.duration);
1665 event_stream.send_retry(retry);
1666 timer.await;
1667 this.update(cx, |this, _cx| {
1668 if let Some(Message::Agent(message)) = this.messages.last() {
1669 if message.tool_results.is_empty() {
1670 intent = CompletionIntent::UserPrompt;
1671 this.messages.push(Message::Resume);
1672 }
1673 }
1674 })?;
1675 } else if end_turn {
1676 return Ok(());
1677 } else {
1678 let has_queued = this.update(cx, |this, _| this.has_queued_messages())?;
1679 if has_queued {
1680 log::debug!("Queued message found, ending turn at message boundary");
1681 return Ok(());
1682 }
1683 intent = CompletionIntent::ToolResults;
1684 attempt = 0;
1685 }
1686 }
1687 }
1688
1689 fn handle_completion_error(
1690 &mut self,
1691 error: LanguageModelCompletionError,
1692 attempt: u8,
1693 plan: Option<Plan>,
1694 ) -> Result<acp_thread::RetryStatus> {
1695 let Some(model) = self.model.as_ref() else {
1696 return Err(anyhow!(error));
1697 };
1698
1699 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1700 match plan {
1701 Some(Plan::V2(_)) => true,
1702 None => false,
1703 }
1704 } else {
1705 true
1706 };
1707
1708 if !auto_retry {
1709 return Err(anyhow!(error));
1710 }
1711
1712 let Some(strategy) = Self::retry_strategy_for(&error) else {
1713 return Err(anyhow!(error));
1714 };
1715
1716 let max_attempts = match &strategy {
1717 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1718 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1719 };
1720
1721 if attempt > max_attempts {
1722 return Err(anyhow!(error));
1723 }
1724
1725 let delay = match &strategy {
1726 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1727 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1728 Duration::from_secs(delay_secs)
1729 }
1730 RetryStrategy::Fixed { delay, .. } => *delay,
1731 };
1732 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1733
1734 Ok(acp_thread::RetryStatus {
1735 last_error: error.to_string().into(),
1736 attempt: attempt as usize,
1737 max_attempts: max_attempts as usize,
1738 started_at: Instant::now(),
1739 duration: delay,
1740 })
1741 }
1742
1743 /// A helper method that's called on every streamed completion event.
1744 /// Returns an optional tool result task, which the main agentic loop will
1745 /// send back to the model when it resolves.
1746 fn handle_completion_event(
1747 &mut self,
1748 event: LanguageModelCompletionEvent,
1749 event_stream: &ThreadEventStream,
1750 cancellation_rx: watch::Receiver<bool>,
1751 cx: &mut Context<Self>,
1752 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1753 log::trace!("Handling streamed completion event: {:?}", event);
1754 use LanguageModelCompletionEvent::*;
1755
1756 match event {
1757 StartMessage { .. } => {
1758 self.flush_pending_message(cx);
1759 self.pending_message = Some(AgentMessage::default());
1760 }
1761 Text(new_text) => self.handle_text_event(new_text, event_stream),
1762 Thinking { text, signature } => {
1763 self.handle_thinking_event(text, signature, event_stream)
1764 }
1765 RedactedThinking { data } => self.handle_redacted_thinking_event(data),
1766 ReasoningDetails(details) => {
1767 let last_message = self.pending_message();
1768 // Store the last non-empty reasoning_details (overwrites earlier ones)
1769 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1770 if let serde_json::Value::Array(ref arr) = details {
1771 if !arr.is_empty() {
1772 last_message.reasoning_details = Some(details);
1773 }
1774 } else {
1775 last_message.reasoning_details = Some(details);
1776 }
1777 }
1778 ToolUse(tool_use) => {
1779 return Ok(self.handle_tool_use_event(tool_use, event_stream, cancellation_rx, cx));
1780 }
1781 ToolUseJsonParseError {
1782 id,
1783 tool_name,
1784 raw_input,
1785 json_parse_error,
1786 } => {
1787 return Ok(Some(Task::ready(
1788 self.handle_tool_use_json_parse_error_event(
1789 id,
1790 tool_name,
1791 raw_input,
1792 json_parse_error,
1793 ),
1794 )));
1795 }
1796 UsageUpdate(usage) => {
1797 telemetry::event!(
1798 "Agent Thread Completion Usage Updated",
1799 thread_id = self.id.to_string(),
1800 prompt_id = self.prompt_id.to_string(),
1801 model = self.model.as_ref().map(|m| m.telemetry_id()),
1802 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1803 input_tokens = usage.input_tokens,
1804 output_tokens = usage.output_tokens,
1805 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1806 cache_read_input_tokens = usage.cache_read_input_tokens,
1807 );
1808 self.update_token_usage(usage, cx);
1809 }
1810 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1811 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1812 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1813 Started | Queued { .. } => {}
1814 }
1815
1816 Ok(None)
1817 }
1818
1819 fn handle_text_event(&mut self, new_text: String, event_stream: &ThreadEventStream) {
1820 event_stream.send_text(&new_text);
1821
1822 let last_message = self.pending_message();
1823 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1824 text.push_str(&new_text);
1825 } else {
1826 last_message
1827 .content
1828 .push(AgentMessageContent::Text(new_text));
1829 }
1830 }
1831
1832 fn handle_thinking_event(
1833 &mut self,
1834 new_text: String,
1835 new_signature: Option<String>,
1836 event_stream: &ThreadEventStream,
1837 ) {
1838 event_stream.send_thinking(&new_text);
1839
1840 let last_message = self.pending_message();
1841 if let Some(AgentMessageContent::Thinking { text, signature }) =
1842 last_message.content.last_mut()
1843 {
1844 text.push_str(&new_text);
1845 *signature = new_signature.or(signature.take());
1846 } else {
1847 last_message.content.push(AgentMessageContent::Thinking {
1848 text: new_text,
1849 signature: new_signature,
1850 });
1851 }
1852 }
1853
1854 fn handle_redacted_thinking_event(&mut self, data: String) {
1855 let last_message = self.pending_message();
1856 last_message
1857 .content
1858 .push(AgentMessageContent::RedactedThinking(data));
1859 }
1860
1861 fn handle_tool_use_event(
1862 &mut self,
1863 tool_use: LanguageModelToolUse,
1864 event_stream: &ThreadEventStream,
1865 cancellation_rx: watch::Receiver<bool>,
1866 cx: &mut Context<Self>,
1867 ) -> Option<Task<LanguageModelToolResult>> {
1868 cx.notify();
1869
1870 let tool = self.tool(tool_use.name.as_ref());
1871 let mut title = SharedString::from(&tool_use.name);
1872 let mut kind = acp::ToolKind::Other;
1873 if let Some(tool) = tool.as_ref() {
1874 title = tool.initial_title(tool_use.input.clone(), cx);
1875 kind = tool.kind();
1876 }
1877
        // Ensure the last message ends with the current tool use.
1879 let last_message = self.pending_message();
1880 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1881 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1882 if last_tool_use.id == tool_use.id {
1883 *last_tool_use = tool_use.clone();
1884 false
1885 } else {
1886 true
1887 }
1888 } else {
1889 true
1890 }
1891 });
1892
1893 if push_new_tool_use {
1894 event_stream.send_tool_call(
1895 &tool_use.id,
1896 &tool_use.name,
1897 title,
1898 kind,
1899 tool_use.input.clone(),
1900 );
1901 last_message
1902 .content
1903 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1904 } else {
1905 event_stream.update_tool_call_fields(
1906 &tool_use.id,
1907 acp::ToolCallUpdateFields::new()
1908 .title(title.as_str())
1909 .kind(kind)
1910 .raw_input(tool_use.input.clone()),
1911 );
1912 }
1913
1914 if !tool_use.is_input_complete {
1915 return None;
1916 }
1917
1918 let Some(tool) = tool else {
1919 let content = format!("No tool named {} exists", tool_use.name);
1920 return Some(Task::ready(LanguageModelToolResult {
1921 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1922 tool_use_id: tool_use.id,
1923 tool_name: tool_use.name,
1924 is_error: true,
1925 output: None,
1926 }));
1927 };
1928
1929 let fs = self.project.read(cx).fs().clone();
1930 let tool_event_stream = ToolCallEventStream::new(
1931 tool_use.id.clone(),
1932 event_stream.clone(),
1933 Some(fs),
1934 cancellation_rx,
1935 );
1936 tool_event_stream.update_fields(
1937 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1938 );
1939 let supports_images = self.model().is_some_and(|model| model.supports_images());
1940 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1941 log::debug!("Running tool {}", tool_use.name);
1942 Some(cx.foreground_executor().spawn(async move {
1943 let tool_result = tool_result.await.and_then(|output| {
1944 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1945 && !supports_images
1946 {
1947 return Err(anyhow!(
1948 "Attempted to read an image, but this model doesn't support it.",
1949 ));
1950 }
1951 Ok(output)
1952 });
1953
1954 match tool_result {
1955 Ok(output) => LanguageModelToolResult {
1956 tool_use_id: tool_use.id,
1957 tool_name: tool_use.name,
1958 is_error: false,
1959 content: output.llm_output,
1960 output: Some(output.raw_output),
1961 },
1962 Err(error) => LanguageModelToolResult {
1963 tool_use_id: tool_use.id,
1964 tool_name: tool_use.name,
1965 is_error: true,
1966 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1967 output: Some(error.to_string().into()),
1968 },
1969 }
1970 }))
1971 }
1972
1973 fn handle_tool_use_json_parse_error_event(
1974 &mut self,
1975 tool_use_id: LanguageModelToolUseId,
1976 tool_name: Arc<str>,
1977 raw_input: Arc<str>,
1978 json_parse_error: String,
1979 ) -> LanguageModelToolResult {
1980 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1981 LanguageModelToolResult {
1982 tool_use_id,
1983 tool_name,
1984 is_error: true,
1985 content: LanguageModelToolResultContent::Text(tool_output.into()),
1986 output: Some(serde_json::Value::String(raw_input.to_string())),
1987 }
1988 }
1989
1990 pub fn title(&self) -> SharedString {
1991 self.title.clone().unwrap_or("New Thread".into())
1992 }
1993
1994 pub fn is_generating_summary(&self) -> bool {
1995 self.pending_summary_generation.is_some()
1996 }
1997
1998 pub fn is_generating_title(&self) -> bool {
1999 self.pending_title_generation.is_some()
2000 }
2001
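    /// Returns the thread summary, generating it on demand.
    ///
    /// A cached summary resolves immediately; otherwise a single summarization
    /// request is started with the configured summarization model and
    /// `SUMMARIZE_THREAD_DETAILED_PROMPT`, and the shared task is reused by
    /// concurrent callers.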
2002 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
2003 if let Some(summary) = self.summary.as_ref() {
2004 return Task::ready(Some(summary.clone())).shared();
2005 }
2006 if let Some(task) = self.pending_summary_generation.clone() {
2007 return task;
2008 }
2009 let Some(model) = self.summarization_model.clone() else {
2010 log::error!("No summarization model available");
2011 return Task::ready(None).shared();
2012 };
2013 let mut request = LanguageModelRequest {
2014 intent: Some(CompletionIntent::ThreadContextSummarization),
2015 temperature: AgentSettings::temperature_for_model(&model, cx),
2016 ..Default::default()
2017 };
2018
2019 for message in &self.messages {
2020 request.messages.extend(message.to_request());
2021 }
2022
2023 request.messages.push(LanguageModelRequestMessage {
2024 role: Role::User,
2025 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
2026 cache: false,
2027 reasoning_details: None,
2028 });
2029
2030 let task = cx
2031 .spawn(async move |this, cx| {
2032 let mut summary = String::new();
2033 let mut messages = model.stream_completion(request, cx).await.log_err()?;
2034 while let Some(event) = messages.next().await {
2035 let event = event.log_err()?;
2036 let text = match event {
2037 LanguageModelCompletionEvent::Text(text) => text,
2038 _ => continue,
2039 };
2040
2041 let mut lines = text.lines();
2042 summary.extend(lines.next());
2043 }
2044
2045 log::debug!("Setting summary: {}", summary);
2046 let summary = SharedString::from(summary);
2047
2048 this.update(cx, |this, cx| {
2049 this.summary = Some(summary.clone());
2050 this.pending_summary_generation = None;
2051 cx.notify()
2052 })
2053 .ok()?;
2054
2055 Some(summary)
2056 })
2057 .shared();
2058 self.pending_summary_generation = Some(task.clone());
2059 task
2060 }
2061
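    /// Starts generating a short, single-line title for the thread using the
    /// summarization model and `SUMMARIZE_THREAD_PROMPT`. On success the result
    /// is applied via [`Self::set_title`].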
2062 pub fn generate_title(&mut self, cx: &mut Context<Self>) {
2063 let Some(model) = self.summarization_model.clone() else {
2064 return;
2065 };
2066
2067 log::debug!(
2068 "Generating title with model: {:?}",
2069 self.summarization_model.as_ref().map(|model| model.name())
2070 );
2071 let mut request = LanguageModelRequest {
2072 intent: Some(CompletionIntent::ThreadSummarization),
2073 temperature: AgentSettings::temperature_for_model(&model, cx),
2074 ..Default::default()
2075 };
2076
2077 for message in &self.messages {
2078 request.messages.extend(message.to_request());
2079 }
2080
2081 request.messages.push(LanguageModelRequestMessage {
2082 role: Role::User,
2083 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
2084 cache: false,
2085 reasoning_details: None,
2086 });
2087 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
2088 let mut title = String::new();
2089
2090 let generate = async {
2091 let mut messages = model.stream_completion(request, cx).await?;
2092 while let Some(event) = messages.next().await {
2093 let event = event?;
2094 let text = match event {
2095 LanguageModelCompletionEvent::Text(text) => text,
2096 _ => continue,
2097 };
2098
2099 let mut lines = text.lines();
2100 title.extend(lines.next());
2101
2102 // Stop if the LLM generated multiple lines.
2103 if lines.next().is_some() {
2104 break;
2105 }
2106 }
2107 anyhow::Ok(())
2108 };
2109
2110 if generate.await.context("failed to generate title").is_ok() {
2111 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
2112 }
2113 _ = this.update(cx, |this, _| this.pending_title_generation = None);
2114 }));
2115 }
2116
2117 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
2118 self.pending_title_generation = None;
2119 if Some(&title) != self.title.as_ref() {
2120 self.title = Some(title);
2121 cx.emit(TitleUpdated);
2122 cx.notify();
2123 }
2124 }
2125
2126 fn clear_summary(&mut self) {
2127 self.summary = None;
2128 self.pending_summary_generation = None;
2129 }
2130
2131 fn last_user_message(&self) -> Option<&UserMessage> {
2132 self.messages
2133 .iter()
2134 .rev()
2135 .find_map(|message| match message {
2136 Message::User(user_message) => Some(user_message),
2137 Message::Agent(_) => None,
2138 Message::Resume => None,
2139 })
2140 }
2141
2142 fn pending_message(&mut self) -> &mut AgentMessage {
2143 self.pending_message.get_or_insert_default()
2144 }
2145
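    /// Moves the pending agent message into the message history. Tool uses that
    /// never received a result get a synthetic "canceled" error result so the
    /// transcript remains well-formed.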
2146 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
2147 let Some(mut message) = self.pending_message.take() else {
2148 return;
2149 };
2150
2151 if message.content.is_empty() {
2152 return;
2153 }
2154
2155 for content in &message.content {
2156 let AgentMessageContent::ToolUse(tool_use) = content else {
2157 continue;
2158 };
2159
2160 if !message.tool_results.contains_key(&tool_use.id) {
2161 message.tool_results.insert(
2162 tool_use.id.clone(),
2163 LanguageModelToolResult {
2164 tool_use_id: tool_use.id.clone(),
2165 tool_name: tool_use.name.clone(),
2166 is_error: true,
2167 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
2168 output: None,
2169 },
2170 );
2171 }
2172 }
2173
2174 self.messages.push(Message::Agent(message));
2175 self.updated_at = Utc::now();
2176 self.clear_summary();
2177 cx.notify()
2178 }
2179
2180 pub(crate) fn build_completion_request(
2181 &self,
2182 completion_intent: CompletionIntent,
2183 cx: &App,
2184 ) -> Result<LanguageModelRequest> {
2185 let model = self.model().context("No language model configured")?;
2186 let tools = if let Some(turn) = self.running_turn.as_ref() {
2187 turn.tools
2188 .iter()
2189 .filter_map(|(tool_name, tool)| {
2190 log::trace!("Including tool: {}", tool_name);
2191 Some(LanguageModelRequestTool {
2192 name: tool_name.to_string(),
2193 description: tool.description().to_string(),
2194 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
2195 })
2196 })
2197 .collect::<Vec<_>>()
2198 } else {
2199 Vec::new()
2200 };
2201
2202 log::debug!("Building completion request");
2203 log::debug!("Completion intent: {:?}", completion_intent);
2204
2205 let available_tools: Vec<_> = self
2206 .running_turn
2207 .as_ref()
2208 .map(|turn| turn.tools.keys().cloned().collect())
2209 .unwrap_or_default();
2210
2211 log::debug!("Request includes {} tools", available_tools.len());
2212 let messages = self.build_request_messages(available_tools, cx);
2213 log::debug!("Request will include {} messages", messages.len());
2214
2215 let request = LanguageModelRequest {
2216 thread_id: Some(self.id.to_string()),
2217 prompt_id: Some(self.prompt_id.to_string()),
2218 intent: Some(completion_intent),
2219 messages,
2220 tools,
2221 tool_choice: None,
2222 stop: Vec::new(),
2223 temperature: AgentSettings::temperature_for_model(model, cx),
2224 thinking_allowed: true,
2225 };
2226
2227 log::debug!("Completion request built successfully");
2228 Ok(request)
2229 }
2230
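    /// Collects the built-in and context-server tools enabled by the given
    /// profile for the given model, truncating names to `MAX_TOOL_NAME_LENGTH`
    /// and disambiguating duplicate context-server tool names with a server-id
    /// prefix.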
2231 fn enabled_tools(
2232 &self,
2233 profile: &AgentProfileSettings,
2234 model: &Arc<dyn LanguageModel>,
2235 cx: &App,
2236 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
2237 fn truncate(tool_name: &SharedString) -> SharedString {
2238 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
2239 let mut truncated = tool_name.to_string();
2240 truncated.truncate(MAX_TOOL_NAME_LENGTH);
2241 truncated.into()
2242 } else {
2243 tool_name.clone()
2244 }
2245 }
2246
2247 let mut tools = self
2248 .tools
2249 .iter()
2250 .filter_map(|(tool_name, tool)| {
2251 if tool.supports_provider(&model.provider_id())
2252 && profile.is_tool_enabled(tool_name)
2253 {
2254 Some((truncate(tool_name), tool.clone()))
2255 } else {
2256 None
2257 }
2258 })
2259 .collect::<BTreeMap<_, _>>();
2260
2261 let mut context_server_tools = Vec::new();
2262 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
2263 let mut duplicate_tool_names = HashSet::default();
2264 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
2265 for (tool_name, tool) in server_tools {
2266 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
2267 let tool_name = truncate(tool_name);
2268 if !seen_tools.insert(tool_name.clone()) {
2269 duplicate_tool_names.insert(tool_name.clone());
2270 }
2271 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
2272 }
2273 }
2274 }
2275
2276 // When there are duplicate tool names, disambiguate by prefixing them
2277 // with the server ID. In the rare case there isn't enough space for the
2278 // disambiguated tool name, keep only the last tool with this name.
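        // For example, if two servers both expose a `search` tool, each becomes a
        // `<server_id>_search` entry (truncated to fit MAX_TOOL_NAME_LENGTH).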
2279 for (server_id, tool_name, tool) in context_server_tools {
2280 if duplicate_tool_names.contains(&tool_name) {
2281 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
2282 if available >= 2 {
2283 let mut disambiguated = server_id.0.to_string();
2284 disambiguated.truncate(available - 1);
2285 disambiguated.push('_');
2286 disambiguated.push_str(&tool_name);
2287 tools.insert(disambiguated.into(), tool.clone());
2288 } else {
2289 tools.insert(tool_name, tool.clone());
2290 }
2291 } else {
2292 tools.insert(tool_name, tool.clone());
2293 }
2294 }
2295
2296 tools
2297 }
2298
2299 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
2300 self.running_turn.as_ref()?.tools.get(name).cloned()
2301 }
2302
2303 pub fn has_tool(&self, name: &str) -> bool {
2304 self.running_turn
2305 .as_ref()
2306 .is_some_and(|turn| turn.tools.contains_key(name))
2307 }
2308
2309 #[cfg(any(test, feature = "test-support"))]
2310 pub fn has_registered_tool(&self, name: &str) -> bool {
2311 self.tools.contains_key(name)
2312 }
2313
2314 pub fn registered_tool_names(&self) -> Vec<SharedString> {
2315 self.tools.keys().cloned().collect()
2316 }
2317
2318 pub fn register_running_subagent(&mut self, subagent: WeakEntity<Thread>) {
2319 self.running_subagents.push(subagent);
2320 }
2321
2322 pub fn unregister_running_subagent(&mut self, subagent: &WeakEntity<Thread>) {
2323 self.running_subagents
2324 .retain(|s| s.entity_id() != subagent.entity_id());
2325 }
2326
2327 pub fn running_subagent_count(&self) -> usize {
2328 self.running_subagents
2329 .iter()
2330 .filter(|s| s.upgrade().is_some())
2331 .count()
2332 }
2333
2334 pub fn is_subagent(&self) -> bool {
2335 self.subagent_context.is_some()
2336 }
2337
2338 pub fn depth(&self) -> u8 {
2339 self.subagent_context.as_ref().map(|c| c.depth).unwrap_or(0)
2340 }
2341
2342 pub fn is_turn_complete(&self) -> bool {
2343 self.running_turn.is_none()
2344 }
2345
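    /// Appends a plain-text user message to the thread and immediately resends
    /// the conversation, returning the event stream for the new turn.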
2346 pub fn submit_user_message(
2347 &mut self,
2348 content: impl Into<String>,
2349 cx: &mut Context<Self>,
2350 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2351 let content = content.into();
2352 self.messages.push(Message::User(UserMessage {
2353 id: UserMessageId::new(),
2354 content: vec![UserMessageContent::Text(content)],
2355 }));
2356 cx.notify();
2357 self.send_existing(cx)
2358 }
2359
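    /// Cancels a subagent's in-progress turn and submits its `context_low_prompt`,
    /// asking it to summarize before context runs out. Errors if this thread is
    /// not a subagent.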
2360 pub fn interrupt_for_summary(
2361 &mut self,
2362 cx: &mut Context<Self>,
2363 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2364 let context = self
2365 .subagent_context
2366 .as_ref()
2367 .context("Not a subagent thread")?;
2368 let prompt = context.context_low_prompt.clone();
2369 self.cancel(cx).detach();
2370 self.submit_user_message(prompt, cx)
2371 }
2372
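    /// Submits a subagent's `summary_prompt` to request a final summary of its
    /// work. Errors if this thread is not a subagent.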
2373 pub fn request_final_summary(
2374 &mut self,
2375 cx: &mut Context<Self>,
2376 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2377 let context = self
2378 .subagent_context
2379 .as_ref()
2380 .context("Not a subagent thread")?;
2381 let prompt = context.summary_prompt.clone();
2382 self.submit_user_message(prompt, cx)
2383 }
2384
2385 fn build_request_messages(
2386 &self,
2387 available_tools: Vec<SharedString>,
2388 cx: &App,
2389 ) -> Vec<LanguageModelRequestMessage> {
2390 log::trace!(
2391 "Building request messages from {} thread messages",
2392 self.messages.len()
2393 );
2394
2395 let system_prompt = SystemPromptTemplate {
2396 project: self.project_context.read(cx),
2397 available_tools,
2398 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2399 }
2400 .render(&self.templates)
2401 .context("failed to build system prompt")
2402 .expect("Invalid template");
2403 let mut messages = vec![LanguageModelRequestMessage {
2404 role: Role::System,
2405 content: vec![system_prompt.into()],
2406 cache: false,
2407 reasoning_details: None,
2408 }];
2409 for message in &self.messages {
2410 messages.extend(message.to_request());
2411 }
2412
2413 if let Some(last_message) = messages.last_mut() {
2414 last_message.cache = true;
2415 }
2416
2417 if let Some(message) = self.pending_message.as_ref() {
2418 messages.extend(message.to_request());
2419 }
2420
2421 messages
2422 }
2423
2424 pub fn to_markdown(&self) -> String {
2425 let mut markdown = String::new();
2426 for (ix, message) in self.messages.iter().enumerate() {
2427 if ix > 0 {
2428 markdown.push('\n');
2429 }
2430 markdown.push_str(&message.to_markdown());
2431 }
2432
2433 if let Some(message) = self.pending_message.as_ref() {
2434 markdown.push('\n');
2435 markdown.push_str(&message.to_markdown());
2436 }
2437
2438 markdown
2439 }
2440
2441 fn advance_prompt_id(&mut self) {
2442 self.prompt_id = PromptId::new();
2443 }
2444
2445 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2446 use LanguageModelCompletionError::*;
2447 use http_client::StatusCode;
2448
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to
        //   MAX_RETRY_ATTEMPTS times, with exponential backoff or the server-provided retry-after delay.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2453 match error {
2454 HttpResponseError {
2455 status_code: StatusCode::TOO_MANY_REQUESTS,
2456 ..
2457 } => Some(RetryStrategy::ExponentialBackoff {
2458 initial_delay: BASE_RETRY_DELAY,
2459 max_attempts: MAX_RETRY_ATTEMPTS,
2460 }),
2461 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2462 Some(RetryStrategy::Fixed {
2463 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2464 max_attempts: MAX_RETRY_ATTEMPTS,
2465 })
2466 }
2467 UpstreamProviderError {
2468 status,
2469 retry_after,
2470 ..
2471 } => match *status {
2472 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2473 Some(RetryStrategy::Fixed {
2474 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2475 max_attempts: MAX_RETRY_ATTEMPTS,
2476 })
2477 }
2478 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2479 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
                    // An Internal Server Error could be anything; retry up to 3 times.
2481 max_attempts: 3,
2482 }),
2483 status => {
                    // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
                    // but we encounter it frequently in practice. See https://http.dev/529
2486 if status.as_u16() == 529 {
2487 Some(RetryStrategy::Fixed {
2488 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2489 max_attempts: MAX_RETRY_ATTEMPTS,
2490 })
2491 } else {
2492 Some(RetryStrategy::Fixed {
2493 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2494 max_attempts: 2,
2495 })
2496 }
2497 }
2498 },
2499 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2500 delay: BASE_RETRY_DELAY,
2501 max_attempts: 3,
2502 }),
2503 ApiReadResponseError { .. }
2504 | HttpSend { .. }
2505 | DeserializeResponse { .. }
2506 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2507 delay: BASE_RETRY_DELAY,
2508 max_attempts: 3,
2509 }),
2510 // Retrying these errors definitely shouldn't help.
2511 HttpResponseError {
2512 status_code:
2513 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2514 ..
2515 }
2516 | AuthenticationError { .. }
2517 | PermissionError { .. }
2518 | NoApiKey { .. }
2519 | ApiEndpointNotFound { .. }
2520 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2522 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2523 delay: BASE_RETRY_DELAY,
2524 max_attempts: 1,
2525 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2527 HttpResponseError { status_code, .. }
2528 if status_code.is_client_error() || status_code.is_server_error() =>
2529 {
2530 Some(RetryStrategy::Fixed {
2531 delay: BASE_RETRY_DELAY,
2532 max_attempts: 3,
2533 })
2534 }
2535 Other(err) if err.is::<language_model::PaymentRequiredError>() => {
2536 // Retrying won't help for Payment Required errors.
2537 None
2538 }
            // Conservatively retry any other errors a couple of times.
2540 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2541 delay: BASE_RETRY_DELAY,
2542 max_attempts: 2,
2543 }),
2544 }
2545 }
2546}
2547
2548struct RunningTurn {
    /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results.
2552 _task: Task<()>,
2553 /// The current event stream for the running turn. Used to report a final
2554 /// cancellation event if we cancel the turn.
2555 event_stream: ThreadEventStream,
2556 /// The tools that were enabled for this turn.
2557 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2558 /// Sender to signal tool cancellation. When cancel is called, this is
2559 /// set to true so all tools can detect user-initiated cancellation.
2560 cancellation_tx: watch::Sender<bool>,
2561}
2562
2563impl RunningTurn {
2564 fn cancel(mut self) -> Task<()> {
2565 log::debug!("Cancelling in progress turn");
2566 self.cancellation_tx.send(true).ok();
2567 self.event_stream.send_canceled();
2568 self._task
2569 }
2570}
2571
2572pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2573
2574impl EventEmitter<TokenUsageUpdated> for Thread {}
2575
2576pub struct TitleUpdated;
2577
2578impl EventEmitter<TitleUpdated> for Thread {}
2579
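/// A strongly-typed tool that the agent can call. Implementors provide
/// serializable input/output types plus metadata, and are type-erased into an
/// [`AnyAgentTool`] via [`AgentTool::erase`].
///
/// A minimal sketch of an implementation (the `EchoTool` type is hypothetical,
/// and `type Output = String` assumes `String: Into<LanguageModelToolResultContent>`):
///
/// ```ignore
/// #[derive(Deserialize, Serialize, JsonSchema)]
/// struct EchoToolInput {
///     /// The text to echo back to the model.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     // Assumes `String: Into<LanguageModelToolResultContent>`.
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
/// ```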
2580pub trait AgentTool
2581where
2582 Self: 'static + Sized,
2583{
2584 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2585 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2586
2587 fn name() -> &'static str;
2588
2589 fn description() -> SharedString {
2590 let schema = schemars::schema_for!(Self::Input);
2591 SharedString::new(
2592 schema
2593 .get("description")
2594 .and_then(|description| description.as_str())
2595 .unwrap_or_default(),
2596 )
2597 }
2598
2599 fn kind() -> acp::ToolKind;
2600
2601 /// The initial tool title to display. Can be updated during the tool run.
2602 fn initial_title(
2603 &self,
2604 input: Result<Self::Input, serde_json::Value>,
2605 cx: &mut App,
2606 ) -> SharedString;
2607
2608 /// Returns the JSON schema that describes the tool's input.
2609 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2610 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2611 }
2612
    /// Some tools depend on a specific provider, e.g. for billing.
    /// This lets a tool declare whether it is compatible with the given provider
    /// or should be filtered out.
2615 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2616 true
2617 }
2618
2619 /// Runs the tool with the provided input.
2620 fn run(
2621 self: Arc<Self>,
2622 input: Self::Input,
2623 event_stream: ToolCallEventStream,
2624 cx: &mut App,
2625 ) -> Task<Result<Self::Output>>;
2626
2627 /// Emits events for a previous execution of the tool.
2628 fn replay(
2629 &self,
2630 _input: Self::Input,
2631 _output: Self::Output,
2632 _event_stream: ToolCallEventStream,
2633 _cx: &mut App,
2634 ) -> Result<()> {
2635 Ok(())
2636 }
2637
2638 fn erase(self) -> Arc<dyn AnyAgentTool> {
2639 Arc::new(Erased(Arc::new(self)))
2640 }
2641}
2642
2643pub struct Erased<T>(T);
2644
2645pub struct AgentToolOutput {
2646 pub llm_output: LanguageModelToolResultContent,
2647 pub raw_output: serde_json::Value,
2648}
2649
2650pub trait AnyAgentTool {
2651 fn name(&self) -> SharedString;
2652 fn description(&self) -> SharedString;
2653 fn kind(&self) -> acp::ToolKind;
2654 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2655 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2656 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2657 true
2658 }
2659 fn run(
2660 self: Arc<Self>,
2661 input: serde_json::Value,
2662 event_stream: ToolCallEventStream,
2663 cx: &mut App,
2664 ) -> Task<Result<AgentToolOutput>>;
2665 fn replay(
2666 &self,
2667 input: serde_json::Value,
2668 output: serde_json::Value,
2669 event_stream: ToolCallEventStream,
2670 cx: &mut App,
2671 ) -> Result<()>;
2672}
2673
2674impl<T> AnyAgentTool for Erased<Arc<T>>
2675where
2676 T: AgentTool,
2677{
2678 fn name(&self) -> SharedString {
2679 T::name().into()
2680 }
2681
2682 fn description(&self) -> SharedString {
2683 T::description()
2684 }
2685
2686 fn kind(&self) -> agent_client_protocol::ToolKind {
2687 T::kind()
2688 }
2689
    fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
        let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
        self.0.initial_title(parsed_input, cx)
2693 }
2694
2695 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2696 let mut json = serde_json::to_value(T::input_schema(format))?;
2697 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2698 Ok(json)
2699 }
2700
2701 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2702 T::supports_provider(provider)
2703 }
2704
2705 fn run(
2706 self: Arc<Self>,
2707 input: serde_json::Value,
2708 event_stream: ToolCallEventStream,
2709 cx: &mut App,
2710 ) -> Task<Result<AgentToolOutput>> {
2711 cx.spawn(async move |cx| {
2712 let input = serde_json::from_value(input)?;
2713 let output = cx
2714 .update(|cx| self.0.clone().run(input, event_stream, cx))
2715 .await?;
2716 let raw_output = serde_json::to_value(&output)?;
2717 Ok(AgentToolOutput {
2718 llm_output: output.into(),
2719 raw_output,
2720 })
2721 })
2722 }
2723
2724 fn replay(
2725 &self,
2726 input: serde_json::Value,
2727 output: serde_json::Value,
2728 event_stream: ToolCallEventStream,
2729 cx: &mut App,
2730 ) -> Result<()> {
2731 let input = serde_json::from_value(input)?;
2732 let output = serde_json::from_value(output)?;
2733 self.0.replay(input, output, event_stream, cx)
2734 }
2735}
2736
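/// Cloneable sender half of a turn's event channel. Sends are best-effort:
/// events are silently dropped if the receiver has gone away.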
2737#[derive(Clone)]
2738struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2739
2740impl ThreadEventStream {
2741 fn send_user_message(&self, message: &UserMessage) {
2742 self.0
2743 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2744 .ok();
2745 }
2746
2747 fn send_text(&self, text: &str) {
2748 self.0
2749 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2750 .ok();
2751 }
2752
2753 fn send_thinking(&self, text: &str) {
2754 self.0
2755 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2756 .ok();
2757 }
2758
2759 fn send_tool_call(
2760 &self,
2761 id: &LanguageModelToolUseId,
2762 tool_name: &str,
2763 title: SharedString,
2764 kind: acp::ToolKind,
2765 input: serde_json::Value,
2766 ) {
2767 self.0
2768 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2769 id,
2770 tool_name,
2771 title.to_string(),
2772 kind,
2773 input,
2774 ))))
2775 .ok();
2776 }
2777
2778 fn initial_tool_call(
2779 id: &LanguageModelToolUseId,
2780 tool_name: &str,
2781 title: String,
2782 kind: acp::ToolKind,
2783 input: serde_json::Value,
2784 ) -> acp::ToolCall {
2785 acp::ToolCall::new(id.to_string(), title)
2786 .kind(kind)
2787 .raw_input(input)
2788 .meta(acp_thread::meta_with_tool_name(tool_name))
2789 }
2790
2791 fn update_tool_call_fields(
2792 &self,
2793 tool_use_id: &LanguageModelToolUseId,
2794 fields: acp::ToolCallUpdateFields,
2795 ) {
2796 self.0
2797 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2798 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2799 )))
2800 .ok();
2801 }
2802
2803 fn send_retry(&self, status: acp_thread::RetryStatus) {
2804 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2805 }
2806
2807 fn send_stop(&self, reason: acp::StopReason) {
2808 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2809 }
2810
2811 fn send_canceled(&self) {
2812 self.0
2813 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2814 .ok();
2815 }
2816
2817 fn send_error(&self, error: impl Into<anyhow::Error>) {
2818 self.0.unbounded_send(Err(error.into())).ok();
2819 }
2820}
2821
2822#[derive(Clone)]
2823pub struct ToolCallEventStream {
2824 tool_use_id: LanguageModelToolUseId,
2825 stream: ThreadEventStream,
2826 fs: Option<Arc<dyn Fs>>,
2827 cancellation_rx: watch::Receiver<bool>,
2828}
2829
2830impl ToolCallEventStream {
2831 #[cfg(any(test, feature = "test-support"))]
2832 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2833 let (stream, receiver, _cancellation_tx) = Self::test_with_cancellation();
2834 (stream, receiver)
2835 }
2836
2837 #[cfg(any(test, feature = "test-support"))]
2838 pub fn test_with_cancellation() -> (Self, ToolCallEventStreamReceiver, watch::Sender<bool>) {
2839 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2840 let (cancellation_tx, cancellation_rx) = watch::channel(false);
2841
2842 let stream = ToolCallEventStream::new(
2843 "test_id".into(),
2844 ThreadEventStream(events_tx),
2845 None,
2846 cancellation_rx,
2847 );
2848
2849 (
2850 stream,
2851 ToolCallEventStreamReceiver(events_rx),
2852 cancellation_tx,
2853 )
2854 }
2855
    /// Signals cancellation through the given sender. Only available in tests.
2857 #[cfg(any(test, feature = "test-support"))]
2858 pub fn signal_cancellation_with_sender(cancellation_tx: &mut watch::Sender<bool>) {
2859 cancellation_tx.send(true).ok();
2860 }
2861
2862 fn new(
2863 tool_use_id: LanguageModelToolUseId,
2864 stream: ThreadEventStream,
2865 fs: Option<Arc<dyn Fs>>,
2866 cancellation_rx: watch::Receiver<bool>,
2867 ) -> Self {
2868 Self {
2869 tool_use_id,
2870 stream,
2871 fs,
2872 cancellation_rx,
2873 }
2874 }
2875
2876 /// Returns a future that resolves when the user cancels the tool call.
2877 /// Tools should select on this alongside their main work to detect user cancellation.
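    ///
    /// A minimal sketch of racing a tool's work against cancellation (the
    /// `do_work()` future is hypothetical; `event_stream` is the
    /// [`ToolCallEventStream`] passed to the tool's `run` method):
    ///
    /// ```ignore
    /// let work = do_work().fuse();
    /// let cancelled = event_stream.cancelled_by_user().fuse();
    /// futures::pin_mut!(work, cancelled);
    /// futures::select! {
    ///     result = work => {
    ///         // The tool finished normally; use `result`.
    ///     }
    ///     _ = cancelled => {
    ///         // The user cancelled; clean up and return early.
    ///     }
    /// }
    /// ```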
2878 pub fn cancelled_by_user(&self) -> impl std::future::Future<Output = ()> + '_ {
2879 let mut rx = self.cancellation_rx.clone();
2880 async move {
2881 loop {
2882 if *rx.borrow() {
2883 return;
2884 }
2885 if rx.changed().await.is_err() {
                    // The sender was dropped, so cancellation can never be signalled; park forever.
2887 std::future::pending::<()>().await;
2888 }
2889 }
2890 }
2891 }
2892
    /// Returns true if the user has cancelled this tool call. Useful for checking,
    /// after an operation completes, whether it finished because the user cancelled it.
2896 pub fn was_cancelled_by_user(&self) -> bool {
2897 *self.cancellation_rx.clone().borrow()
2898 }
2899
2900 pub fn tool_use_id(&self) -> &LanguageModelToolUseId {
2901 &self.tool_use_id
2902 }
2903
2904 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2905 self.stream
2906 .update_tool_call_fields(&self.tool_use_id, fields);
2907 }
2908
2909 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2910 self.stream
2911 .0
2912 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2913 acp_thread::ToolCallUpdateDiff {
2914 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2915 diff,
2916 }
2917 .into(),
2918 )))
2919 .ok();
2920 }
2921
2922 pub fn update_subagent_thread(&self, thread: Entity<acp_thread::AcpThread>) {
2923 self.stream
2924 .0
2925 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2926 acp_thread::ToolCallUpdateSubagentThread {
2927 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2928 thread,
2929 }
2930 .into(),
2931 )))
2932 .ok();
2933 }
2934
2935 /// Authorize a third-party tool (e.g., MCP tool from a context server).
2936 ///
2937 /// Unlike built-in tools, third-party tools don't support pattern-based permissions.
2938 /// They only support `default_mode` (allow/deny/confirm) per tool.
2939 ///
    /// Shows three buttons:
2941 /// - "Always allow <display_name> MCP tool" → sets `tools.<tool_id>.default_mode = "allow"`
2942 /// - "Allow" → approve once
2943 /// - "Deny" → reject once
2944 pub fn authorize_third_party_tool(
2945 &self,
2946 title: impl Into<String>,
2947 tool_id: String,
2948 display_name: String,
2949 cx: &mut App,
2950 ) -> Task<Result<()>> {
2951 let settings = agent_settings::AgentSettings::get_global(cx);
2952
2953 let decision = decide_permission_from_settings(&tool_id, "", &settings);
2954
2955 match decision {
2956 ToolPermissionDecision::Allow => return Task::ready(Ok(())),
2957 ToolPermissionDecision::Deny(reason) => return Task::ready(Err(anyhow!(reason))),
2958 ToolPermissionDecision::Confirm => {}
2959 }
2960
2961 let (response_tx, response_rx) = oneshot::channel();
2962 self.stream
2963 .0
2964 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2965 ToolCallAuthorization {
2966 tool_call: acp::ToolCallUpdate::new(
2967 self.tool_use_id.to_string(),
2968 acp::ToolCallUpdateFields::new().title(title.into()),
2969 ),
2970 options: vec![
2971 acp::PermissionOption::new(
2972 acp::PermissionOptionId::new(format!("always_allow_mcp:{}", tool_id)),
2973 format!("Always allow {} MCP tool", display_name),
2974 acp::PermissionOptionKind::AllowAlways,
2975 ),
2976 acp::PermissionOption::new(
2977 acp::PermissionOptionId::new("allow"),
2978 "Allow once",
2979 acp::PermissionOptionKind::AllowOnce,
2980 ),
2981 acp::PermissionOption::new(
2982 acp::PermissionOptionId::new("deny"),
2983 "Deny",
2984 acp::PermissionOptionKind::RejectOnce,
2985 ),
2986 ],
2987 response: response_tx,
2988 context: None,
2989 },
2990 )))
2991 .ok();
2992
2993 let fs = self.fs.clone();
2994 cx.spawn(async move |cx| {
2995 let response_str = response_rx.await?.0.to_string();
2996
2997 if response_str == format!("always_allow_mcp:{}", tool_id) {
2998 if let Some(fs) = fs.clone() {
2999 cx.update(|cx| {
3000 update_settings_file(fs, cx, move |settings, _| {
3001 settings
3002 .agent
3003 .get_or_insert_default()
3004 .set_tool_default_mode(&tool_id, ToolPermissionMode::Allow);
3005 });
3006 });
3007 }
3008 return Ok(());
3009 }
3010
3011 if response_str == "allow" {
3012 return Ok(());
3013 }
3014
3015 Err(anyhow!("Permission to run tool denied by user"))
3016 })
3017 }
3018
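    /// Requests user authorization for a tool call and resolves once the user
    /// responds.
    ///
    /// The chosen permission option ID encodes the decision: `"allow"` approves
    /// once and any unrecognized ID denies once, while `always_allow:<tool>`,
    /// `always_deny:<tool>`, `always_allow_pattern:<tool>:<regex>`, and
    /// `always_deny_pattern:<tool>:<regex>` are persisted to the agent settings
    /// before the returned task resolves.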
3019 pub fn authorize(
3020 &self,
3021 title: impl Into<String>,
3022 context: ToolPermissionContext,
3023 cx: &mut App,
3024 ) -> Task<Result<()>> {
3025 use settings::ToolPermissionMode;
3026
3027 let options = context.build_permission_options();
3028
3029 let (response_tx, response_rx) = oneshot::channel();
3030 self.stream
3031 .0
3032 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
3033 ToolCallAuthorization {
3034 tool_call: acp::ToolCallUpdate::new(
3035 self.tool_use_id.to_string(),
3036 acp::ToolCallUpdateFields::new().title(title.into()),
3037 ),
3038 options,
3039 response: response_tx,
3040 context: Some(context),
3041 },
3042 )))
3043 .ok();
3044
3045 let fs = self.fs.clone();
3046 cx.spawn(async move |cx| {
3047 let response_str = response_rx.await?.0.to_string();
3048
3049 // Handle "always allow tool" - e.g., "always_allow:terminal"
3050 if let Some(tool) = response_str.strip_prefix("always_allow:") {
3051 if let Some(fs) = fs.clone() {
3052 let tool = tool.to_string();
3053 cx.update(|cx| {
3054 update_settings_file(fs, cx, move |settings, _| {
3055 settings
3056 .agent
3057 .get_or_insert_default()
3058 .set_tool_default_mode(&tool, ToolPermissionMode::Allow);
3059 });
3060 });
3061 }
3062 return Ok(());
3063 }
3064
3065 // Handle "always deny tool" - e.g., "always_deny:terminal"
3066 if let Some(tool) = response_str.strip_prefix("always_deny:") {
3067 if let Some(fs) = fs.clone() {
3068 let tool = tool.to_string();
3069 cx.update(|cx| {
3070 update_settings_file(fs, cx, move |settings, _| {
3071 settings
3072 .agent
3073 .get_or_insert_default()
3074 .set_tool_default_mode(&tool, ToolPermissionMode::Deny);
3075 });
3076 });
3077 }
3078 return Err(anyhow!("Permission to run tool denied by user"));
3079 }
3080
3081 // Handle "always allow pattern" - e.g., "always_allow_pattern:terminal:^cargo\s"
3082 if response_str.starts_with("always_allow_pattern:") {
3083 let parts: Vec<&str> = response_str.splitn(3, ':').collect();
3084 if parts.len() == 3 {
3085 let pattern_tool_name = parts[1].to_string();
3086 let pattern = parts[2].to_string();
3087 if let Some(fs) = fs.clone() {
3088 cx.update(|cx| {
3089 update_settings_file(fs, cx, move |settings, _| {
3090 settings
3091 .agent
3092 .get_or_insert_default()
3093 .add_tool_allow_pattern(&pattern_tool_name, pattern);
3094 });
3095 });
3096 }
3097 }
3098 return Ok(());
3099 }
3100
3101 // Handle "always deny pattern" - e.g., "always_deny_pattern:terminal:^cargo\s"
3102 if response_str.starts_with("always_deny_pattern:") {
3103 let parts: Vec<&str> = response_str.splitn(3, ':').collect();
3104 if parts.len() == 3 {
3105 let pattern_tool_name = parts[1].to_string();
3106 let pattern = parts[2].to_string();
3107 if let Some(fs) = fs.clone() {
3108 cx.update(|cx| {
3109 update_settings_file(fs, cx, move |settings, _| {
3110 settings
3111 .agent
3112 .get_or_insert_default()
3113 .add_tool_deny_pattern(&pattern_tool_name, pattern);
3114 });
3115 });
3116 }
3117 }
3118 return Err(anyhow!("Permission to run tool denied by user"));
3119 }
3120
3121 // Handle simple "allow" (allow once)
3122 if response_str == "allow" {
3123 return Ok(());
3124 }
3125
3126 // Handle simple "deny" (deny once)
3127 Err(anyhow!("Permission to run tool denied by user"))
3128 })
3129 }
3130}
3131
3132#[cfg(any(test, feature = "test-support"))]
3133pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
3134
3135#[cfg(any(test, feature = "test-support"))]
3136impl ToolCallEventStreamReceiver {
3137 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
3138 let event = self.0.next().await;
3139 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
3140 auth
3141 } else {
3142 panic!("Expected ToolCallAuthorization but got: {:?}", event);
3143 }
3144 }
3145
3146 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
3147 let event = self.0.next().await;
3148 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
3149 update,
3150 )))) = event
3151 {
3152 update.fields
3153 } else {
3154 panic!("Expected update fields but got: {:?}", event);
3155 }
3156 }
3157
3158 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
3159 let event = self.0.next().await;
3160 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
3161 update,
3162 )))) = event
3163 {
3164 update.diff
3165 } else {
3166 panic!("Expected diff but got: {:?}", event);
3167 }
3168 }
3169
3170 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
3171 let event = self.0.next().await;
3172 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
3173 update,
3174 )))) = event
3175 {
3176 update.terminal
3177 } else {
3178 panic!("Expected terminal but got: {:?}", event);
3179 }
3180 }
3181}
3182
3183#[cfg(any(test, feature = "test-support"))]
3184impl std::ops::Deref for ToolCallEventStreamReceiver {
3185 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
3186
3187 fn deref(&self) -> &Self::Target {
3188 &self.0
3189 }
3190}
3191
3192#[cfg(any(test, feature = "test-support"))]
3193impl std::ops::DerefMut for ToolCallEventStreamReceiver {
3194 fn deref_mut(&mut self) -> &mut Self::Target {
3195 &mut self.0
3196 }
3197}
3198
3199impl From<&str> for UserMessageContent {
3200 fn from(text: &str) -> Self {
3201 Self::Text(text.into())
3202 }
3203}
3204
3205impl UserMessageContent {
3206 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
3207 match value {
3208 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
3209 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
3210 acp::ContentBlock::Audio(_) => {
3211 // TODO
3212 Self::Text("[audio]".to_string())
3213 }
3214 acp::ContentBlock::ResourceLink(resource_link) => {
3215 match MentionUri::parse(&resource_link.uri, path_style) {
3216 Ok(uri) => Self::Mention {
3217 uri,
3218 content: String::new(),
3219 },
3220 Err(err) => {
3221 log::error!("Failed to parse mention link: {}", err);
3222 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
3223 }
3224 }
3225 }
3226 acp::ContentBlock::Resource(resource) => match resource.resource {
3227 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
3228 match MentionUri::parse(&resource.uri, path_style) {
3229 Ok(uri) => Self::Mention {
3230 uri,
3231 content: resource.text,
3232 },
3233 Err(err) => {
3234 log::error!("Failed to parse mention link: {}", err);
3235 Self::Text(
3236 MarkdownCodeBlock {
3237 tag: &resource.uri,
3238 text: &resource.text,
3239 }
3240 .to_string(),
3241 )
3242 }
3243 }
3244 }
3245 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
3246 // TODO
3247 Self::Text("[blob]".to_string())
3248 }
3249 other => {
3250 log::warn!("Unexpected content type: {:?}", other);
3251 Self::Text("[unknown]".to_string())
3252 }
3253 },
3254 other => {
3255 log::warn!("Unexpected content type: {:?}", other);
3256 Self::Text("[unknown]".to_string())
3257 }
3258 }
3259 }
3260}
3261
3262impl From<UserMessageContent> for acp::ContentBlock {
3263 fn from(content: UserMessageContent) -> Self {
3264 match content {
3265 UserMessageContent::Text(text) => text.into(),
3266 UserMessageContent::Image(image) => {
3267 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
3268 }
3269 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
3270 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
3271 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
3272 )),
3273 ),
3274 }
3275 }
3276}
3277
3278fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
3279 LanguageModelImage {
3280 source: image_content.data.into(),
3281 size: None,
3282 }
3283}