1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SubagentTool, SystemPromptTemplate, Template, Templates,
6 TerminalTool, ThinkingTool, ToolPermissionDecision, WebSearchTool,
7 decide_permission_from_settings,
8};
9use acp_thread::{MentionUri, UserMessageId};
10use action_log::ActionLog;
11use feature_flags::{FeatureFlagAppExt as _, SubagentsFeatureFlag};
12
13use agent_client_protocol as acp;
14use agent_settings::{
15 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
16 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
17};
18use anyhow::{Context as _, Result, anyhow};
19use chrono::{DateTime, Utc};
20use client::{ModelRequestUsage, RequestUsage, UserStore};
21use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
22use collections::{HashMap, HashSet, IndexMap};
23use fs::Fs;
24use futures::stream;
25use futures::{
26 FutureExt,
27 channel::{mpsc, oneshot},
28 future::Shared,
29 stream::FuturesUnordered,
30};
31use gpui::{
32 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
33};
34use language_model::{
35 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
36 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
37 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
38 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
39 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
40 ZED_CLOUD_PROVIDER_ID,
41};
42use project::Project;
43use prompt_store::ProjectContext;
44use schemars::{JsonSchema, Schema};
45use serde::{Deserialize, Serialize};
46use settings::{LanguageModelSelection, Settings, ToolPermissionMode, update_settings_file};
47use smol::stream::StreamExt;
48use std::{
49 collections::BTreeMap,
50 ops::RangeInclusive,
51 path::Path,
52 rc::Rc,
53 sync::Arc,
54 time::{Duration, Instant},
55};
56use std::{fmt::Write, path::PathBuf};
57use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
58use uuid::Uuid;
59
60const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
61pub const MAX_TOOL_NAME_LENGTH: usize = 64;
62pub const MAX_SUBAGENT_DEPTH: u8 = 4;
63pub const MAX_PARALLEL_SUBAGENTS: usize = 8;
64
65/// Context passed to a subagent thread for lifecycle management
66#[derive(Clone)]
67pub struct SubagentContext {
68 /// ID of the parent thread
69 pub parent_thread_id: acp::SessionId,
70
71 /// ID of the tool call that spawned this subagent
72 pub tool_use_id: LanguageModelToolUseId,
73
74 /// Current depth level (0 = root agent, 1 = first-level subagent, etc.)
75 pub depth: u8,
76
77 /// Prompt to send when subagent completes successfully
78 pub summary_prompt: String,
79
80 /// Prompt to send when context is running low (≤25% remaining)
81 pub context_low_prompt: String,
82}
83
84/// The ID of the user prompt that initiated a request.
85///
86/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
87#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
88pub struct PromptId(Arc<str>);
89
90impl PromptId {
91 pub fn new() -> Self {
92 Self(Uuid::new_v4().to_string().into())
93 }
94}
95
96impl std::fmt::Display for PromptId {
97 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
98 write!(f, "{}", self.0)
99 }
100}
101
102pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
103pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
104
105#[derive(Debug, Clone)]
106enum RetryStrategy {
107 ExponentialBackoff {
108 initial_delay: Duration,
109 max_attempts: u8,
110 },
111 Fixed {
112 delay: Duration,
113 max_attempts: u8,
114 },
115}
116
117#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
118pub enum Message {
119 User(UserMessage),
120 Agent(AgentMessage),
121 Resume,
122}
123
124impl Message {
125 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
126 match self {
127 Message::Agent(agent_message) => Some(agent_message),
128 _ => None,
129 }
130 }
131
132 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
133 match self {
134 Message::User(message) => {
135 if message.content.is_empty() {
136 vec![]
137 } else {
138 vec![message.to_request()]
139 }
140 }
141 Message::Agent(message) => message.to_request(),
142 Message::Resume => vec![LanguageModelRequestMessage {
143 role: Role::User,
144 content: vec!["Continue where you left off".into()],
145 cache: false,
146 reasoning_details: None,
147 }],
148 }
149 }
150
151 pub fn to_markdown(&self) -> String {
152 match self {
153 Message::User(message) => message.to_markdown(),
154 Message::Agent(message) => message.to_markdown(),
155 Message::Resume => "[resume]\n".into(),
156 }
157 }
158
159 pub fn role(&self) -> Role {
160 match self {
161 Message::User(_) | Message::Resume => Role::User,
162 Message::Agent(_) => Role::Assistant,
163 }
164 }
165}
166
167#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
168pub struct UserMessage {
169 pub id: UserMessageId,
170 pub content: Vec<UserMessageContent>,
171}
172
173#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
174pub enum UserMessageContent {
175 Text(String),
176 Mention { uri: MentionUri, content: String },
177 Image(LanguageModelImage),
178}
179
180impl UserMessage {
181 pub fn to_markdown(&self) -> String {
182 let mut markdown = String::from("## User\n\n");
183
184 for content in &self.content {
185 match content {
186 UserMessageContent::Text(text) => {
187 markdown.push_str(text);
188 markdown.push('\n');
189 }
190 UserMessageContent::Image(_) => {
191 markdown.push_str("<image />\n");
192 }
193 UserMessageContent::Mention { uri, content } => {
194 if !content.is_empty() {
195 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
196 } else {
197 let _ = writeln!(&mut markdown, "{}", uri.as_link());
198 }
199 }
200 }
201 }
202
203 markdown
204 }
205
206 fn to_request(&self) -> LanguageModelRequestMessage {
207 let mut message = LanguageModelRequestMessage {
208 role: Role::User,
209 content: Vec::with_capacity(self.content.len()),
210 cache: false,
211 reasoning_details: None,
212 };
213
214 const OPEN_CONTEXT: &str = "<context>\n\
215 The following items were attached by the user. \
216 They are up-to-date and don't need to be re-read.\n\n";
217
218 const OPEN_FILES_TAG: &str = "<files>";
219 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
220 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
221 const OPEN_SELECTIONS_TAG: &str = "<selections>";
222 const OPEN_THREADS_TAG: &str = "<threads>";
223 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
224 const OPEN_RULES_TAG: &str =
225 "<rules>\nThe user has specified the following rules that should be applied:\n";
226
227 let mut file_context = OPEN_FILES_TAG.to_string();
228 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
229 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
230 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
231 let mut thread_context = OPEN_THREADS_TAG.to_string();
232 let mut fetch_context = OPEN_FETCH_TAG.to_string();
233 let mut rules_context = OPEN_RULES_TAG.to_string();
234
235 for chunk in &self.content {
236 let chunk = match chunk {
237 UserMessageContent::Text(text) => {
238 language_model::MessageContent::Text(text.clone())
239 }
240 UserMessageContent::Image(value) => {
241 language_model::MessageContent::Image(value.clone())
242 }
243 UserMessageContent::Mention { uri, content } => {
244 match uri {
245 MentionUri::File { abs_path } => {
246 write!(
247 &mut file_context,
248 "\n{}",
249 MarkdownCodeBlock {
250 tag: &codeblock_tag(abs_path, None),
251 text: &content.to_string(),
252 }
253 )
254 .ok();
255 }
256 MentionUri::PastedImage => {
257 debug_panic!("pasted image URI should not be used in mention content")
258 }
259 MentionUri::Directory { .. } => {
260 write!(&mut directory_context, "\n{}\n", content).ok();
261 }
262 MentionUri::Symbol {
263 abs_path: path,
264 line_range,
265 ..
266 } => {
267 write!(
268 &mut symbol_context,
269 "\n{}",
270 MarkdownCodeBlock {
271 tag: &codeblock_tag(path, Some(line_range)),
272 text: content
273 }
274 )
275 .ok();
276 }
277 MentionUri::Selection {
278 abs_path: path,
279 line_range,
280 ..
281 } => {
282 write!(
283 &mut selection_context,
284 "\n{}",
285 MarkdownCodeBlock {
286 tag: &codeblock_tag(
287 path.as_deref().unwrap_or("Untitled".as_ref()),
288 Some(line_range)
289 ),
290 text: content
291 }
292 )
293 .ok();
294 }
295 MentionUri::Thread { .. } => {
296 write!(&mut thread_context, "\n{}\n", content).ok();
297 }
298 MentionUri::TextThread { .. } => {
299 write!(&mut thread_context, "\n{}\n", content).ok();
300 }
301 MentionUri::Rule { .. } => {
302 write!(
303 &mut rules_context,
304 "\n{}",
305 MarkdownCodeBlock {
306 tag: "",
307 text: content
308 }
309 )
310 .ok();
311 }
312 MentionUri::Fetch { url } => {
313 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
314 }
315 }
316
317 language_model::MessageContent::Text(uri.as_link().to_string())
318 }
319 };
320
321 message.content.push(chunk);
322 }
323
324 let len_before_context = message.content.len();
325
326 if file_context.len() > OPEN_FILES_TAG.len() {
327 file_context.push_str("</files>\n");
328 message
329 .content
330 .push(language_model::MessageContent::Text(file_context));
331 }
332
333 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
334 directory_context.push_str("</directories>\n");
335 message
336 .content
337 .push(language_model::MessageContent::Text(directory_context));
338 }
339
340 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
341 symbol_context.push_str("</symbols>\n");
342 message
343 .content
344 .push(language_model::MessageContent::Text(symbol_context));
345 }
346
347 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
348 selection_context.push_str("</selections>\n");
349 message
350 .content
351 .push(language_model::MessageContent::Text(selection_context));
352 }
353
354 if thread_context.len() > OPEN_THREADS_TAG.len() {
355 thread_context.push_str("</threads>\n");
356 message
357 .content
358 .push(language_model::MessageContent::Text(thread_context));
359 }
360
361 if fetch_context.len() > OPEN_FETCH_TAG.len() {
362 fetch_context.push_str("</fetched_urls>\n");
363 message
364 .content
365 .push(language_model::MessageContent::Text(fetch_context));
366 }
367
368 if rules_context.len() > OPEN_RULES_TAG.len() {
369 rules_context.push_str("</user_rules>\n");
370 message
371 .content
372 .push(language_model::MessageContent::Text(rules_context));
373 }
374
375 if message.content.len() > len_before_context {
376 message.content.insert(
377 len_before_context,
378 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
379 );
380 message
381 .content
382 .push(language_model::MessageContent::Text("</context>".into()));
383 }
384
385 message
386 }
387}
388
389fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
390 let mut result = String::new();
391
392 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
393 let _ = write!(result, "{} ", extension);
394 }
395
396 let _ = write!(result, "{}", full_path.display());
397
398 if let Some(range) = line_range {
399 if range.start() == range.end() {
400 let _ = write!(result, ":{}", range.start() + 1);
401 } else {
402 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
403 }
404 }
405
406 result
407}
408
409impl AgentMessage {
410 pub fn to_markdown(&self) -> String {
411 let mut markdown = String::from("## Assistant\n\n");
412
413 for content in &self.content {
414 match content {
415 AgentMessageContent::Text(text) => {
416 markdown.push_str(text);
417 markdown.push('\n');
418 }
419 AgentMessageContent::Thinking { text, .. } => {
420 markdown.push_str("<think>");
421 markdown.push_str(text);
422 markdown.push_str("</think>\n");
423 }
424 AgentMessageContent::RedactedThinking(_) => {
425 markdown.push_str("<redacted_thinking />\n")
426 }
427 AgentMessageContent::ToolUse(tool_use) => {
428 markdown.push_str(&format!(
429 "**Tool Use**: {} (ID: {})\n",
430 tool_use.name, tool_use.id
431 ));
432 markdown.push_str(&format!(
433 "{}\n",
434 MarkdownCodeBlock {
435 tag: "json",
436 text: &format!("{:#}", tool_use.input)
437 }
438 ));
439 }
440 }
441 }
442
443 for tool_result in self.tool_results.values() {
444 markdown.push_str(&format!(
445 "**Tool Result**: {} (ID: {})\n\n",
446 tool_result.tool_name, tool_result.tool_use_id
447 ));
448 if tool_result.is_error {
449 markdown.push_str("**ERROR:**\n");
450 }
451
452 match &tool_result.content {
453 LanguageModelToolResultContent::Text(text) => {
454 writeln!(markdown, "{text}\n").ok();
455 }
456 LanguageModelToolResultContent::Image(_) => {
457 writeln!(markdown, "<image />\n").ok();
458 }
459 }
460
461 if let Some(output) = tool_result.output.as_ref() {
462 writeln!(
463 markdown,
464 "**Debug Output**:\n\n```json\n{}\n```\n",
465 serde_json::to_string_pretty(output).unwrap()
466 )
467 .unwrap();
468 }
469 }
470
471 markdown
472 }
473
474 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
475 let mut assistant_message = LanguageModelRequestMessage {
476 role: Role::Assistant,
477 content: Vec::with_capacity(self.content.len()),
478 cache: false,
479 reasoning_details: self.reasoning_details.clone(),
480 };
481 for chunk in &self.content {
482 match chunk {
483 AgentMessageContent::Text(text) => {
484 assistant_message
485 .content
486 .push(language_model::MessageContent::Text(text.clone()));
487 }
488 AgentMessageContent::Thinking { text, signature } => {
489 assistant_message
490 .content
491 .push(language_model::MessageContent::Thinking {
492 text: text.clone(),
493 signature: signature.clone(),
494 });
495 }
496 AgentMessageContent::RedactedThinking(value) => {
497 assistant_message.content.push(
498 language_model::MessageContent::RedactedThinking(value.clone()),
499 );
500 }
501 AgentMessageContent::ToolUse(tool_use) => {
502 if self.tool_results.contains_key(&tool_use.id) {
503 assistant_message
504 .content
505 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
506 }
507 }
508 };
509 }
510
511 let mut user_message = LanguageModelRequestMessage {
512 role: Role::User,
513 content: Vec::new(),
514 cache: false,
515 reasoning_details: None,
516 };
517
518 for tool_result in self.tool_results.values() {
519 let mut tool_result = tool_result.clone();
520 // Surprisingly, the API fails if we return an empty string here.
521 // It thinks we are sending a tool use without a tool result.
522 if tool_result.content.is_empty() {
523 tool_result.content = "<Tool returned an empty string>".into();
524 }
525 user_message
526 .content
527 .push(language_model::MessageContent::ToolResult(tool_result));
528 }
529
530 let mut messages = Vec::new();
531 if !assistant_message.content.is_empty() {
532 messages.push(assistant_message);
533 }
534 if !user_message.content.is_empty() {
535 messages.push(user_message);
536 }
537 messages
538 }
539}
540
541#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
542pub struct AgentMessage {
543 pub content: Vec<AgentMessageContent>,
544 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
545 pub reasoning_details: Option<serde_json::Value>,
546}
547
548#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
549pub enum AgentMessageContent {
550 Text(String),
551 Thinking {
552 text: String,
553 signature: Option<String>,
554 },
555 RedactedThinking(String),
556 ToolUse(LanguageModelToolUse),
557}
558
559pub trait TerminalHandle {
560 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
561 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
562 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
563 fn kill(&self, cx: &AsyncApp) -> Result<()>;
564 fn was_stopped_by_user(&self, cx: &AsyncApp) -> Result<bool>;
565}
566
567pub trait ThreadEnvironment {
568 fn create_terminal(
569 &self,
570 command: String,
571 cwd: Option<PathBuf>,
572 output_byte_limit: Option<u64>,
573 cx: &mut AsyncApp,
574 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
575}
576
577#[derive(Debug)]
578pub enum ThreadEvent {
579 UserMessage(UserMessage),
580 AgentText(String),
581 AgentThinking(String),
582 ToolCall(acp::ToolCall),
583 ToolCallUpdate(acp_thread::ToolCallUpdate),
584 ToolCallAuthorization(ToolCallAuthorization),
585 Retry(acp_thread::RetryStatus),
586 Stop(acp::StopReason),
587}
588
589#[derive(Debug)]
590pub struct NewTerminal {
591 pub command: String,
592 pub output_byte_limit: Option<u64>,
593 pub cwd: Option<PathBuf>,
594 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
595}
596
597#[derive(Debug, Clone)]
598pub struct ToolPermissionContext {
599 pub tool_name: String,
600 pub input_value: String,
601}
602
603impl ToolPermissionContext {
604 pub fn new(tool_name: impl Into<String>, input_value: impl Into<String>) -> Self {
605 Self {
606 tool_name: tool_name.into(),
607 input_value: input_value.into(),
608 }
609 }
610
611 /// Builds the permission options for this tool context.
612 ///
613 /// This is the canonical source for permission option generation.
614 /// Tests should use this function rather than manually constructing options.
615 pub fn build_permission_options(&self) -> Vec<acp::PermissionOption> {
616 use crate::pattern_extraction::*;
617
618 let tool_name = &self.tool_name;
619 let input_value = &self.input_value;
620
621 let (pattern, pattern_display) = match tool_name.as_str() {
622 "terminal" => (
623 extract_terminal_pattern(input_value),
624 extract_terminal_pattern_display(input_value),
625 ),
626 "edit_file" | "delete_path" | "move_path" | "create_directory" | "save_file" => (
627 extract_path_pattern(input_value),
628 extract_path_pattern_display(input_value),
629 ),
630 "fetch" => (
631 extract_url_pattern(input_value),
632 extract_url_pattern_display(input_value),
633 ),
634 _ => (None, None),
635 };
636
637 let mut options = vec![acp::PermissionOption::new(
638 acp::PermissionOptionId::new(format!("always:{}", tool_name)),
639 format!("Always for {}", tool_name.replace('_', " ")),
640 acp::PermissionOptionKind::AllowAlways,
641 )];
642
643 if let (Some(pattern), Some(display)) = (pattern, pattern_display) {
644 let button_text = match tool_name.as_str() {
645 "terminal" => format!("Always for `{}` commands", display),
646 "fetch" => format!("Always for `{}`", display),
647 _ => format!("Always for `{}`", display),
648 };
649 options.push(acp::PermissionOption::new(
650 acp::PermissionOptionId::new(format!("always_pattern:{}:{}", tool_name, pattern)),
651 button_text,
652 acp::PermissionOptionKind::AllowAlways,
653 ));
654 }
655
656 options.push(acp::PermissionOption::new(
657 acp::PermissionOptionId::new("once"),
658 "Only this time",
659 acp::PermissionOptionKind::AllowOnce,
660 ));
661
662 options
663 }
664}
665
666#[derive(Debug)]
667pub struct ToolCallAuthorization {
668 pub tool_call: acp::ToolCallUpdate,
669 pub options: Vec<acp::PermissionOption>,
670 pub response: oneshot::Sender<acp::PermissionOptionId>,
671 pub context: Option<ToolPermissionContext>,
672}
673
674#[derive(Debug, thiserror::Error)]
675enum CompletionError {
676 #[error("max tokens")]
677 MaxTokens,
678 #[error("refusal")]
679 Refusal,
680 #[error(transparent)]
681 Other(#[from] anyhow::Error),
682}
683
684pub struct Thread {
685 id: acp::SessionId,
686 prompt_id: PromptId,
687 updated_at: DateTime<Utc>,
688 title: Option<SharedString>,
689 pending_title_generation: Option<Task<()>>,
690 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
691 summary: Option<SharedString>,
692 messages: Vec<Message>,
693 user_store: Entity<UserStore>,
694 completion_mode: CompletionMode,
695 /// Holds the task that handles agent interaction until the end of the turn.
696 /// Survives across multiple requests as the model performs tool calls and
697 /// we run tools, report their results.
698 running_turn: Option<RunningTurn>,
699 pending_message: Option<AgentMessage>,
700 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
701 tool_use_limit_reached: bool,
702 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
703 #[allow(unused)]
704 cumulative_token_usage: TokenUsage,
705 #[allow(unused)]
706 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
707 context_server_registry: Entity<ContextServerRegistry>,
708 profile_id: AgentProfileId,
709 project_context: Entity<ProjectContext>,
710 templates: Arc<Templates>,
711 model: Option<Arc<dyn LanguageModel>>,
712 summarization_model: Option<Arc<dyn LanguageModel>>,
713 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
714 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
715 pub(crate) project: Entity<Project>,
716 pub(crate) action_log: Entity<ActionLog>,
717 /// Tracks the last time files were read by the agent, to detect external modifications
718 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
719 /// True if this thread was imported from a shared thread and can be synced.
720 imported: bool,
721 /// If this is a subagent thread, contains context about the parent
722 subagent_context: Option<SubagentContext>,
723 /// Weak references to running subagent threads for cancellation propagation
724 running_subagents: Vec<WeakEntity<Thread>>,
725}
726
727impl Thread {
728 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
729 let image = model.map_or(true, |model| model.supports_images());
730 acp::PromptCapabilities::new()
731 .image(image)
732 .embedded_context(true)
733 }
734
735 pub fn new(
736 project: Entity<Project>,
737 project_context: Entity<ProjectContext>,
738 context_server_registry: Entity<ContextServerRegistry>,
739 templates: Arc<Templates>,
740 model: Option<Arc<dyn LanguageModel>>,
741 cx: &mut Context<Self>,
742 ) -> Self {
743 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
744 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
745 let (prompt_capabilities_tx, prompt_capabilities_rx) =
746 watch::channel(Self::prompt_capabilities(model.as_deref()));
747 Self {
748 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
749 prompt_id: PromptId::new(),
750 updated_at: Utc::now(),
751 title: None,
752 pending_title_generation: None,
753 pending_summary_generation: None,
754 summary: None,
755 messages: Vec::new(),
756 user_store: project.read(cx).user_store(),
757 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
758 running_turn: None,
759 pending_message: None,
760 tools: BTreeMap::default(),
761 tool_use_limit_reached: false,
762 request_token_usage: HashMap::default(),
763 cumulative_token_usage: TokenUsage::default(),
764 initial_project_snapshot: {
765 let project_snapshot = Self::project_snapshot(project.clone(), cx);
766 cx.foreground_executor()
767 .spawn(async move { Some(project_snapshot.await) })
768 .shared()
769 },
770 context_server_registry,
771 profile_id,
772 project_context,
773 templates,
774 model,
775 summarization_model: None,
776 prompt_capabilities_tx,
777 prompt_capabilities_rx,
778 project,
779 action_log,
780 file_read_times: HashMap::default(),
781 imported: false,
782 subagent_context: None,
783 running_subagents: Vec::new(),
784 }
785 }
786
787 pub fn new_subagent(
788 project: Entity<Project>,
789 project_context: Entity<ProjectContext>,
790 context_server_registry: Entity<ContextServerRegistry>,
791 templates: Arc<Templates>,
792 model: Arc<dyn LanguageModel>,
793 subagent_context: SubagentContext,
794 parent_tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
795 cx: &mut Context<Self>,
796 ) -> Self {
797 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
798 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
799 let (prompt_capabilities_tx, prompt_capabilities_rx) =
800 watch::channel(Self::prompt_capabilities(Some(model.as_ref())));
801 Self {
802 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
803 prompt_id: PromptId::new(),
804 updated_at: Utc::now(),
805 title: None,
806 pending_title_generation: None,
807 pending_summary_generation: None,
808 summary: None,
809 messages: Vec::new(),
810 user_store: project.read(cx).user_store(),
811 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
812 running_turn: None,
813 pending_message: None,
814 tools: parent_tools,
815 tool_use_limit_reached: false,
816 request_token_usage: HashMap::default(),
817 cumulative_token_usage: TokenUsage::default(),
818 initial_project_snapshot: Task::ready(None).shared(),
819 context_server_registry,
820 profile_id,
821 project_context,
822 templates,
823 model: Some(model),
824 summarization_model: None,
825 prompt_capabilities_tx,
826 prompt_capabilities_rx,
827 project,
828 action_log,
829 file_read_times: HashMap::default(),
830 imported: false,
831 subagent_context: Some(subagent_context),
832 running_subagents: Vec::new(),
833 }
834 }
835
836 pub fn id(&self) -> &acp::SessionId {
837 &self.id
838 }
839
840 /// Returns true if this thread was imported from a shared thread.
841 pub fn is_imported(&self) -> bool {
842 self.imported
843 }
844
845 pub fn replay(
846 &mut self,
847 cx: &mut Context<Self>,
848 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
849 let (tx, rx) = mpsc::unbounded();
850 let stream = ThreadEventStream(tx);
851 for message in &self.messages {
852 match message {
853 Message::User(user_message) => stream.send_user_message(user_message),
854 Message::Agent(assistant_message) => {
855 for content in &assistant_message.content {
856 match content {
857 AgentMessageContent::Text(text) => stream.send_text(text),
858 AgentMessageContent::Thinking { text, .. } => {
859 stream.send_thinking(text)
860 }
861 AgentMessageContent::RedactedThinking(_) => {}
862 AgentMessageContent::ToolUse(tool_use) => {
863 self.replay_tool_call(
864 tool_use,
865 assistant_message.tool_results.get(&tool_use.id),
866 &stream,
867 cx,
868 );
869 }
870 }
871 }
872 }
873 Message::Resume => {}
874 }
875 }
876 rx
877 }
878
879 fn replay_tool_call(
880 &self,
881 tool_use: &LanguageModelToolUse,
882 tool_result: Option<&LanguageModelToolResult>,
883 stream: &ThreadEventStream,
884 cx: &mut Context<Self>,
885 ) {
886 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
887 self.context_server_registry
888 .read(cx)
889 .servers()
890 .find_map(|(_, tools)| {
891 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
892 Some(tool.clone())
893 } else {
894 None
895 }
896 })
897 });
898
899 let Some(tool) = tool else {
900 stream
901 .0
902 .unbounded_send(Ok(ThreadEvent::ToolCall(
903 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
904 .status(acp::ToolCallStatus::Failed)
905 .raw_input(tool_use.input.clone()),
906 )))
907 .ok();
908 return;
909 };
910
911 let title = tool.initial_title(tool_use.input.clone(), cx);
912 let kind = tool.kind();
913 stream.send_tool_call(
914 &tool_use.id,
915 &tool_use.name,
916 title,
917 kind,
918 tool_use.input.clone(),
919 );
920
921 let output = tool_result
922 .as_ref()
923 .and_then(|result| result.output.clone());
924 if let Some(output) = output.clone() {
925 // For replay, we use a dummy cancellation receiver since the tool already completed
926 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
927 let tool_event_stream = ToolCallEventStream::new(
928 tool_use.id.clone(),
929 stream.clone(),
930 Some(self.project.read(cx).fs().clone()),
931 cancellation_rx,
932 );
933 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
934 .log_err();
935 }
936
937 stream.update_tool_call_fields(
938 &tool_use.id,
939 acp::ToolCallUpdateFields::new()
940 .status(
941 tool_result
942 .as_ref()
943 .map_or(acp::ToolCallStatus::Failed, |result| {
944 if result.is_error {
945 acp::ToolCallStatus::Failed
946 } else {
947 acp::ToolCallStatus::Completed
948 }
949 }),
950 )
951 .raw_output(output),
952 );
953 }
954
955 pub fn from_db(
956 id: acp::SessionId,
957 db_thread: DbThread,
958 project: Entity<Project>,
959 project_context: Entity<ProjectContext>,
960 context_server_registry: Entity<ContextServerRegistry>,
961 templates: Arc<Templates>,
962 cx: &mut Context<Self>,
963 ) -> Self {
964 let profile_id = db_thread
965 .profile
966 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
967
968 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
969 db_thread
970 .model
971 .and_then(|model| {
972 let model = SelectedModel {
973 provider: model.provider.clone().into(),
974 model: model.model.into(),
975 };
976 registry.select_model(&model, cx)
977 })
978 .or_else(|| registry.default_model())
979 .map(|model| model.model)
980 });
981
982 if model.is_none() {
983 model = Self::resolve_profile_model(&profile_id, cx);
984 }
985 if model.is_none() {
986 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
987 registry.default_model().map(|model| model.model)
988 });
989 }
990
991 let (prompt_capabilities_tx, prompt_capabilities_rx) =
992 watch::channel(Self::prompt_capabilities(model.as_deref()));
993
994 let action_log = cx.new(|_| ActionLog::new(project.clone()));
995
996 Self {
997 id,
998 prompt_id: PromptId::new(),
999 title: if db_thread.title.is_empty() {
1000 None
1001 } else {
1002 Some(db_thread.title.clone())
1003 },
1004 pending_title_generation: None,
1005 pending_summary_generation: None,
1006 summary: db_thread.detailed_summary,
1007 messages: db_thread.messages,
1008 user_store: project.read(cx).user_store(),
1009 completion_mode: db_thread.completion_mode.unwrap_or_default(),
1010 running_turn: None,
1011 pending_message: None,
1012 tools: BTreeMap::default(),
1013 tool_use_limit_reached: false,
1014 request_token_usage: db_thread.request_token_usage.clone(),
1015 cumulative_token_usage: db_thread.cumulative_token_usage,
1016 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
1017 context_server_registry,
1018 profile_id,
1019 project_context,
1020 templates,
1021 model,
1022 summarization_model: None,
1023 project,
1024 action_log,
1025 updated_at: db_thread.updated_at,
1026 prompt_capabilities_tx,
1027 prompt_capabilities_rx,
1028 file_read_times: HashMap::default(),
1029 imported: db_thread.imported,
1030 subagent_context: None,
1031 running_subagents: Vec::new(),
1032 }
1033 }
1034
1035 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
1036 let initial_project_snapshot = self.initial_project_snapshot.clone();
1037 let mut thread = DbThread {
1038 title: self.title(),
1039 messages: self.messages.clone(),
1040 updated_at: self.updated_at,
1041 detailed_summary: self.summary.clone(),
1042 initial_project_snapshot: None,
1043 cumulative_token_usage: self.cumulative_token_usage,
1044 request_token_usage: self.request_token_usage.clone(),
1045 model: self.model.as_ref().map(|model| DbLanguageModel {
1046 provider: model.provider_id().to_string(),
1047 model: model.name().0.to_string(),
1048 }),
1049 completion_mode: Some(self.completion_mode),
1050 profile: Some(self.profile_id.clone()),
1051 imported: self.imported,
1052 };
1053
1054 cx.background_spawn(async move {
1055 let initial_project_snapshot = initial_project_snapshot.await;
1056 thread.initial_project_snapshot = initial_project_snapshot;
1057 thread
1058 })
1059 }
1060
1061 /// Create a snapshot of the current project state including git information and unsaved buffers.
1062 fn project_snapshot(
1063 project: Entity<Project>,
1064 cx: &mut Context<Self>,
1065 ) -> Task<Arc<ProjectSnapshot>> {
1066 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
1067 cx.spawn(async move |_, _| {
1068 let snapshot = task.await;
1069
1070 Arc::new(ProjectSnapshot {
1071 worktree_snapshots: snapshot.worktree_snapshots,
1072 timestamp: Utc::now(),
1073 })
1074 })
1075 }
1076
1077 pub fn project_context(&self) -> &Entity<ProjectContext> {
1078 &self.project_context
1079 }
1080
1081 pub fn project(&self) -> &Entity<Project> {
1082 &self.project
1083 }
1084
1085 pub fn action_log(&self) -> &Entity<ActionLog> {
1086 &self.action_log
1087 }
1088
1089 pub fn is_empty(&self) -> bool {
1090 self.messages.is_empty() && self.title.is_none()
1091 }
1092
1093 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
1094 self.model.as_ref()
1095 }
1096
1097 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
1098 let old_usage = self.latest_token_usage();
1099 self.model = Some(model);
1100 let new_caps = Self::prompt_capabilities(self.model.as_deref());
1101 let new_usage = self.latest_token_usage();
1102 if old_usage != new_usage {
1103 cx.emit(TokenUsageUpdated(new_usage));
1104 }
1105 self.prompt_capabilities_tx.send(new_caps).log_err();
1106 cx.notify()
1107 }
1108
1109 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
1110 self.summarization_model.as_ref()
1111 }
1112
1113 pub fn set_summarization_model(
1114 &mut self,
1115 model: Option<Arc<dyn LanguageModel>>,
1116 cx: &mut Context<Self>,
1117 ) {
1118 self.summarization_model = model;
1119 cx.notify()
1120 }
1121
1122 pub fn completion_mode(&self) -> CompletionMode {
1123 self.completion_mode
1124 }
1125
1126 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
1127 let old_usage = self.latest_token_usage();
1128 self.completion_mode = mode;
1129 let new_usage = self.latest_token_usage();
1130 if old_usage != new_usage {
1131 cx.emit(TokenUsageUpdated(new_usage));
1132 }
1133 cx.notify()
1134 }
1135
1136 pub fn last_message(&self) -> Option<Message> {
1137 if let Some(message) = self.pending_message.clone() {
1138 Some(Message::Agent(message))
1139 } else {
1140 self.messages.last().cloned()
1141 }
1142 }
1143
1144 pub fn add_default_tools(
1145 &mut self,
1146 environment: Rc<dyn ThreadEnvironment>,
1147 cx: &mut Context<Self>,
1148 ) {
1149 let language_registry = self.project.read(cx).languages().clone();
1150 self.add_tool(CopyPathTool::new(self.project.clone()));
1151 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1152 self.add_tool(DeletePathTool::new(
1153 self.project.clone(),
1154 self.action_log.clone(),
1155 ));
1156 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1157 self.add_tool(EditFileTool::new(
1158 self.project.clone(),
1159 cx.weak_entity(),
1160 language_registry,
1161 Templates::new(),
1162 ));
1163 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1164 self.add_tool(FindPathTool::new(self.project.clone()));
1165 self.add_tool(GrepTool::new(self.project.clone()));
1166 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1167 self.add_tool(MovePathTool::new(self.project.clone()));
1168 self.add_tool(NowTool);
1169 self.add_tool(OpenTool::new(self.project.clone()));
1170 self.add_tool(ReadFileTool::new(
1171 cx.weak_entity(),
1172 self.project.clone(),
1173 self.action_log.clone(),
1174 ));
1175 self.add_tool(SaveFileTool::new(self.project.clone()));
1176 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1177 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1178 self.add_tool(ThinkingTool);
1179 self.add_tool(WebSearchTool);
1180
1181 if cx.has_flag::<SubagentsFeatureFlag>() && self.depth() < MAX_SUBAGENT_DEPTH {
1182 let parent_tools = self.tools.clone();
1183 self.add_tool(SubagentTool::new(
1184 cx.weak_entity(),
1185 self.project.clone(),
1186 self.project_context.clone(),
1187 self.context_server_registry.clone(),
1188 self.templates.clone(),
1189 self.depth(),
1190 parent_tools,
1191 ));
1192 }
1193 }
1194
1195 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1196 self.tools.insert(T::name().into(), tool.erase());
1197 }
1198
1199 pub fn remove_tool(&mut self, name: &str) -> bool {
1200 self.tools.remove(name).is_some()
1201 }
1202
1203 pub fn restrict_tools(&mut self, allowed: &collections::HashSet<SharedString>) {
1204 self.tools.retain(|name, _| allowed.contains(name));
1205 }
1206
1207 pub fn profile(&self) -> &AgentProfileId {
1208 &self.profile_id
1209 }
1210
1211 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1212 if self.profile_id == profile_id {
1213 return;
1214 }
1215
1216 self.profile_id = profile_id;
1217
1218 // Swap to the profile's preferred model when available.
1219 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1220 self.set_model(model, cx);
1221 }
1222 }
1223
1224 pub fn cancel(&mut self, cx: &mut Context<Self>) -> Task<()> {
1225 for subagent in self.running_subagents.drain(..) {
1226 if let Some(subagent) = subagent.upgrade() {
1227 subagent.update(cx, |thread, cx| thread.cancel(cx)).detach();
1228 }
1229 }
1230
1231 let Some(running_turn) = self.running_turn.take() else {
1232 self.flush_pending_message(cx);
1233 return Task::ready(());
1234 };
1235
1236 let turn_task = running_turn.cancel();
1237
1238 cx.spawn(async move |this, cx| {
1239 turn_task.await;
1240 this.update(cx, |this, cx| {
1241 this.flush_pending_message(cx);
1242 })
1243 .ok();
1244 })
1245 }
1246
1247 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1248 let Some(last_user_message) = self.last_user_message() else {
1249 return;
1250 };
1251
1252 self.request_token_usage
1253 .insert(last_user_message.id.clone(), update);
1254 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1255 cx.notify();
1256 }
1257
1258 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1259 self.cancel(cx).detach();
1260 // Clear pending message since cancel will try to flush it asynchronously,
1261 // and we don't want that content to be added after we truncate
1262 self.pending_message.take();
1263 let Some(position) = self.messages.iter().position(
1264 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1265 ) else {
1266 return Err(anyhow!("Message not found"));
1267 };
1268
1269 for message in self.messages.drain(position..) {
1270 match message {
1271 Message::User(message) => {
1272 self.request_token_usage.remove(&message.id);
1273 }
1274 Message::Agent(_) | Message::Resume => {}
1275 }
1276 }
1277 self.clear_summary();
1278 cx.notify();
1279 Ok(())
1280 }
1281
1282 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1283 let last_user_message = self.last_user_message()?;
1284 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1285 Some(*tokens)
1286 }
1287
1288 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1289 let usage = self.latest_request_token_usage()?;
1290 let model = self.model.clone()?;
1291 Some(acp_thread::TokenUsage {
1292 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1293 used_tokens: usage.total_tokens(),
1294 input_tokens: usage.input_tokens,
1295 output_tokens: usage.output_tokens,
1296 })
1297 }
1298
1299 /// Get the total input token count as of the message before the given message.
1300 ///
1301 /// Returns `None` if:
1302 /// - `target_id` is the first message (no previous message)
1303 /// - The previous message hasn't received a response yet (no usage data)
1304 /// - `target_id` is not found in the messages
1305 pub fn tokens_before_message(&self, target_id: &UserMessageId) -> Option<u64> {
1306 let mut previous_user_message_id: Option<&UserMessageId> = None;
1307
1308 for message in &self.messages {
1309 if let Message::User(user_msg) = message {
1310 if &user_msg.id == target_id {
1311 let prev_id = previous_user_message_id?;
1312 let usage = self.request_token_usage.get(prev_id)?;
1313 return Some(usage.input_tokens);
1314 }
1315 previous_user_message_id = Some(&user_msg.id);
1316 }
1317 }
1318 None
1319 }
1320
1321 /// Look up the active profile and resolve its preferred model if one is configured.
1322 fn resolve_profile_model(
1323 profile_id: &AgentProfileId,
1324 cx: &mut Context<Self>,
1325 ) -> Option<Arc<dyn LanguageModel>> {
1326 let selection = AgentSettings::get_global(cx)
1327 .profiles
1328 .get(profile_id)?
1329 .default_model
1330 .clone()?;
1331 Self::resolve_model_from_selection(&selection, cx)
1332 }
1333
1334 /// Translate a stored model selection into the configured model from the registry.
1335 fn resolve_model_from_selection(
1336 selection: &LanguageModelSelection,
1337 cx: &mut Context<Self>,
1338 ) -> Option<Arc<dyn LanguageModel>> {
1339 let selected = SelectedModel {
1340 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1341 model: LanguageModelId::from(selection.model.clone()),
1342 };
1343 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1344 registry
1345 .select_model(&selected, cx)
1346 .map(|configured| configured.model)
1347 })
1348 }
1349
1350 pub fn resume(
1351 &mut self,
1352 cx: &mut Context<Self>,
1353 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1354 self.messages.push(Message::Resume);
1355 cx.notify();
1356
1357 log::debug!("Total messages in thread: {}", self.messages.len());
1358 self.run_turn(cx)
1359 }
1360
1361 /// Sending a message results in the model streaming a response, which could include tool calls.
1362 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1363 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1364 pub fn send<T>(
1365 &mut self,
1366 id: UserMessageId,
1367 content: impl IntoIterator<Item = T>,
1368 cx: &mut Context<Self>,
1369 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1370 where
1371 T: Into<UserMessageContent>,
1372 {
1373 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1374 log::debug!("Thread::send content: {:?}", content);
1375
1376 self.messages
1377 .push(Message::User(UserMessage { id, content }));
1378 cx.notify();
1379
1380 self.send_existing(cx)
1381 }
1382
1383 pub fn send_existing(
1384 &mut self,
1385 cx: &mut Context<Self>,
1386 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1387 let model = self.model().context("No language model configured")?;
1388
1389 log::info!("Thread::send called with model: {}", model.name().0);
1390 self.advance_prompt_id();
1391
1392 log::debug!("Total messages in thread: {}", self.messages.len());
1393 self.run_turn(cx)
1394 }
1395
1396 pub fn push_acp_user_block(
1397 &mut self,
1398 id: UserMessageId,
1399 blocks: impl IntoIterator<Item = acp::ContentBlock>,
1400 path_style: PathStyle,
1401 cx: &mut Context<Self>,
1402 ) {
1403 let content = blocks
1404 .into_iter()
1405 .map(|block| UserMessageContent::from_content_block(block, path_style))
1406 .collect::<Vec<_>>();
1407 self.messages
1408 .push(Message::User(UserMessage { id, content }));
1409 cx.notify();
1410 }
1411
1412 pub fn push_acp_agent_block(&mut self, block: acp::ContentBlock, cx: &mut Context<Self>) {
1413 let text = match block {
1414 acp::ContentBlock::Text(text_content) => text_content.text,
1415 acp::ContentBlock::Image(_) => "[image]".to_string(),
1416 acp::ContentBlock::Audio(_) => "[audio]".to_string(),
1417 acp::ContentBlock::ResourceLink(resource_link) => resource_link.uri,
1418 acp::ContentBlock::Resource(resource) => match resource.resource {
1419 acp::EmbeddedResourceResource::TextResourceContents(resource) => resource.uri,
1420 acp::EmbeddedResourceResource::BlobResourceContents(resource) => resource.uri,
1421 _ => "[resource]".to_string(),
1422 },
1423 _ => "[unknown]".to_string(),
1424 };
1425
1426 self.messages.push(Message::Agent(AgentMessage {
1427 content: vec![AgentMessageContent::Text(text)],
1428 ..Default::default()
1429 }));
1430 cx.notify();
1431 }
1432
1433 #[cfg(feature = "eval")]
1434 pub fn proceed(
1435 &mut self,
1436 cx: &mut Context<Self>,
1437 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1438 self.run_turn(cx)
1439 }
1440
1441 fn run_turn(
1442 &mut self,
1443 cx: &mut Context<Self>,
1444 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1445 // Flush the old pending message synchronously before cancelling,
1446 // to avoid a race where the detached cancel task might flush the NEW
1447 // turn's pending message instead of the old one.
1448 self.flush_pending_message(cx);
1449 self.cancel(cx).detach();
1450
1451 let model = self.model.clone().context("No language model configured")?;
1452 let profile = AgentSettings::get_global(cx)
1453 .profiles
1454 .get(&self.profile_id)
1455 .context("Profile not found")?;
1456 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1457 let event_stream = ThreadEventStream(events_tx);
1458 let message_ix = self.messages.len().saturating_sub(1);
1459 self.tool_use_limit_reached = false;
1460 self.clear_summary();
1461 let (cancellation_tx, mut cancellation_rx) = watch::channel(false);
1462 self.running_turn = Some(RunningTurn {
1463 event_stream: event_stream.clone(),
1464 tools: self.enabled_tools(profile, &model, cx),
1465 cancellation_tx,
1466 _task: cx.spawn(async move |this, cx| {
1467 log::debug!("Starting agent turn execution");
1468
1469 let turn_result = Self::run_turn_internal(
1470 &this,
1471 model,
1472 &event_stream,
1473 cancellation_rx.clone(),
1474 cx,
1475 )
1476 .await;
1477
1478 // Check if we were cancelled - if so, cancel() already took running_turn
1479 // and we shouldn't touch it (it might be a NEW turn now)
1480 let was_cancelled = *cancellation_rx.borrow();
1481 if was_cancelled {
1482 log::debug!("Turn was cancelled, skipping cleanup");
1483 return;
1484 }
1485
1486 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1487
1488 match turn_result {
1489 Ok(()) => {
1490 log::debug!("Turn execution completed");
1491 event_stream.send_stop(acp::StopReason::EndTurn);
1492 }
1493 Err(error) => {
1494 log::error!("Turn execution failed: {:?}", error);
1495 match error.downcast::<CompletionError>() {
1496 Ok(CompletionError::Refusal) => {
1497 event_stream.send_stop(acp::StopReason::Refusal);
1498 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1499 }
1500 Ok(CompletionError::MaxTokens) => {
1501 event_stream.send_stop(acp::StopReason::MaxTokens);
1502 }
1503 Ok(CompletionError::Other(error)) | Err(error) => {
1504 event_stream.send_error(error);
1505 }
1506 }
1507 }
1508 }
1509
1510 _ = this.update(cx, |this, _| this.running_turn.take());
1511 }),
1512 });
1513 Ok(events_rx)
1514 }
1515
1516 async fn run_turn_internal(
1517 this: &WeakEntity<Self>,
1518 model: Arc<dyn LanguageModel>,
1519 event_stream: &ThreadEventStream,
1520 mut cancellation_rx: watch::Receiver<bool>,
1521 cx: &mut AsyncApp,
1522 ) -> Result<()> {
1523 let mut attempt = 0;
1524 let mut intent = CompletionIntent::UserPrompt;
1525 loop {
1526 let request =
1527 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1528
1529 telemetry::event!(
1530 "Agent Thread Completion",
1531 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1532 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1533 model = model.telemetry_id(),
1534 model_provider = model.provider_id().to_string(),
1535 attempt
1536 );
1537
1538 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1539
1540 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1541 Ok(events) => (events, None),
1542 Err(err) => (stream::empty().boxed(), Some(err)),
1543 };
1544 let mut tool_results = FuturesUnordered::new();
1545 let mut cancelled = false;
1546 loop {
1547 // Race between getting the first event and cancellation
1548 let first_event = futures::select! {
1549 event = events.next().fuse() => event,
1550 _ = cancellation_rx.changed().fuse() => {
1551 if *cancellation_rx.borrow() {
1552 cancelled = true;
1553 break;
1554 }
1555 continue;
1556 }
1557 };
1558 let Some(first_event) = first_event else {
1559 break;
1560 };
1561
1562 // Collect all immediately available events to process as a batch
1563 let mut batch = vec![first_event];
1564 while let Some(event) = events.next().now_or_never().flatten() {
1565 batch.push(event);
1566 }
1567
1568 // Process the batch in a single update
1569 let batch_result = this.update(cx, |this, cx| {
1570 let mut batch_tool_results = Vec::new();
1571 let mut batch_error = None;
1572
1573 for event in batch {
1574 log::trace!("Received completion event: {:?}", event);
1575 match event {
1576 Ok(event) => {
1577 match this.handle_completion_event(
1578 event,
1579 event_stream,
1580 cancellation_rx.clone(),
1581 cx,
1582 ) {
1583 Ok(Some(task)) => batch_tool_results.push(task),
1584 Ok(None) => {}
1585 Err(err) => {
1586 batch_error = Some(err);
1587 break;
1588 }
1589 }
1590 }
1591 Err(err) => {
1592 batch_error = Some(err.into());
1593 break;
1594 }
1595 }
1596 }
1597
1598 cx.notify();
1599 (batch_tool_results, batch_error)
1600 })?;
1601
1602 tool_results.extend(batch_result.0);
1603 if let Some(err) = batch_result.1 {
1604 error = Some(err.downcast()?);
1605 break;
1606 }
1607 }
1608
1609 let end_turn = tool_results.is_empty();
1610 while let Some(tool_result) = tool_results.next().await {
1611 log::debug!("Tool finished {:?}", tool_result);
1612
1613 event_stream.update_tool_call_fields(
1614 &tool_result.tool_use_id,
1615 acp::ToolCallUpdateFields::new()
1616 .status(if tool_result.is_error {
1617 acp::ToolCallStatus::Failed
1618 } else {
1619 acp::ToolCallStatus::Completed
1620 })
1621 .raw_output(tool_result.output.clone()),
1622 );
1623 this.update(cx, |this, _cx| {
1624 this.pending_message()
1625 .tool_results
1626 .insert(tool_result.tool_use_id.clone(), tool_result);
1627 })?;
1628 }
1629
1630 this.update(cx, |this, cx| {
1631 this.flush_pending_message(cx);
1632 if this.title.is_none() && this.pending_title_generation.is_none() {
1633 this.generate_title(cx);
1634 }
1635 })?;
1636
1637 if cancelled {
1638 log::debug!("Turn cancelled by user, exiting");
1639 return Ok(());
1640 }
1641
1642 if let Some(error) = error {
1643 attempt += 1;
1644 let retry = this.update(cx, |this, cx| {
1645 let user_store = this.user_store.read(cx);
1646 this.handle_completion_error(error, attempt, user_store.plan())
1647 })??;
1648 let timer = cx.background_executor().timer(retry.duration);
1649 event_stream.send_retry(retry);
1650 timer.await;
1651 this.update(cx, |this, _cx| {
1652 if let Some(Message::Agent(message)) = this.messages.last() {
1653 if message.tool_results.is_empty() {
1654 intent = CompletionIntent::UserPrompt;
1655 this.messages.push(Message::Resume);
1656 }
1657 }
1658 })?;
1659 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1660 return Err(language_model::ToolUseLimitReachedError.into());
1661 } else if end_turn {
1662 return Ok(());
1663 } else {
1664 intent = CompletionIntent::ToolResults;
1665 attempt = 0;
1666 }
1667 }
1668 }
1669
1670 fn handle_completion_error(
1671 &mut self,
1672 error: LanguageModelCompletionError,
1673 attempt: u8,
1674 plan: Option<Plan>,
1675 ) -> Result<acp_thread::RetryStatus> {
1676 let Some(model) = self.model.as_ref() else {
1677 return Err(anyhow!(error));
1678 };
1679
1680 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1681 match plan {
1682 Some(Plan::V2(_)) => true,
1683 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1684 None => false,
1685 }
1686 } else {
1687 true
1688 };
1689
1690 if !auto_retry {
1691 return Err(anyhow!(error));
1692 }
1693
1694 let Some(strategy) = Self::retry_strategy_for(&error) else {
1695 return Err(anyhow!(error));
1696 };
1697
1698 let max_attempts = match &strategy {
1699 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1700 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1701 };
1702
1703 if attempt > max_attempts {
1704 return Err(anyhow!(error));
1705 }
1706
1707 let delay = match &strategy {
1708 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1709 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1710 Duration::from_secs(delay_secs)
1711 }
1712 RetryStrategy::Fixed { delay, .. } => *delay,
1713 };
1714 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1715
1716 Ok(acp_thread::RetryStatus {
1717 last_error: error.to_string().into(),
1718 attempt: attempt as usize,
1719 max_attempts: max_attempts as usize,
1720 started_at: Instant::now(),
1721 duration: delay,
1722 })
1723 }
1724
1725 /// A helper method that's called on every streamed completion event.
1726 /// Returns an optional tool result task, which the main agentic loop will
1727 /// send back to the model when it resolves.
1728 fn handle_completion_event(
1729 &mut self,
1730 event: LanguageModelCompletionEvent,
1731 event_stream: &ThreadEventStream,
1732 cancellation_rx: watch::Receiver<bool>,
1733 cx: &mut Context<Self>,
1734 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1735 log::trace!("Handling streamed completion event: {:?}", event);
1736 use LanguageModelCompletionEvent::*;
1737
1738 match event {
1739 StartMessage { .. } => {
1740 self.flush_pending_message(cx);
1741 self.pending_message = Some(AgentMessage::default());
1742 }
1743 Text(new_text) => self.handle_text_event(new_text, event_stream),
1744 Thinking { text, signature } => {
1745 self.handle_thinking_event(text, signature, event_stream)
1746 }
1747 RedactedThinking { data } => self.handle_redacted_thinking_event(data),
1748 ReasoningDetails(details) => {
1749 let last_message = self.pending_message();
1750 // Store the last non-empty reasoning_details (overwrites earlier ones)
1751 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1752 if let serde_json::Value::Array(ref arr) = details {
1753 if !arr.is_empty() {
1754 last_message.reasoning_details = Some(details);
1755 }
1756 } else {
1757 last_message.reasoning_details = Some(details);
1758 }
1759 }
1760 ToolUse(tool_use) => {
1761 return Ok(self.handle_tool_use_event(tool_use, event_stream, cancellation_rx, cx));
1762 }
1763 ToolUseJsonParseError {
1764 id,
1765 tool_name,
1766 raw_input,
1767 json_parse_error,
1768 } => {
1769 return Ok(Some(Task::ready(
1770 self.handle_tool_use_json_parse_error_event(
1771 id,
1772 tool_name,
1773 raw_input,
1774 json_parse_error,
1775 ),
1776 )));
1777 }
1778 UsageUpdate(usage) => {
1779 telemetry::event!(
1780 "Agent Thread Completion Usage Updated",
1781 thread_id = self.id.to_string(),
1782 prompt_id = self.prompt_id.to_string(),
1783 model = self.model.as_ref().map(|m| m.telemetry_id()),
1784 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1785 input_tokens = usage.input_tokens,
1786 output_tokens = usage.output_tokens,
1787 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1788 cache_read_input_tokens = usage.cache_read_input_tokens,
1789 );
1790 self.update_token_usage(usage, cx);
1791 }
1792 UsageUpdated { amount, limit } => {
1793 self.update_model_request_usage(amount, limit, cx);
1794 }
1795 ToolUseLimitReached => {
1796 self.tool_use_limit_reached = true;
1797 }
1798 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1799 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1800 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1801 Started | Queued { .. } => {}
1802 }
1803
1804 Ok(None)
1805 }
1806
1807 fn handle_text_event(&mut self, new_text: String, event_stream: &ThreadEventStream) {
1808 event_stream.send_text(&new_text);
1809
1810 let last_message = self.pending_message();
1811 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1812 text.push_str(&new_text);
1813 } else {
1814 last_message
1815 .content
1816 .push(AgentMessageContent::Text(new_text));
1817 }
1818 }
1819
1820 fn handle_thinking_event(
1821 &mut self,
1822 new_text: String,
1823 new_signature: Option<String>,
1824 event_stream: &ThreadEventStream,
1825 ) {
1826 event_stream.send_thinking(&new_text);
1827
1828 let last_message = self.pending_message();
1829 if let Some(AgentMessageContent::Thinking { text, signature }) =
1830 last_message.content.last_mut()
1831 {
1832 text.push_str(&new_text);
1833 *signature = new_signature.or(signature.take());
1834 } else {
1835 last_message.content.push(AgentMessageContent::Thinking {
1836 text: new_text,
1837 signature: new_signature,
1838 });
1839 }
1840 }
1841
1842 fn handle_redacted_thinking_event(&mut self, data: String) {
1843 let last_message = self.pending_message();
1844 last_message
1845 .content
1846 .push(AgentMessageContent::RedactedThinking(data));
1847 }
1848
1849 fn handle_tool_use_event(
1850 &mut self,
1851 tool_use: LanguageModelToolUse,
1852 event_stream: &ThreadEventStream,
1853 cancellation_rx: watch::Receiver<bool>,
1854 cx: &mut Context<Self>,
1855 ) -> Option<Task<LanguageModelToolResult>> {
1856 cx.notify();
1857
1858 let tool = self.tool(tool_use.name.as_ref());
1859 let mut title = SharedString::from(&tool_use.name);
1860 let mut kind = acp::ToolKind::Other;
1861 if let Some(tool) = tool.as_ref() {
1862 title = tool.initial_title(tool_use.input.clone(), cx);
1863 kind = tool.kind();
1864 }
1865
        // Ensure the last message ends with the current tool use
1867 let last_message = self.pending_message();
1868 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1869 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1870 if last_tool_use.id == tool_use.id {
1871 *last_tool_use = tool_use.clone();
1872 false
1873 } else {
1874 true
1875 }
1876 } else {
1877 true
1878 }
1879 });
1880
1881 if push_new_tool_use {
1882 event_stream.send_tool_call(
1883 &tool_use.id,
1884 &tool_use.name,
1885 title,
1886 kind,
1887 tool_use.input.clone(),
1888 );
1889 last_message
1890 .content
1891 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1892 } else {
1893 event_stream.update_tool_call_fields(
1894 &tool_use.id,
1895 acp::ToolCallUpdateFields::new()
1896 .title(title.as_str())
1897 .kind(kind)
1898 .raw_input(tool_use.input.clone()),
1899 );
1900 }
1901
1902 if !tool_use.is_input_complete {
1903 return None;
1904 }
1905
1906 let Some(tool) = tool else {
1907 let content = format!("No tool named {} exists", tool_use.name);
1908 return Some(Task::ready(LanguageModelToolResult {
1909 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1910 tool_use_id: tool_use.id,
1911 tool_name: tool_use.name,
1912 is_error: true,
1913 output: None,
1914 }));
1915 };
1916
1917 let fs = self.project.read(cx).fs().clone();
1918 let tool_event_stream = ToolCallEventStream::new(
1919 tool_use.id.clone(),
1920 event_stream.clone(),
1921 Some(fs),
1922 cancellation_rx,
1923 );
1924 tool_event_stream.update_fields(
1925 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1926 );
1927 let supports_images = self.model().is_some_and(|model| model.supports_images());
1928 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1929 log::debug!("Running tool {}", tool_use.name);
1930 Some(cx.foreground_executor().spawn(async move {
1931 let tool_result = tool_result.await.and_then(|output| {
1932 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1933 && !supports_images
1934 {
1935 return Err(anyhow!(
1936 "Attempted to read an image, but this model doesn't support it.",
1937 ));
1938 }
1939 Ok(output)
1940 });
1941
1942 match tool_result {
1943 Ok(output) => LanguageModelToolResult {
1944 tool_use_id: tool_use.id,
1945 tool_name: tool_use.name,
1946 is_error: false,
1947 content: output.llm_output,
1948 output: Some(output.raw_output),
1949 },
1950 Err(error) => LanguageModelToolResult {
1951 tool_use_id: tool_use.id,
1952 tool_name: tool_use.name,
1953 is_error: true,
1954 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1955 output: Some(error.to_string().into()),
1956 },
1957 }
1958 }))
1959 }
1960
1961 fn handle_tool_use_json_parse_error_event(
1962 &mut self,
1963 tool_use_id: LanguageModelToolUseId,
1964 tool_name: Arc<str>,
1965 raw_input: Arc<str>,
1966 json_parse_error: String,
1967 ) -> LanguageModelToolResult {
1968 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1969 LanguageModelToolResult {
1970 tool_use_id,
1971 tool_name,
1972 is_error: true,
1973 content: LanguageModelToolResultContent::Text(tool_output.into()),
1974 output: Some(serde_json::Value::String(raw_input.to_string())),
1975 }
1976 }
1977
1978 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1979 self.project
1980 .read(cx)
1981 .user_store()
1982 .update(cx, |user_store, cx| {
1983 user_store.update_model_request_usage(
1984 ModelRequestUsage(RequestUsage {
1985 amount: amount as i32,
1986 limit,
1987 }),
1988 cx,
1989 )
1990 });
1991 }
1992
1993 pub fn title(&self) -> SharedString {
1994 self.title.clone().unwrap_or("New Thread".into())
1995 }
1996
1997 pub fn is_generating_summary(&self) -> bool {
1998 self.pending_summary_generation.is_some()
1999 }
2000
2001 pub fn is_generating_title(&self) -> bool {
2002 self.pending_title_generation.is_some()
2003 }
2004
2005 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
2006 if let Some(summary) = self.summary.as_ref() {
2007 return Task::ready(Some(summary.clone())).shared();
2008 }
2009 if let Some(task) = self.pending_summary_generation.clone() {
2010 return task;
2011 }
2012 let Some(model) = self.summarization_model.clone() else {
2013 log::error!("No summarization model available");
2014 return Task::ready(None).shared();
2015 };
2016 let mut request = LanguageModelRequest {
2017 intent: Some(CompletionIntent::ThreadContextSummarization),
2018 temperature: AgentSettings::temperature_for_model(&model, cx),
2019 ..Default::default()
2020 };
2021
2022 for message in &self.messages {
2023 request.messages.extend(message.to_request());
2024 }
2025
2026 request.messages.push(LanguageModelRequestMessage {
2027 role: Role::User,
2028 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
2029 cache: false,
2030 reasoning_details: None,
2031 });
2032
2033 let task = cx
2034 .spawn(async move |this, cx| {
2035 let mut summary = String::new();
2036 let mut messages = model.stream_completion(request, cx).await.log_err()?;
2037 while let Some(event) = messages.next().await {
2038 let event = event.log_err()?;
2039 let text = match event {
2040 LanguageModelCompletionEvent::Text(text) => text,
2041 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
2042 this.update(cx, |thread, cx| {
2043 thread.update_model_request_usage(amount, limit, cx);
2044 })
2045 .ok()?;
2046 continue;
2047 }
2048 _ => continue,
2049 };
2050
2051 let mut lines = text.lines();
2052 summary.extend(lines.next());
2053 }
2054
2055 log::debug!("Setting summary: {}", summary);
2056 let summary = SharedString::from(summary);
2057
2058 this.update(cx, |this, cx| {
2059 this.summary = Some(summary.clone());
2060 this.pending_summary_generation = None;
2061 cx.notify()
2062 })
2063 .ok()?;
2064
2065 Some(summary)
2066 })
2067 .shared();
2068 self.pending_summary_generation = Some(task.clone());
2069 task
2070 }
2071
2072 pub fn generate_title(&mut self, cx: &mut Context<Self>) {
2073 let Some(model) = self.summarization_model.clone() else {
2074 return;
2075 };
2076
2077 log::debug!(
2078 "Generating title with model: {:?}",
2079 self.summarization_model.as_ref().map(|model| model.name())
2080 );
2081 let mut request = LanguageModelRequest {
2082 intent: Some(CompletionIntent::ThreadSummarization),
2083 temperature: AgentSettings::temperature_for_model(&model, cx),
2084 ..Default::default()
2085 };
2086
2087 for message in &self.messages {
2088 request.messages.extend(message.to_request());
2089 }
2090
2091 request.messages.push(LanguageModelRequestMessage {
2092 role: Role::User,
2093 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
2094 cache: false,
2095 reasoning_details: None,
2096 });
2097 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
2098 let mut title = String::new();
2099
2100 let generate = async {
2101 let mut messages = model.stream_completion(request, cx).await?;
2102 while let Some(event) = messages.next().await {
2103 let event = event?;
2104 let text = match event {
2105 LanguageModelCompletionEvent::Text(text) => text,
2106 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
2107 this.update(cx, |thread, cx| {
2108 thread.update_model_request_usage(amount, limit, cx);
2109 })?;
2110 continue;
2111 }
2112 _ => continue,
2113 };
2114
2115 let mut lines = text.lines();
2116 title.extend(lines.next());
2117
2118 // Stop if the LLM generated multiple lines.
2119 if lines.next().is_some() {
2120 break;
2121 }
2122 }
2123 anyhow::Ok(())
2124 };
2125
2126 if generate.await.context("failed to generate title").is_ok() {
2127 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
2128 }
2129 _ = this.update(cx, |this, _| this.pending_title_generation = None);
2130 }));
2131 }
2132
2133 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
2134 self.pending_title_generation = None;
2135 if Some(&title) != self.title.as_ref() {
2136 self.title = Some(title);
2137 cx.emit(TitleUpdated);
2138 cx.notify();
2139 }
2140 }
2141
2142 fn clear_summary(&mut self) {
2143 self.summary = None;
2144 self.pending_summary_generation = None;
2145 }
2146
2147 fn last_user_message(&self) -> Option<&UserMessage> {
2148 self.messages
2149 .iter()
2150 .rev()
2151 .find_map(|message| match message {
2152 Message::User(user_message) => Some(user_message),
2153 Message::Agent(_) => None,
2154 Message::Resume => None,
2155 })
2156 }
2157
2158 fn pending_message(&mut self) -> &mut AgentMessage {
2159 self.pending_message.get_or_insert_default()
2160 }
2161
2162 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
2163 let Some(mut message) = self.pending_message.take() else {
2164 return;
2165 };
2166
2167 if message.content.is_empty() {
2168 return;
2169 }
2170
2171 for content in &message.content {
2172 let AgentMessageContent::ToolUse(tool_use) = content else {
2173 continue;
2174 };
2175
2176 if !message.tool_results.contains_key(&tool_use.id) {
2177 message.tool_results.insert(
2178 tool_use.id.clone(),
2179 LanguageModelToolResult {
2180 tool_use_id: tool_use.id.clone(),
2181 tool_name: tool_use.name.clone(),
2182 is_error: true,
2183 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
2184 output: None,
2185 },
2186 );
2187 }
2188 }
2189
2190 self.messages.push(Message::Agent(message));
2191 self.updated_at = Utc::now();
2192 self.clear_summary();
2193 cx.notify()
2194 }
2195
2196 pub(crate) fn build_completion_request(
2197 &self,
2198 completion_intent: CompletionIntent,
2199 cx: &App,
2200 ) -> Result<LanguageModelRequest> {
2201 let model = self.model().context("No language model configured")?;
2202 let tools = if let Some(turn) = self.running_turn.as_ref() {
2203 turn.tools
2204 .iter()
2205 .filter_map(|(tool_name, tool)| {
2206 log::trace!("Including tool: {}", tool_name);
2207 Some(LanguageModelRequestTool {
2208 name: tool_name.to_string(),
2209 description: tool.description().to_string(),
2210 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
2211 })
2212 })
2213 .collect::<Vec<_>>()
2214 } else {
2215 Vec::new()
2216 };
2217
2218 log::debug!("Building completion request");
2219 log::debug!("Completion intent: {:?}", completion_intent);
2220 log::debug!("Completion mode: {:?}", self.completion_mode);
2221
2222 let available_tools: Vec<_> = self
2223 .running_turn
2224 .as_ref()
2225 .map(|turn| turn.tools.keys().cloned().collect())
2226 .unwrap_or_default();
2227
2228 log::debug!("Request includes {} tools", available_tools.len());
2229 let messages = self.build_request_messages(available_tools, cx);
2230 log::debug!("Request will include {} messages", messages.len());
2231
2232 let request = LanguageModelRequest {
2233 thread_id: Some(self.id.to_string()),
2234 prompt_id: Some(self.prompt_id.to_string()),
2235 intent: Some(completion_intent),
2236 mode: Some(self.completion_mode.into()),
2237 messages,
2238 tools,
2239 tool_choice: None,
2240 stop: Vec::new(),
2241 temperature: AgentSettings::temperature_for_model(model, cx),
2242 thinking_allowed: true,
2243 };
2244
2245 log::debug!("Completion request built successfully");
2246 Ok(request)
2247 }
2248
2249 fn enabled_tools(
2250 &self,
2251 profile: &AgentProfileSettings,
2252 model: &Arc<dyn LanguageModel>,
2253 cx: &App,
2254 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
2255 fn truncate(tool_name: &SharedString) -> SharedString {
2256 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
2257 let mut truncated = tool_name.to_string();
2258 truncated.truncate(MAX_TOOL_NAME_LENGTH);
2259 truncated.into()
2260 } else {
2261 tool_name.clone()
2262 }
2263 }
2264
2265 let mut tools = self
2266 .tools
2267 .iter()
2268 .filter_map(|(tool_name, tool)| {
2269 if tool.supports_provider(&model.provider_id())
2270 && profile.is_tool_enabled(tool_name)
2271 {
2272 Some((truncate(tool_name), tool.clone()))
2273 } else {
2274 None
2275 }
2276 })
2277 .collect::<BTreeMap<_, _>>();
2278
2279 let mut context_server_tools = Vec::new();
2280 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
2281 let mut duplicate_tool_names = HashSet::default();
2282 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
2283 for (tool_name, tool) in server_tools {
2284 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
2285 let tool_name = truncate(tool_name);
2286 if !seen_tools.insert(tool_name.clone()) {
2287 duplicate_tool_names.insert(tool_name.clone());
2288 }
2289 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
2290 }
2291 }
2292 }
2293
2294 // When there are duplicate tool names, disambiguate by prefixing them
2295 // with the server ID. In the rare case there isn't enough space for the
2296 // disambiguated tool name, keep only the last tool with this name.
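        // Hypothetical example: if servers "github" and "gitlab" both expose a tool named
        // "search", the entries become "github_search" and "gitlab_search"; only when the
        // prefixed name can't fit within MAX_TOOL_NAME_LENGTH does the bare "search" entry
        // (from whichever server was iterated last) win.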
2297 for (server_id, tool_name, tool) in context_server_tools {
2298 if duplicate_tool_names.contains(&tool_name) {
2299 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
2300 if available >= 2 {
2301 let mut disambiguated = server_id.0.to_string();
2302 disambiguated.truncate(available - 1);
2303 disambiguated.push('_');
2304 disambiguated.push_str(&tool_name);
2305 tools.insert(disambiguated.into(), tool.clone());
2306 } else {
2307 tools.insert(tool_name, tool.clone());
2308 }
2309 } else {
2310 tools.insert(tool_name, tool.clone());
2311 }
2312 }
2313
2314 tools
2315 }
2316
2317 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
2318 self.running_turn.as_ref()?.tools.get(name).cloned()
2319 }
2320
2321 pub fn has_tool(&self, name: &str) -> bool {
2322 self.running_turn
2323 .as_ref()
2324 .is_some_and(|turn| turn.tools.contains_key(name))
2325 }
2326
2327 #[cfg(any(test, feature = "test-support"))]
2328 pub fn has_registered_tool(&self, name: &str) -> bool {
2329 self.tools.contains_key(name)
2330 }
2331
2332 pub fn registered_tool_names(&self) -> Vec<SharedString> {
2333 self.tools.keys().cloned().collect()
2334 }
2335
2336 pub fn register_running_subagent(&mut self, subagent: WeakEntity<Thread>) {
2337 self.running_subagents.push(subagent);
2338 }
2339
2340 pub fn unregister_running_subagent(&mut self, subagent: &WeakEntity<Thread>) {
2341 self.running_subagents
2342 .retain(|s| s.entity_id() != subagent.entity_id());
2343 }
2344
2345 pub fn running_subagent_count(&self) -> usize {
2346 self.running_subagents
2347 .iter()
2348 .filter(|s| s.upgrade().is_some())
2349 .count()
2350 }
2351
2352 pub fn is_subagent(&self) -> bool {
2353 self.subagent_context.is_some()
2354 }
2355
2356 pub fn depth(&self) -> u8 {
2357 self.subagent_context.as_ref().map(|c| c.depth).unwrap_or(0)
2358 }
2359
2360 pub fn is_turn_complete(&self) -> bool {
2361 self.running_turn.is_none()
2362 }
2363
2364 pub fn submit_user_message(
2365 &mut self,
2366 content: impl Into<String>,
2367 cx: &mut Context<Self>,
2368 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2369 let content = content.into();
2370 self.messages.push(Message::User(UserMessage {
2371 id: UserMessageId::new(),
2372 content: vec![UserMessageContent::Text(content)],
2373 }));
2374 cx.notify();
2375 self.send_existing(cx)
2376 }
2377
2378 pub fn interrupt_for_summary(
2379 &mut self,
2380 cx: &mut Context<Self>,
2381 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2382 let context = self
2383 .subagent_context
2384 .as_ref()
2385 .context("Not a subagent thread")?;
2386 let prompt = context.context_low_prompt.clone();
2387 self.cancel(cx).detach();
2388 self.submit_user_message(prompt, cx)
2389 }
2390
2391 pub fn request_final_summary(
2392 &mut self,
2393 cx: &mut Context<Self>,
2394 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2395 let context = self
2396 .subagent_context
2397 .as_ref()
2398 .context("Not a subagent thread")?;
2399 let prompt = context.summary_prompt.clone();
2400 self.submit_user_message(prompt, cx)
2401 }
2402
2403 fn build_request_messages(
2404 &self,
2405 available_tools: Vec<SharedString>,
2406 cx: &App,
2407 ) -> Vec<LanguageModelRequestMessage> {
2408 log::trace!(
2409 "Building request messages from {} thread messages",
2410 self.messages.len()
2411 );
2412
2413 let system_prompt = SystemPromptTemplate {
2414 project: self.project_context.read(cx),
2415 available_tools,
2416 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2417 }
2418 .render(&self.templates)
2419 .context("failed to build system prompt")
2420 .expect("Invalid template");
2421 let mut messages = vec![LanguageModelRequestMessage {
2422 role: Role::System,
2423 content: vec![system_prompt.into()],
2424 cache: false,
2425 reasoning_details: None,
2426 }];
2427 for message in &self.messages {
2428 messages.extend(message.to_request());
2429 }
2430
2431 if let Some(last_message) = messages.last_mut() {
2432 last_message.cache = true;
2433 }
2434
2435 if let Some(message) = self.pending_message.as_ref() {
2436 messages.extend(message.to_request());
2437 }
2438
2439 messages
2440 }
2441
2442 pub fn to_markdown(&self) -> String {
2443 let mut markdown = String::new();
2444 for (ix, message) in self.messages.iter().enumerate() {
2445 if ix > 0 {
2446 markdown.push('\n');
2447 }
2448 markdown.push_str(&message.to_markdown());
2449 }
2450
2451 if let Some(message) = self.pending_message.as_ref() {
2452 markdown.push('\n');
2453 markdown.push_str(&message.to_markdown());
2454 }
2455
2456 markdown
2457 }
2458
2459 fn advance_prompt_id(&mut self) {
2460 self.prompt_id = PromptId::new();
2461 }
2462
2463 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2464 use LanguageModelCompletionError::*;
2465 use http_client::StatusCode;
2466
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to
        //   MAX_RETRY_ATTEMPTS times, honoring any server-provided retry-after delay or backing off exponentially.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
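        // As a purely hypothetical illustration (the real values of BASE_RETRY_DELAY and
        // MAX_RETRY_ATTEMPTS are defined elsewhere): with a base delay of 5s and 4 attempts,
        // an exponential backoff that doubles each time would wait roughly 5s, 10s, 20s, and
        // 40s, whereas the fixed strategies wait `retry_after` (or the base delay) every time.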
2471 match error {
2472 HttpResponseError {
2473 status_code: StatusCode::TOO_MANY_REQUESTS,
2474 ..
2475 } => Some(RetryStrategy::ExponentialBackoff {
2476 initial_delay: BASE_RETRY_DELAY,
2477 max_attempts: MAX_RETRY_ATTEMPTS,
2478 }),
2479 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2480 Some(RetryStrategy::Fixed {
2481 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2482 max_attempts: MAX_RETRY_ATTEMPTS,
2483 })
2484 }
2485 UpstreamProviderError {
2486 status,
2487 retry_after,
2488 ..
2489 } => match *status {
2490 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2491 Some(RetryStrategy::Fixed {
2492 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2493 max_attempts: MAX_RETRY_ATTEMPTS,
2494 })
2495 }
2496 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2497 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2498 // Internal Server Error could be anything, retry up to 3 times.
2499 max_attempts: 3,
2500 }),
2501 status => {
2502 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2503 // but we frequently get them in practice. See https://http.dev/529
2504 if status.as_u16() == 529 {
2505 Some(RetryStrategy::Fixed {
2506 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2507 max_attempts: MAX_RETRY_ATTEMPTS,
2508 })
2509 } else {
2510 Some(RetryStrategy::Fixed {
2511 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2512 max_attempts: 2,
2513 })
2514 }
2515 }
2516 },
2517 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2518 delay: BASE_RETRY_DELAY,
2519 max_attempts: 3,
2520 }),
2521 ApiReadResponseError { .. }
2522 | HttpSend { .. }
2523 | DeserializeResponse { .. }
2524 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2525 delay: BASE_RETRY_DELAY,
2526 max_attempts: 3,
2527 }),
2528 // Retrying these errors definitely shouldn't help.
2529 HttpResponseError {
2530 status_code:
2531 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2532 ..
2533 }
2534 | AuthenticationError { .. }
2535 | PermissionError { .. }
2536 | NoApiKey { .. }
2537 | ApiEndpointNotFound { .. }
2538 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2540 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2541 delay: BASE_RETRY_DELAY,
2542 max_attempts: 1,
2543 }),
            // Retry all other 4xx and 5xx errors a few times.
2545 HttpResponseError { status_code, .. }
2546 if status_code.is_client_error() || status_code.is_server_error() =>
2547 {
2548 Some(RetryStrategy::Fixed {
2549 delay: BASE_RETRY_DELAY,
2550 max_attempts: 3,
2551 })
2552 }
2553 Other(err)
2554 if err.is::<language_model::PaymentRequiredError>()
2555 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2556 {
2557 // Retrying won't help for Payment Required or Model Request Limit errors (where
2558 // the user must upgrade to usage-based billing to get more requests, or else wait
2559 // for a significant amount of time for the request limit to reset).
2560 None
2561 }
            // Conservatively retry any other errors a couple of times.
2563 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2564 delay: BASE_RETRY_DELAY,
2565 max_attempts: 2,
2566 }),
2567 }
2568 }
2569}
2570
2571struct RunningTurn {
2572 /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests while the model performs tool calls and
    /// we run the tools and report their results back to it.
2575 _task: Task<()>,
2576 /// The current event stream for the running turn. Used to report a final
2577 /// cancellation event if we cancel the turn.
2578 event_stream: ThreadEventStream,
2579 /// The tools that were enabled for this turn.
2580 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2581 /// Sender to signal tool cancellation. When cancel is called, this is
2582 /// set to true so all tools can detect user-initiated cancellation.
2583 cancellation_tx: watch::Sender<bool>,
2584}
2585
2586impl RunningTurn {
2587 fn cancel(mut self) -> Task<()> {
2588 log::debug!("Cancelling in progress turn");
2589 self.cancellation_tx.send(true).ok();
2590 self.event_stream.send_canceled();
2591 self._task
2592 }
2593}
2594
2595pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2596
2597impl EventEmitter<TokenUsageUpdated> for Thread {}
2598
2599pub struct TitleUpdated;
2600
2601impl EventEmitter<TitleUpdated> for Thread {}
2602
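/// A strongly typed agent tool with serde-serializable input and output.
///
/// The implementation below is a minimal, illustrative sketch (the tool name, input
/// struct, and behavior are hypothetical) showing the general shape of a tool; it is
/// marked `ignore` because a real implementation depends on crate-local types.
///
/// ```ignore
/// use std::sync::Arc;
///
/// #[derive(serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
/// struct EchoToolInput {
///     /// The text to echo back to the model.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         // Echo the input text straight back as the tool result.
///         Task::ready(Ok(input.text))
///     }
/// }
/// ```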
2603pub trait AgentTool
2604where
2605 Self: 'static + Sized,
2606{
2607 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2608 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2609
2610 fn name() -> &'static str;
2611
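    /// A human-readable description of the tool that is sent to the model.
    ///
    /// By default this is read from the `description` field of `Self::Input`'s JSON schema,
    /// which for derived schemas typically comes from the doc comment on the input type.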
2612 fn description() -> SharedString {
2613 let schema = schemars::schema_for!(Self::Input);
2614 SharedString::new(
2615 schema
2616 .get("description")
2617 .and_then(|description| description.as_str())
2618 .unwrap_or_default(),
2619 )
2620 }
2621
2622 fn kind() -> acp::ToolKind;
2623
2624 /// The initial tool title to display. Can be updated during the tool run.
2625 fn initial_title(
2626 &self,
2627 input: Result<Self::Input, serde_json::Value>,
2628 cx: &mut App,
2629 ) -> SharedString;
2630
2631 /// Returns the JSON schema that describes the tool's input.
2632 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2633 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2634 }
2635
    /// Some tools depend on a specific provider (e.g. for billing or other reasons).
    /// This lets a tool declare whether it is compatible with the given provider or
    /// should be filtered out of the request.
2638 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2639 true
2640 }
2641
2642 /// Runs the tool with the provided input.
2643 fn run(
2644 self: Arc<Self>,
2645 input: Self::Input,
2646 event_stream: ToolCallEventStream,
2647 cx: &mut App,
2648 ) -> Task<Result<Self::Output>>;
2649
2650 /// Emits events for a previous execution of the tool.
2651 fn replay(
2652 &self,
2653 _input: Self::Input,
2654 _output: Self::Output,
2655 _event_stream: ToolCallEventStream,
2656 _cx: &mut App,
2657 ) -> Result<()> {
2658 Ok(())
2659 }
2660
2661 fn erase(self) -> Arc<dyn AnyAgentTool> {
2662 Arc::new(Erased(Arc::new(self)))
2663 }
2664}
2665
2666pub struct Erased<T>(T);
2667
2668pub struct AgentToolOutput {
2669 pub llm_output: LanguageModelToolResultContent,
2670 pub raw_output: serde_json::Value,
2671}
2672
2673pub trait AnyAgentTool {
2674 fn name(&self) -> SharedString;
2675 fn description(&self) -> SharedString;
2676 fn kind(&self) -> acp::ToolKind;
2677 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2678 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2679 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2680 true
2681 }
2682 fn run(
2683 self: Arc<Self>,
2684 input: serde_json::Value,
2685 event_stream: ToolCallEventStream,
2686 cx: &mut App,
2687 ) -> Task<Result<AgentToolOutput>>;
2688 fn replay(
2689 &self,
2690 input: serde_json::Value,
2691 output: serde_json::Value,
2692 event_stream: ToolCallEventStream,
2693 cx: &mut App,
2694 ) -> Result<()>;
2695}
2696
2697impl<T> AnyAgentTool for Erased<Arc<T>>
2698where
2699 T: AgentTool,
2700{
2701 fn name(&self) -> SharedString {
2702 T::name().into()
2703 }
2704
2705 fn description(&self) -> SharedString {
2706 T::description()
2707 }
2708
2709 fn kind(&self) -> agent_client_protocol::ToolKind {
2710 T::kind()
2711 }
2712
2713 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString {
2714 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2715 self.0.initial_title(parsed_input, _cx)
2716 }
2717
2718 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2719 let mut json = serde_json::to_value(T::input_schema(format))?;
2720 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2721 Ok(json)
2722 }
2723
2724 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2725 T::supports_provider(provider)
2726 }
2727
2728 fn run(
2729 self: Arc<Self>,
2730 input: serde_json::Value,
2731 event_stream: ToolCallEventStream,
2732 cx: &mut App,
2733 ) -> Task<Result<AgentToolOutput>> {
2734 cx.spawn(async move |cx| {
2735 let input = serde_json::from_value(input)?;
2736 let output = cx
2737 .update(|cx| self.0.clone().run(input, event_stream, cx))
2738 .await?;
2739 let raw_output = serde_json::to_value(&output)?;
2740 Ok(AgentToolOutput {
2741 llm_output: output.into(),
2742 raw_output,
2743 })
2744 })
2745 }
2746
2747 fn replay(
2748 &self,
2749 input: serde_json::Value,
2750 output: serde_json::Value,
2751 event_stream: ToolCallEventStream,
2752 cx: &mut App,
2753 ) -> Result<()> {
2754 let input = serde_json::from_value(input)?;
2755 let output = serde_json::from_value(output)?;
2756 self.0.replay(input, output, event_stream, cx)
2757 }
2758}
2759
2760#[derive(Clone)]
2761struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2762
2763impl ThreadEventStream {
2764 fn send_user_message(&self, message: &UserMessage) {
2765 self.0
2766 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2767 .ok();
2768 }
2769
2770 fn send_text(&self, text: &str) {
2771 self.0
2772 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2773 .ok();
2774 }
2775
2776 fn send_thinking(&self, text: &str) {
2777 self.0
2778 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2779 .ok();
2780 }
2781
2782 fn send_tool_call(
2783 &self,
2784 id: &LanguageModelToolUseId,
2785 tool_name: &str,
2786 title: SharedString,
2787 kind: acp::ToolKind,
2788 input: serde_json::Value,
2789 ) {
2790 self.0
2791 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2792 id,
2793 tool_name,
2794 title.to_string(),
2795 kind,
2796 input,
2797 ))))
2798 .ok();
2799 }
2800
2801 fn initial_tool_call(
2802 id: &LanguageModelToolUseId,
2803 tool_name: &str,
2804 title: String,
2805 kind: acp::ToolKind,
2806 input: serde_json::Value,
2807 ) -> acp::ToolCall {
2808 acp::ToolCall::new(id.to_string(), title)
2809 .kind(kind)
2810 .raw_input(input)
2811 .meta(acp_thread::meta_with_tool_name(tool_name))
2812 }
2813
2814 fn update_tool_call_fields(
2815 &self,
2816 tool_use_id: &LanguageModelToolUseId,
2817 fields: acp::ToolCallUpdateFields,
2818 ) {
2819 self.0
2820 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2821 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2822 )))
2823 .ok();
2824 }
2825
2826 fn send_retry(&self, status: acp_thread::RetryStatus) {
2827 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2828 }
2829
2830 fn send_stop(&self, reason: acp::StopReason) {
2831 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2832 }
2833
2834 fn send_canceled(&self) {
2835 self.0
2836 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2837 .ok();
2838 }
2839
2840 fn send_error(&self, error: impl Into<anyhow::Error>) {
2841 self.0.unbounded_send(Err(error.into())).ok();
2842 }
2843}
2844
2845#[derive(Clone)]
2846pub struct ToolCallEventStream {
2847 tool_use_id: LanguageModelToolUseId,
2848 stream: ThreadEventStream,
2849 fs: Option<Arc<dyn Fs>>,
2850 cancellation_rx: watch::Receiver<bool>,
2851}
2852
2853impl ToolCallEventStream {
2854 #[cfg(any(test, feature = "test-support"))]
2855 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2856 let (stream, receiver, _cancellation_tx) = Self::test_with_cancellation();
2857 (stream, receiver)
2858 }
2859
2860 #[cfg(any(test, feature = "test-support"))]
2861 pub fn test_with_cancellation() -> (Self, ToolCallEventStreamReceiver, watch::Sender<bool>) {
2862 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2863 let (cancellation_tx, cancellation_rx) = watch::channel(false);
2864
2865 let stream = ToolCallEventStream::new(
2866 "test_id".into(),
2867 ThreadEventStream(events_tx),
2868 None,
2869 cancellation_rx,
2870 );
2871
2872 (
2873 stream,
2874 ToolCallEventStreamReceiver(events_rx),
2875 cancellation_tx,
2876 )
2877 }
2878
2879 /// Signal cancellation for this event stream. Only available in tests.
2880 #[cfg(any(test, feature = "test-support"))]
2881 pub fn signal_cancellation_with_sender(cancellation_tx: &mut watch::Sender<bool>) {
2882 cancellation_tx.send(true).ok();
2883 }
2884
2885 fn new(
2886 tool_use_id: LanguageModelToolUseId,
2887 stream: ThreadEventStream,
2888 fs: Option<Arc<dyn Fs>>,
2889 cancellation_rx: watch::Receiver<bool>,
2890 ) -> Self {
2891 Self {
2892 tool_use_id,
2893 stream,
2894 fs,
2895 cancellation_rx,
2896 }
2897 }
2898
2899 /// Returns a future that resolves when the user cancels the tool call.
2900 /// Tools should select on this alongside their main work to detect user cancellation.
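    ///
    /// A minimal usage sketch (the `do_work` future and `handle` function are hypothetical):
    ///
    /// ```ignore
    /// use futures::{FutureExt as _, pin_mut, select};
    ///
    /// let work = do_work().fuse();
    /// let canceled = event_stream.cancelled_by_user().fuse();
    /// pin_mut!(work, canceled);
    /// select! {
    ///     result = work => handle(result),
    ///     _ = canceled => { /* user canceled; clean up and return early */ }
    /// }
    /// ```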
2901 pub fn cancelled_by_user(&self) -> impl std::future::Future<Output = ()> + '_ {
2902 let mut rx = self.cancellation_rx.clone();
2903 async move {
2904 loop {
2905 if *rx.borrow() {
2906 return;
2907 }
2908 if rx.changed().await.is_err() {
2909 // Sender dropped, will never be cancelled
2910 std::future::pending::<()>().await;
2911 }
2912 }
2913 }
2914 }
2915
2916 /// Returns true if the user has cancelled this tool call.
2917 /// This is useful for checking cancellation state after an operation completes,
2918 /// to determine if the completion was due to user cancellation.
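    ///
    /// A sketch (the awaited command future is hypothetical):
    ///
    /// ```ignore
    /// let exit_status = run_command.await;
    /// if event_stream.was_cancelled_by_user() {
    ///     // The command likely exited because we killed it on cancellation,
    ///     // so report a cancellation rather than a failure.
    /// }
    /// ```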
2919 pub fn was_cancelled_by_user(&self) -> bool {
2920 *self.cancellation_rx.clone().borrow()
2921 }
2922
2923 pub fn tool_use_id(&self) -> &LanguageModelToolUseId {
2924 &self.tool_use_id
2925 }
2926
2927 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2928 self.stream
2929 .update_tool_call_fields(&self.tool_use_id, fields);
2930 }
2931
2932 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2933 self.stream
2934 .0
2935 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2936 acp_thread::ToolCallUpdateDiff {
2937 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2938 diff,
2939 }
2940 .into(),
2941 )))
2942 .ok();
2943 }
2944
2945 pub fn update_subagent_thread(&self, thread: Entity<acp_thread::AcpThread>) {
2946 self.stream
2947 .0
2948 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2949 acp_thread::ToolCallUpdateSubagentThread {
2950 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2951 thread,
2952 }
2953 .into(),
2954 )))
2955 .ok();
2956 }
2957
2958 /// Authorize a third-party tool (e.g., MCP tool from a context server).
2959 ///
2960 /// Unlike built-in tools, third-party tools don't support pattern-based permissions.
2961 /// They only support `default_mode` (allow/deny/confirm) per tool.
2962 ///
2963 /// Shows 3 buttons:
2964 /// - "Always allow <display_name> MCP tool" → sets `tools.<tool_id>.default_mode = "allow"`
2965 /// - "Allow" → approve once
2966 /// - "Deny" → reject once
2967 pub fn authorize_third_party_tool(
2968 &self,
2969 title: impl Into<String>,
2970 tool_id: String,
2971 display_name: String,
2972 cx: &mut App,
2973 ) -> Task<Result<()>> {
2974 let settings = agent_settings::AgentSettings::get_global(cx);
2975
2976 let decision = decide_permission_from_settings(&tool_id, "", &settings);
2977
2978 match decision {
2979 ToolPermissionDecision::Allow => return Task::ready(Ok(())),
2980 ToolPermissionDecision::Deny(reason) => return Task::ready(Err(anyhow!(reason))),
2981 ToolPermissionDecision::Confirm => {}
2982 }
2983
2984 let (response_tx, response_rx) = oneshot::channel();
2985 self.stream
2986 .0
2987 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2988 ToolCallAuthorization {
2989 tool_call: acp::ToolCallUpdate::new(
2990 self.tool_use_id.to_string(),
2991 acp::ToolCallUpdateFields::new().title(title.into()),
2992 ),
2993 options: vec![
2994 acp::PermissionOption::new(
2995 acp::PermissionOptionId::new(format!("always_allow_mcp:{}", tool_id)),
2996 format!("Always allow {} MCP tool", display_name),
2997 acp::PermissionOptionKind::AllowAlways,
2998 ),
2999 acp::PermissionOption::new(
3000 acp::PermissionOptionId::new("allow"),
3001 "Allow once",
3002 acp::PermissionOptionKind::AllowOnce,
3003 ),
3004 acp::PermissionOption::new(
3005 acp::PermissionOptionId::new("deny"),
3006 "Deny",
3007 acp::PermissionOptionKind::RejectOnce,
3008 ),
3009 ],
3010 response: response_tx,
3011 context: None,
3012 },
3013 )))
3014 .ok();
3015
3016 let fs = self.fs.clone();
3017 cx.spawn(async move |cx| {
3018 let response_str = response_rx.await?.0.to_string();
3019
3020 if response_str == format!("always_allow_mcp:{}", tool_id) {
3021 if let Some(fs) = fs.clone() {
3022 cx.update(|cx| {
3023 update_settings_file(fs, cx, move |settings, _| {
3024 settings
3025 .agent
3026 .get_or_insert_default()
3027 .set_tool_default_mode(&tool_id, ToolPermissionMode::Allow);
3028 });
3029 });
3030 }
3031 return Ok(());
3032 }
3033
3034 if response_str == "allow" {
3035 return Ok(());
3036 }
3037
3038 Err(anyhow!("Permission to run tool denied by user"))
3039 })
3040 }
3041
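    /// Authorize a built-in tool call, honoring pattern-based permission settings.
    ///
    /// The id of the permission option selected by the user is interpreted as follows:
    /// - `"allow"` → approve once
    /// - `"always_allow:<tool>"` / `"always_deny:<tool>"` → persist that tool's `default_mode`, then approve/deny
    /// - `"always_allow_pattern:<tool>:<pattern>"` / `"always_deny_pattern:<tool>:<pattern>"` → persist the pattern, then approve/deny
    /// - anything else → deny once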
3042 pub fn authorize(
3043 &self,
3044 title: impl Into<String>,
3045 context: ToolPermissionContext,
3046 cx: &mut App,
3047 ) -> Task<Result<()>> {
3048 use settings::ToolPermissionMode;
3049
3050 let options = context.build_permission_options();
3051
3052 let (response_tx, response_rx) = oneshot::channel();
3053 self.stream
3054 .0
3055 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
3056 ToolCallAuthorization {
3057 tool_call: acp::ToolCallUpdate::new(
3058 self.tool_use_id.to_string(),
3059 acp::ToolCallUpdateFields::new().title(title.into()),
3060 ),
3061 options,
3062 response: response_tx,
3063 context: Some(context),
3064 },
3065 )))
3066 .ok();
3067
3068 let fs = self.fs.clone();
3069 cx.spawn(async move |cx| {
3070 let response_str = response_rx.await?.0.to_string();
3071
3072 // Handle "always allow tool" - e.g., "always_allow:terminal"
3073 if let Some(tool) = response_str.strip_prefix("always_allow:") {
3074 if let Some(fs) = fs.clone() {
3075 let tool = tool.to_string();
3076 cx.update(|cx| {
3077 update_settings_file(fs, cx, move |settings, _| {
3078 settings
3079 .agent
3080 .get_or_insert_default()
3081 .set_tool_default_mode(&tool, ToolPermissionMode::Allow);
3082 });
3083 });
3084 }
3085 return Ok(());
3086 }
3087
3088 // Handle "always deny tool" - e.g., "always_deny:terminal"
3089 if let Some(tool) = response_str.strip_prefix("always_deny:") {
3090 if let Some(fs) = fs.clone() {
3091 let tool = tool.to_string();
3092 cx.update(|cx| {
3093 update_settings_file(fs, cx, move |settings, _| {
3094 settings
3095 .agent
3096 .get_or_insert_default()
3097 .set_tool_default_mode(&tool, ToolPermissionMode::Deny);
3098 });
3099 });
3100 }
3101 return Err(anyhow!("Permission to run tool denied by user"));
3102 }
3103
3104 // Handle "always allow pattern" - e.g., "always_allow_pattern:terminal:^cargo\s"
3105 if response_str.starts_with("always_allow_pattern:") {
3106 let parts: Vec<&str> = response_str.splitn(3, ':').collect();
3107 if parts.len() == 3 {
3108 let pattern_tool_name = parts[1].to_string();
3109 let pattern = parts[2].to_string();
3110 if let Some(fs) = fs.clone() {
3111 cx.update(|cx| {
3112 update_settings_file(fs, cx, move |settings, _| {
3113 settings
3114 .agent
3115 .get_or_insert_default()
3116 .add_tool_allow_pattern(&pattern_tool_name, pattern);
3117 });
3118 });
3119 }
3120 }
3121 return Ok(());
3122 }
3123
3124 // Handle "always deny pattern" - e.g., "always_deny_pattern:terminal:^cargo\s"
3125 if response_str.starts_with("always_deny_pattern:") {
3126 let parts: Vec<&str> = response_str.splitn(3, ':').collect();
3127 if parts.len() == 3 {
3128 let pattern_tool_name = parts[1].to_string();
3129 let pattern = parts[2].to_string();
3130 if let Some(fs) = fs.clone() {
3131 cx.update(|cx| {
3132 update_settings_file(fs, cx, move |settings, _| {
3133 settings
3134 .agent
3135 .get_or_insert_default()
3136 .add_tool_deny_pattern(&pattern_tool_name, pattern);
3137 });
3138 });
3139 }
3140 }
3141 return Err(anyhow!("Permission to run tool denied by user"));
3142 }
3143
3144 // Handle simple "allow" (allow once)
3145 if response_str == "allow" {
3146 return Ok(());
3147 }
3148
3149 // Handle simple "deny" (deny once)
3150 Err(anyhow!("Permission to run tool denied by user"))
3151 })
3152 }
3153}
3154
3155#[cfg(any(test, feature = "test-support"))]
3156pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
3157
3158#[cfg(any(test, feature = "test-support"))]
3159impl ToolCallEventStreamReceiver {
3160 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
3161 let event = self.0.next().await;
3162 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
3163 auth
3164 } else {
3165 panic!("Expected ToolCallAuthorization but got: {:?}", event);
3166 }
3167 }
3168
3169 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
3170 let event = self.0.next().await;
3171 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
3172 update,
3173 )))) = event
3174 {
3175 update.fields
3176 } else {
3177 panic!("Expected update fields but got: {:?}", event);
3178 }
3179 }
3180
3181 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
3182 let event = self.0.next().await;
3183 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
3184 update,
3185 )))) = event
3186 {
3187 update.diff
3188 } else {
3189 panic!("Expected diff but got: {:?}", event);
3190 }
3191 }
3192
3193 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
3194 let event = self.0.next().await;
3195 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
3196 update,
3197 )))) = event
3198 {
3199 update.terminal
3200 } else {
3201 panic!("Expected terminal but got: {:?}", event);
3202 }
3203 }
3204}
3205
3206#[cfg(any(test, feature = "test-support"))]
3207impl std::ops::Deref for ToolCallEventStreamReceiver {
3208 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
3209
3210 fn deref(&self) -> &Self::Target {
3211 &self.0
3212 }
3213}
3214
3215#[cfg(any(test, feature = "test-support"))]
3216impl std::ops::DerefMut for ToolCallEventStreamReceiver {
3217 fn deref_mut(&mut self) -> &mut Self::Target {
3218 &mut self.0
3219 }
3220}
3221
3222impl From<&str> for UserMessageContent {
3223 fn from(text: &str) -> Self {
3224 Self::Text(text.into())
3225 }
3226}
3227
3228impl UserMessageContent {
3229 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
3230 match value {
3231 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
3232 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
3233 acp::ContentBlock::Audio(_) => {
3234 // TODO
3235 Self::Text("[audio]".to_string())
3236 }
3237 acp::ContentBlock::ResourceLink(resource_link) => {
3238 match MentionUri::parse(&resource_link.uri, path_style) {
3239 Ok(uri) => Self::Mention {
3240 uri,
3241 content: String::new(),
3242 },
3243 Err(err) => {
3244 log::error!("Failed to parse mention link: {}", err);
3245 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
3246 }
3247 }
3248 }
3249 acp::ContentBlock::Resource(resource) => match resource.resource {
3250 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
3251 match MentionUri::parse(&resource.uri, path_style) {
3252 Ok(uri) => Self::Mention {
3253 uri,
3254 content: resource.text,
3255 },
3256 Err(err) => {
3257 log::error!("Failed to parse mention link: {}", err);
3258 Self::Text(
3259 MarkdownCodeBlock {
3260 tag: &resource.uri,
3261 text: &resource.text,
3262 }
3263 .to_string(),
3264 )
3265 }
3266 }
3267 }
3268 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
3269 // TODO
3270 Self::Text("[blob]".to_string())
3271 }
3272 other => {
3273 log::warn!("Unexpected content type: {:?}", other);
3274 Self::Text("[unknown]".to_string())
3275 }
3276 },
3277 other => {
3278 log::warn!("Unexpected content type: {:?}", other);
3279 Self::Text("[unknown]".to_string())
3280 }
3281 }
3282 }
3283}
3284
3285impl From<UserMessageContent> for acp::ContentBlock {
3286 fn from(content: UserMessageContent) -> Self {
3287 match content {
3288 UserMessageContent::Text(text) => text.into(),
3289 UserMessageContent::Image(image) => {
3290 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
3291 }
3292 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
3293 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
3294 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
3295 )),
3296 ),
3297 }
3298 }
3299}
3300
3301fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
3302 LanguageModelImage {
3303 source: image_content.data.into(),
3304 size: None,
3305 }
3306}