1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SubagentTool, SystemPromptTemplate, Template, Templates,
6 TerminalTool, ThinkingTool, WebSearchTool,
7};
8use acp_thread::{MentionUri, UserMessageId};
9use action_log::ActionLog;
10use feature_flags::{FeatureFlagAppExt as _, SubagentsFeatureFlag};
11
12use agent_client_protocol as acp;
13use agent_settings::{
14 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
15 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
16};
17use anyhow::{Context as _, Result, anyhow};
18use chrono::{DateTime, Utc};
19use client::{ModelRequestUsage, RequestUsage, UserStore};
20use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
21use collections::{HashMap, HashSet, IndexMap};
22use fs::Fs;
23use futures::stream;
24use futures::{
25 FutureExt,
26 channel::{mpsc, oneshot},
27 future::Shared,
28 stream::FuturesUnordered,
29};
30use gpui::{
31 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
32};
33use language_model::{
34 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
35 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
36 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
37 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
38 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
39 ZED_CLOUD_PROVIDER_ID,
40};
41use project::Project;
42use prompt_store::ProjectContext;
43use schemars::{JsonSchema, Schema};
44use serde::{Deserialize, Serialize};
45use settings::{LanguageModelSelection, Settings, update_settings_file};
46use smol::stream::StreamExt;
47use std::{
48 collections::BTreeMap,
49 ops::RangeInclusive,
50 path::Path,
51 rc::Rc,
52 sync::Arc,
53 time::{Duration, Instant},
54};
55use std::{fmt::Write, path::PathBuf};
56use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
57use uuid::Uuid;
58
59const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
60pub const MAX_TOOL_NAME_LENGTH: usize = 64;
61pub const MAX_SUBAGENT_DEPTH: u8 = 4;
62pub const MAX_PARALLEL_SUBAGENTS: usize = 8;
63
64/// Context passed to a subagent thread for lifecycle management
65#[derive(Clone)]
66pub struct SubagentContext {
67 /// ID of the parent thread
68 pub parent_thread_id: acp::SessionId,
69
70 /// ID of the tool call that spawned this subagent
71 pub tool_use_id: LanguageModelToolUseId,
72
73 /// Current depth level (0 = root agent, 1 = first-level subagent, etc.)
74 pub depth: u8,
75
76 /// Prompt to send when subagent completes successfully
77 pub summary_prompt: String,
78
79 /// Prompt to send when context is running low (≤25% remaining)
80 pub context_low_prompt: String,
81}
82
83/// The ID of the user prompt that initiated a request.
84///
85/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
86#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
87pub struct PromptId(Arc<str>);
88
89impl PromptId {
90 pub fn new() -> Self {
91 Self(Uuid::new_v4().to_string().into())
92 }
93}
94
95impl std::fmt::Display for PromptId {
96 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
97 write!(f, "{}", self.0)
98 }
99}
100
101pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
102pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
103
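/// Strategy used when retrying a failed completion request.
///
/// For `ExponentialBackoff`, attempt `n` waits `initial_delay * 2^(n - 1)`
/// (see `handle_completion_error` below). Assuming the exponential strategy is
/// built from `BASE_RETRY_DELAY` (5s) with `MAX_RETRY_ATTEMPTS` (4), that
/// works out to delays of 5s, 10s, 20s, and 40s.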
104#[derive(Debug, Clone)]
105enum RetryStrategy {
106 ExponentialBackoff {
107 initial_delay: Duration,
108 max_attempts: u8,
109 },
110 Fixed {
111 delay: Duration,
112 max_attempts: u8,
113 },
114}
115
116#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
117pub enum Message {
118 User(UserMessage),
119 Agent(AgentMessage),
120 Resume,
121}
122
123impl Message {
124 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
125 match self {
126 Message::Agent(agent_message) => Some(agent_message),
127 _ => None,
128 }
129 }
130
131 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
132 match self {
133 Message::User(message) => {
134 if message.content.is_empty() {
135 vec![]
136 } else {
137 vec![message.to_request()]
138 }
139 }
140 Message::Agent(message) => message.to_request(),
141 Message::Resume => vec![LanguageModelRequestMessage {
142 role: Role::User,
143 content: vec!["Continue where you left off".into()],
144 cache: false,
145 reasoning_details: None,
146 }],
147 }
148 }
149
150 pub fn to_markdown(&self) -> String {
151 match self {
152 Message::User(message) => message.to_markdown(),
153 Message::Agent(message) => message.to_markdown(),
154 Message::Resume => "[resume]\n".into(),
155 }
156 }
157
158 pub fn role(&self) -> Role {
159 match self {
160 Message::User(_) | Message::Resume => Role::User,
161 Message::Agent(_) => Role::Assistant,
162 }
163 }
164}
165
166#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
167pub struct UserMessage {
168 pub id: UserMessageId,
169 pub content: Vec<UserMessageContent>,
170}
171
172#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
173pub enum UserMessageContent {
174 Text(String),
175 Mention { uri: MentionUri, content: String },
176 Image(LanguageModelImage),
177}
178
179impl UserMessage {
180 pub fn to_markdown(&self) -> String {
181 let mut markdown = String::from("## User\n\n");
182
183 for content in &self.content {
184 match content {
185 UserMessageContent::Text(text) => {
186 markdown.push_str(text);
187 markdown.push('\n');
188 }
189 UserMessageContent::Image(_) => {
190 markdown.push_str("<image />\n");
191 }
192 UserMessageContent::Mention { uri, content } => {
193 if !content.is_empty() {
194 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
195 } else {
196 let _ = writeln!(&mut markdown, "{}", uri.as_link());
197 }
198 }
199 }
200 }
201
202 markdown
203 }
204
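    /// Converts this user message into a single request message. Mention
    /// chunks are rendered inline as links, while the mentioned contents are
    /// collected into tagged sections (`<files>`, `<directories>`, `<symbols>`,
    /// `<selections>`, `<threads>`, `<fetched_urls>`, `<rules>`) that are
    /// appended inside a trailing `<context>` block.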
205 fn to_request(&self) -> LanguageModelRequestMessage {
206 let mut message = LanguageModelRequestMessage {
207 role: Role::User,
208 content: Vec::with_capacity(self.content.len()),
209 cache: false,
210 reasoning_details: None,
211 };
212
213 const OPEN_CONTEXT: &str = "<context>\n\
214 The following items were attached by the user. \
215 They are up-to-date and don't need to be re-read.\n\n";
216
217 const OPEN_FILES_TAG: &str = "<files>";
218 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
219 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
220 const OPEN_SELECTIONS_TAG: &str = "<selections>";
221 const OPEN_THREADS_TAG: &str = "<threads>";
222 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
223 const OPEN_RULES_TAG: &str =
224 "<rules>\nThe user has specified the following rules that should be applied:\n";
225
226 let mut file_context = OPEN_FILES_TAG.to_string();
227 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
228 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
229 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
230 let mut thread_context = OPEN_THREADS_TAG.to_string();
231 let mut fetch_context = OPEN_FETCH_TAG.to_string();
232 let mut rules_context = OPEN_RULES_TAG.to_string();
233
234 for chunk in &self.content {
235 let chunk = match chunk {
236 UserMessageContent::Text(text) => {
237 language_model::MessageContent::Text(text.clone())
238 }
239 UserMessageContent::Image(value) => {
240 language_model::MessageContent::Image(value.clone())
241 }
242 UserMessageContent::Mention { uri, content } => {
243 match uri {
244 MentionUri::File { abs_path } => {
245 write!(
246 &mut file_context,
247 "\n{}",
248 MarkdownCodeBlock {
249 tag: &codeblock_tag(abs_path, None),
250 text: &content.to_string(),
251 }
252 )
253 .ok();
254 }
255 MentionUri::PastedImage => {
256 debug_panic!("pasted image URI should not be used in mention content")
257 }
258 MentionUri::Directory { .. } => {
259 write!(&mut directory_context, "\n{}\n", content).ok();
260 }
261 MentionUri::Symbol {
262 abs_path: path,
263 line_range,
264 ..
265 } => {
266 write!(
267 &mut symbol_context,
268 "\n{}",
269 MarkdownCodeBlock {
270 tag: &codeblock_tag(path, Some(line_range)),
271 text: content
272 }
273 )
274 .ok();
275 }
276 MentionUri::Selection {
277 abs_path: path,
278 line_range,
279 ..
280 } => {
281 write!(
282 &mut selection_context,
283 "\n{}",
284 MarkdownCodeBlock {
285 tag: &codeblock_tag(
286 path.as_deref().unwrap_or("Untitled".as_ref()),
287 Some(line_range)
288 ),
289 text: content
290 }
291 )
292 .ok();
293 }
294 MentionUri::Thread { .. } => {
295 write!(&mut thread_context, "\n{}\n", content).ok();
296 }
297 MentionUri::TextThread { .. } => {
298 write!(&mut thread_context, "\n{}\n", content).ok();
299 }
300 MentionUri::Rule { .. } => {
301 write!(
302 &mut rules_context,
303 "\n{}",
304 MarkdownCodeBlock {
305 tag: "",
306 text: content
307 }
308 )
309 .ok();
310 }
311 MentionUri::Fetch { url } => {
312 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
313 }
314 }
315
316 language_model::MessageContent::Text(uri.as_link().to_string())
317 }
318 };
319
320 message.content.push(chunk);
321 }
322
323 let len_before_context = message.content.len();
324
325 if file_context.len() > OPEN_FILES_TAG.len() {
326 file_context.push_str("</files>\n");
327 message
328 .content
329 .push(language_model::MessageContent::Text(file_context));
330 }
331
332 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
333 directory_context.push_str("</directories>\n");
334 message
335 .content
336 .push(language_model::MessageContent::Text(directory_context));
337 }
338
339 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
340 symbol_context.push_str("</symbols>\n");
341 message
342 .content
343 .push(language_model::MessageContent::Text(symbol_context));
344 }
345
346 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
347 selection_context.push_str("</selections>\n");
348 message
349 .content
350 .push(language_model::MessageContent::Text(selection_context));
351 }
352
353 if thread_context.len() > OPEN_THREADS_TAG.len() {
354 thread_context.push_str("</threads>\n");
355 message
356 .content
357 .push(language_model::MessageContent::Text(thread_context));
358 }
359
360 if fetch_context.len() > OPEN_FETCH_TAG.len() {
361 fetch_context.push_str("</fetched_urls>\n");
362 message
363 .content
364 .push(language_model::MessageContent::Text(fetch_context));
365 }
366
367 if rules_context.len() > OPEN_RULES_TAG.len() {
            rules_context.push_str("</rules>\n");
369 message
370 .content
371 .push(language_model::MessageContent::Text(rules_context));
372 }
373
374 if message.content.len() > len_before_context {
375 message.content.insert(
376 len_before_context,
377 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
378 );
379 message
380 .content
381 .push(language_model::MessageContent::Text("</context>".into()));
382 }
383
384 message
385 }
386}
387
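/// Builds the info string for a markdown code block from a path and an
/// optional zero-based line range. An illustrative sketch of the expected
/// output (not a doctest; the path and range are made up):
///
/// ```ignore
/// use std::path::Path;
///
/// // extension, then path, then a 1-based line range
/// assert_eq!(
///     codeblock_tag(Path::new("src/main.rs"), Some(&(9..=11))),
///     "rs src/main.rs:10-12"
/// );
/// ```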
388fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
389 let mut result = String::new();
390
391 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
392 let _ = write!(result, "{} ", extension);
393 }
394
395 let _ = write!(result, "{}", full_path.display());
396
397 if let Some(range) = line_range {
398 if range.start() == range.end() {
399 let _ = write!(result, ":{}", range.start() + 1);
400 } else {
401 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
402 }
403 }
404
405 result
406}
407
408impl AgentMessage {
409 pub fn to_markdown(&self) -> String {
410 let mut markdown = String::from("## Assistant\n\n");
411
412 for content in &self.content {
413 match content {
414 AgentMessageContent::Text(text) => {
415 markdown.push_str(text);
416 markdown.push('\n');
417 }
418 AgentMessageContent::Thinking { text, .. } => {
419 markdown.push_str("<think>");
420 markdown.push_str(text);
421 markdown.push_str("</think>\n");
422 }
423 AgentMessageContent::RedactedThinking(_) => {
424 markdown.push_str("<redacted_thinking />\n")
425 }
426 AgentMessageContent::ToolUse(tool_use) => {
427 markdown.push_str(&format!(
428 "**Tool Use**: {} (ID: {})\n",
429 tool_use.name, tool_use.id
430 ));
431 markdown.push_str(&format!(
432 "{}\n",
433 MarkdownCodeBlock {
434 tag: "json",
435 text: &format!("{:#}", tool_use.input)
436 }
437 ));
438 }
439 }
440 }
441
442 for tool_result in self.tool_results.values() {
443 markdown.push_str(&format!(
444 "**Tool Result**: {} (ID: {})\n\n",
445 tool_result.tool_name, tool_result.tool_use_id
446 ));
447 if tool_result.is_error {
448 markdown.push_str("**ERROR:**\n");
449 }
450
451 match &tool_result.content {
452 LanguageModelToolResultContent::Text(text) => {
453 writeln!(markdown, "{text}\n").ok();
454 }
455 LanguageModelToolResultContent::Image(_) => {
456 writeln!(markdown, "<image />\n").ok();
457 }
458 }
459
460 if let Some(output) = tool_result.output.as_ref() {
461 writeln!(
462 markdown,
463 "**Debug Output**:\n\n```json\n{}\n```\n",
464 serde_json::to_string_pretty(output).unwrap()
465 )
466 .unwrap();
467 }
468 }
469
470 markdown
471 }
472
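    /// Converts this agent message into request messages: an assistant
    /// message carrying the text/thinking/tool-use content, followed by a
    /// user message carrying the corresponding tool results. Tool uses
    /// without a recorded result are skipped, and empty tool results are
    /// replaced with a placeholder string so the API accepts them.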
473 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
474 let mut assistant_message = LanguageModelRequestMessage {
475 role: Role::Assistant,
476 content: Vec::with_capacity(self.content.len()),
477 cache: false,
478 reasoning_details: self.reasoning_details.clone(),
479 };
480 for chunk in &self.content {
481 match chunk {
482 AgentMessageContent::Text(text) => {
483 assistant_message
484 .content
485 .push(language_model::MessageContent::Text(text.clone()));
486 }
487 AgentMessageContent::Thinking { text, signature } => {
488 assistant_message
489 .content
490 .push(language_model::MessageContent::Thinking {
491 text: text.clone(),
492 signature: signature.clone(),
493 });
494 }
495 AgentMessageContent::RedactedThinking(value) => {
496 assistant_message.content.push(
497 language_model::MessageContent::RedactedThinking(value.clone()),
498 );
499 }
500 AgentMessageContent::ToolUse(tool_use) => {
501 if self.tool_results.contains_key(&tool_use.id) {
502 assistant_message
503 .content
504 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
505 }
506 }
507 };
508 }
509
510 let mut user_message = LanguageModelRequestMessage {
511 role: Role::User,
512 content: Vec::new(),
513 cache: false,
514 reasoning_details: None,
515 };
516
517 for tool_result in self.tool_results.values() {
518 let mut tool_result = tool_result.clone();
519 // Surprisingly, the API fails if we return an empty string here.
520 // It thinks we are sending a tool use without a tool result.
521 if tool_result.content.is_empty() {
522 tool_result.content = "<Tool returned an empty string>".into();
523 }
524 user_message
525 .content
526 .push(language_model::MessageContent::ToolResult(tool_result));
527 }
528
529 let mut messages = Vec::new();
530 if !assistant_message.content.is_empty() {
531 messages.push(assistant_message);
532 }
533 if !user_message.content.is_empty() {
534 messages.push(user_message);
535 }
536 messages
537 }
538}
539
540#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
541pub struct AgentMessage {
542 pub content: Vec<AgentMessageContent>,
543 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
544 pub reasoning_details: Option<serde_json::Value>,
545}
546
547#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
548pub enum AgentMessageContent {
549 Text(String),
550 Thinking {
551 text: String,
552 signature: Option<String>,
553 },
554 RedactedThinking(String),
555 ToolUse(LanguageModelToolUse),
556}
557
558pub trait TerminalHandle {
559 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
560 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
561 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
562 fn kill(&self, cx: &AsyncApp) -> Result<()>;
563 fn was_stopped_by_user(&self, cx: &AsyncApp) -> Result<bool>;
564}
565
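/// Host-environment hooks available to a running thread; at the moment this
/// is just the ability to spawn terminals on behalf of the terminal tool.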
566pub trait ThreadEnvironment {
567 fn create_terminal(
568 &self,
569 command: String,
570 cwd: Option<PathBuf>,
571 output_byte_limit: Option<u64>,
572 cx: &mut AsyncApp,
573 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
574}
575
576#[derive(Debug)]
577pub enum ThreadEvent {
578 UserMessage(UserMessage),
579 AgentText(String),
580 AgentThinking(String),
581 ToolCall(acp::ToolCall),
582 ToolCallUpdate(acp_thread::ToolCallUpdate),
583 ToolCallAuthorization(ToolCallAuthorization),
584 Retry(acp_thread::RetryStatus),
585 Stop(acp::StopReason),
586}
587
588#[derive(Debug)]
589pub struct NewTerminal {
590 pub command: String,
591 pub output_byte_limit: Option<u64>,
592 pub cwd: Option<PathBuf>,
593 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
594}
595
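/// Emitted when a tool call needs the user's permission before it can run;
/// the handler should reply with the chosen option on `response`.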
596#[derive(Debug)]
597pub struct ToolCallAuthorization {
598 pub tool_call: acp::ToolCallUpdate,
599 pub options: Vec<acp::PermissionOption>,
600 pub response: oneshot::Sender<acp::PermissionOptionId>,
601}
602
603#[derive(Debug, thiserror::Error)]
604enum CompletionError {
605 #[error("max tokens")]
606 MaxTokens,
607 #[error("refusal")]
608 Refusal,
609 #[error(transparent)]
610 Other(#[from] anyhow::Error),
611}
612
613pub struct Thread {
614 id: acp::SessionId,
615 prompt_id: PromptId,
616 updated_at: DateTime<Utc>,
617 title: Option<SharedString>,
618 pending_title_generation: Option<Task<()>>,
619 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
620 summary: Option<SharedString>,
621 messages: Vec<Message>,
622 user_store: Entity<UserStore>,
623 completion_mode: CompletionMode,
624 /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results.
627 running_turn: Option<RunningTurn>,
628 pending_message: Option<AgentMessage>,
629 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
630 tool_use_limit_reached: bool,
631 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
632 #[allow(unused)]
633 cumulative_token_usage: TokenUsage,
634 #[allow(unused)]
635 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
636 context_server_registry: Entity<ContextServerRegistry>,
637 profile_id: AgentProfileId,
638 project_context: Entity<ProjectContext>,
639 templates: Arc<Templates>,
640 model: Option<Arc<dyn LanguageModel>>,
641 summarization_model: Option<Arc<dyn LanguageModel>>,
642 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
643 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
644 pub(crate) project: Entity<Project>,
645 pub(crate) action_log: Entity<ActionLog>,
646 /// Tracks the last time files were read by the agent, to detect external modifications
647 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
648 /// True if this thread was imported from a shared thread and can be synced.
649 imported: bool,
650 /// If this is a subagent thread, contains context about the parent
651 subagent_context: Option<SubagentContext>,
652 /// Weak references to running subagent threads for cancellation propagation
653 running_subagents: Vec<WeakEntity<Thread>>,
654}
655
656impl Thread {
657 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
658 let image = model.map_or(true, |model| model.supports_images());
659 acp::PromptCapabilities::new()
660 .image(image)
661 .embedded_context(true)
662 }
663
664 pub fn new(
665 project: Entity<Project>,
666 project_context: Entity<ProjectContext>,
667 context_server_registry: Entity<ContextServerRegistry>,
668 templates: Arc<Templates>,
669 model: Option<Arc<dyn LanguageModel>>,
670 cx: &mut Context<Self>,
671 ) -> Self {
672 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
673 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
674 let (prompt_capabilities_tx, prompt_capabilities_rx) =
675 watch::channel(Self::prompt_capabilities(model.as_deref()));
676 Self {
677 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
678 prompt_id: PromptId::new(),
679 updated_at: Utc::now(),
680 title: None,
681 pending_title_generation: None,
682 pending_summary_generation: None,
683 summary: None,
684 messages: Vec::new(),
685 user_store: project.read(cx).user_store(),
686 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
687 running_turn: None,
688 pending_message: None,
689 tools: BTreeMap::default(),
690 tool_use_limit_reached: false,
691 request_token_usage: HashMap::default(),
692 cumulative_token_usage: TokenUsage::default(),
693 initial_project_snapshot: {
694 let project_snapshot = Self::project_snapshot(project.clone(), cx);
695 cx.foreground_executor()
696 .spawn(async move { Some(project_snapshot.await) })
697 .shared()
698 },
699 context_server_registry,
700 profile_id,
701 project_context,
702 templates,
703 model,
704 summarization_model: None,
705 prompt_capabilities_tx,
706 prompt_capabilities_rx,
707 project,
708 action_log,
709 file_read_times: HashMap::default(),
710 imported: false,
711 subagent_context: None,
712 running_subagents: Vec::new(),
713 }
714 }
715
716 pub fn new_subagent(
717 project: Entity<Project>,
718 project_context: Entity<ProjectContext>,
719 context_server_registry: Entity<ContextServerRegistry>,
720 templates: Arc<Templates>,
721 model: Arc<dyn LanguageModel>,
722 subagent_context: SubagentContext,
723 parent_tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
724 cx: &mut Context<Self>,
725 ) -> Self {
726 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
727 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
728 let (prompt_capabilities_tx, prompt_capabilities_rx) =
729 watch::channel(Self::prompt_capabilities(Some(model.as_ref())));
730 Self {
731 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
732 prompt_id: PromptId::new(),
733 updated_at: Utc::now(),
734 title: None,
735 pending_title_generation: None,
736 pending_summary_generation: None,
737 summary: None,
738 messages: Vec::new(),
739 user_store: project.read(cx).user_store(),
740 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
741 running_turn: None,
742 pending_message: None,
743 tools: parent_tools,
744 tool_use_limit_reached: false,
745 request_token_usage: HashMap::default(),
746 cumulative_token_usage: TokenUsage::default(),
747 initial_project_snapshot: Task::ready(None).shared(),
748 context_server_registry,
749 profile_id,
750 project_context,
751 templates,
752 model: Some(model),
753 summarization_model: None,
754 prompt_capabilities_tx,
755 prompt_capabilities_rx,
756 project,
757 action_log,
758 file_read_times: HashMap::default(),
759 imported: false,
760 subagent_context: Some(subagent_context),
761 running_subagents: Vec::new(),
762 }
763 }
764
765 pub fn id(&self) -> &acp::SessionId {
766 &self.id
767 }
768
769 /// Returns true if this thread was imported from a shared thread.
770 pub fn is_imported(&self) -> bool {
771 self.imported
772 }
773
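    /// Re-emits the stored conversation as a stream of `ThreadEvent`s so a
    /// client can rebuild its view of this thread; no model requests are made,
    /// and completed tool calls are replayed from their saved output.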
774 pub fn replay(
775 &mut self,
776 cx: &mut Context<Self>,
777 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
778 let (tx, rx) = mpsc::unbounded();
779 let stream = ThreadEventStream(tx);
780 for message in &self.messages {
781 match message {
782 Message::User(user_message) => stream.send_user_message(user_message),
783 Message::Agent(assistant_message) => {
784 for content in &assistant_message.content {
785 match content {
786 AgentMessageContent::Text(text) => stream.send_text(text),
787 AgentMessageContent::Thinking { text, .. } => {
788 stream.send_thinking(text)
789 }
790 AgentMessageContent::RedactedThinking(_) => {}
791 AgentMessageContent::ToolUse(tool_use) => {
792 self.replay_tool_call(
793 tool_use,
794 assistant_message.tool_results.get(&tool_use.id),
795 &stream,
796 cx,
797 );
798 }
799 }
800 }
801 }
802 Message::Resume => {}
803 }
804 }
805 rx
806 }
807
808 fn replay_tool_call(
809 &self,
810 tool_use: &LanguageModelToolUse,
811 tool_result: Option<&LanguageModelToolResult>,
812 stream: &ThreadEventStream,
813 cx: &mut Context<Self>,
814 ) {
        let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
            self.context_server_registry
                .read(cx)
                .servers()
                .find_map(|(_, tools)| tools.get(tool_use.name.as_ref()).cloned())
        });
827
828 let Some(tool) = tool else {
829 stream
830 .0
831 .unbounded_send(Ok(ThreadEvent::ToolCall(
832 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
833 .status(acp::ToolCallStatus::Failed)
834 .raw_input(tool_use.input.clone()),
835 )))
836 .ok();
837 return;
838 };
839
840 let title = tool.initial_title(tool_use.input.clone(), cx);
841 let kind = tool.kind();
842 stream.send_tool_call(
843 &tool_use.id,
844 &tool_use.name,
845 title,
846 kind,
847 tool_use.input.clone(),
848 );
849
850 let output = tool_result
851 .as_ref()
852 .and_then(|result| result.output.clone());
853 if let Some(output) = output.clone() {
854 // For replay, we use a dummy cancellation receiver since the tool already completed
855 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
856 let tool_event_stream = ToolCallEventStream::new(
857 tool_use.id.clone(),
858 stream.clone(),
859 Some(self.project.read(cx).fs().clone()),
860 cancellation_rx,
861 );
862 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
863 .log_err();
864 }
865
866 stream.update_tool_call_fields(
867 &tool_use.id,
868 acp::ToolCallUpdateFields::new()
869 .status(
870 tool_result
871 .as_ref()
872 .map_or(acp::ToolCallStatus::Failed, |result| {
873 if result.is_error {
874 acp::ToolCallStatus::Failed
875 } else {
876 acp::ToolCallStatus::Completed
877 }
878 }),
879 )
880 .raw_output(output),
881 );
882 }
883
884 pub fn from_db(
885 id: acp::SessionId,
886 db_thread: DbThread,
887 project: Entity<Project>,
888 project_context: Entity<ProjectContext>,
889 context_server_registry: Entity<ContextServerRegistry>,
890 templates: Arc<Templates>,
891 cx: &mut Context<Self>,
892 ) -> Self {
893 let profile_id = db_thread
894 .profile
895 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
896
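        // Resolve the model in order of preference: the model stored with the
        // thread, then the registry default, then the profile's configured
        // model.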
897 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
898 db_thread
899 .model
900 .and_then(|model| {
901 let model = SelectedModel {
902 provider: model.provider.clone().into(),
903 model: model.model.into(),
904 };
905 registry.select_model(&model, cx)
906 })
907 .or_else(|| registry.default_model())
908 .map(|model| model.model)
909 });
910
911 if model.is_none() {
912 model = Self::resolve_profile_model(&profile_id, cx);
913 }
914 if model.is_none() {
915 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
916 registry.default_model().map(|model| model.model)
917 });
918 }
919
920 let (prompt_capabilities_tx, prompt_capabilities_rx) =
921 watch::channel(Self::prompt_capabilities(model.as_deref()));
922
923 let action_log = cx.new(|_| ActionLog::new(project.clone()));
924
925 Self {
926 id,
927 prompt_id: PromptId::new(),
928 title: if db_thread.title.is_empty() {
929 None
930 } else {
931 Some(db_thread.title.clone())
932 },
933 pending_title_generation: None,
934 pending_summary_generation: None,
935 summary: db_thread.detailed_summary,
936 messages: db_thread.messages,
937 user_store: project.read(cx).user_store(),
938 completion_mode: db_thread.completion_mode.unwrap_or_default(),
939 running_turn: None,
940 pending_message: None,
941 tools: BTreeMap::default(),
942 tool_use_limit_reached: false,
943 request_token_usage: db_thread.request_token_usage.clone(),
944 cumulative_token_usage: db_thread.cumulative_token_usage,
945 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
946 context_server_registry,
947 profile_id,
948 project_context,
949 templates,
950 model,
951 summarization_model: None,
952 project,
953 action_log,
954 updated_at: db_thread.updated_at,
955 prompt_capabilities_tx,
956 prompt_capabilities_rx,
957 file_read_times: HashMap::default(),
958 imported: db_thread.imported,
959 subagent_context: None,
960 running_subagents: Vec::new(),
961 }
962 }
963
964 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
965 let initial_project_snapshot = self.initial_project_snapshot.clone();
966 let mut thread = DbThread {
967 title: self.title(),
968 messages: self.messages.clone(),
969 updated_at: self.updated_at,
970 detailed_summary: self.summary.clone(),
971 initial_project_snapshot: None,
972 cumulative_token_usage: self.cumulative_token_usage,
973 request_token_usage: self.request_token_usage.clone(),
974 model: self.model.as_ref().map(|model| DbLanguageModel {
975 provider: model.provider_id().to_string(),
976 model: model.name().0.to_string(),
977 }),
978 completion_mode: Some(self.completion_mode),
979 profile: Some(self.profile_id.clone()),
980 imported: self.imported,
981 };
982
983 cx.background_spawn(async move {
984 let initial_project_snapshot = initial_project_snapshot.await;
985 thread.initial_project_snapshot = initial_project_snapshot;
986 thread
987 })
988 }
989
990 /// Create a snapshot of the current project state including git information and unsaved buffers.
991 fn project_snapshot(
992 project: Entity<Project>,
993 cx: &mut Context<Self>,
994 ) -> Task<Arc<ProjectSnapshot>> {
995 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
996 cx.spawn(async move |_, _| {
997 let snapshot = task.await;
998
999 Arc::new(ProjectSnapshot {
1000 worktree_snapshots: snapshot.worktree_snapshots,
1001 timestamp: Utc::now(),
1002 })
1003 })
1004 }
1005
1006 pub fn project_context(&self) -> &Entity<ProjectContext> {
1007 &self.project_context
1008 }
1009
1010 pub fn project(&self) -> &Entity<Project> {
1011 &self.project
1012 }
1013
1014 pub fn action_log(&self) -> &Entity<ActionLog> {
1015 &self.action_log
1016 }
1017
1018 pub fn is_empty(&self) -> bool {
1019 self.messages.is_empty() && self.title.is_none()
1020 }
1021
1022 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
1023 self.model.as_ref()
1024 }
1025
1026 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
1027 let old_usage = self.latest_token_usage();
1028 self.model = Some(model);
1029 let new_caps = Self::prompt_capabilities(self.model.as_deref());
1030 let new_usage = self.latest_token_usage();
1031 if old_usage != new_usage {
1032 cx.emit(TokenUsageUpdated(new_usage));
1033 }
1034 self.prompt_capabilities_tx.send(new_caps).log_err();
1035 cx.notify()
1036 }
1037
1038 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
1039 self.summarization_model.as_ref()
1040 }
1041
1042 pub fn set_summarization_model(
1043 &mut self,
1044 model: Option<Arc<dyn LanguageModel>>,
1045 cx: &mut Context<Self>,
1046 ) {
1047 self.summarization_model = model;
1048 cx.notify()
1049 }
1050
1051 pub fn completion_mode(&self) -> CompletionMode {
1052 self.completion_mode
1053 }
1054
1055 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
1056 let old_usage = self.latest_token_usage();
1057 self.completion_mode = mode;
1058 let new_usage = self.latest_token_usage();
1059 if old_usage != new_usage {
1060 cx.emit(TokenUsageUpdated(new_usage));
1061 }
1062 cx.notify()
1063 }
1064
1065 pub fn last_message(&self) -> Option<Message> {
1066 if let Some(message) = self.pending_message.clone() {
1067 Some(Message::Agent(message))
1068 } else {
1069 self.messages.last().cloned()
1070 }
1071 }
1072
1073 pub fn add_default_tools(
1074 &mut self,
1075 environment: Rc<dyn ThreadEnvironment>,
1076 cx: &mut Context<Self>,
1077 ) {
1078 let language_registry = self.project.read(cx).languages().clone();
1079 self.add_tool(CopyPathTool::new(self.project.clone()));
1080 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1081 self.add_tool(DeletePathTool::new(
1082 self.project.clone(),
1083 self.action_log.clone(),
1084 ));
1085 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1086 self.add_tool(EditFileTool::new(
1087 self.project.clone(),
1088 cx.weak_entity(),
1089 language_registry,
1090 Templates::new(),
1091 ));
1092 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1093 self.add_tool(FindPathTool::new(self.project.clone()));
1094 self.add_tool(GrepTool::new(self.project.clone()));
1095 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1096 self.add_tool(MovePathTool::new(self.project.clone()));
1097 self.add_tool(NowTool);
1098 self.add_tool(OpenTool::new(self.project.clone()));
1099 self.add_tool(ReadFileTool::new(
1100 cx.weak_entity(),
1101 self.project.clone(),
1102 self.action_log.clone(),
1103 ));
1104 self.add_tool(SaveFileTool::new(self.project.clone()));
1105 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1106 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1107 self.add_tool(ThinkingTool);
1108 self.add_tool(WebSearchTool);
1109
1110 if cx.has_flag::<SubagentsFeatureFlag>() && self.depth() < MAX_SUBAGENT_DEPTH {
1111 let parent_tools = self.tools.clone();
1112 self.add_tool(SubagentTool::new(
1113 cx.weak_entity(),
1114 self.project.clone(),
1115 self.project_context.clone(),
1116 self.context_server_registry.clone(),
1117 self.templates.clone(),
1118 self.depth(),
1119 parent_tools,
1120 ));
1121 }
1122 }
1123
1124 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1125 self.tools.insert(T::name().into(), tool.erase());
1126 }
1127
1128 pub fn remove_tool(&mut self, name: &str) -> bool {
1129 self.tools.remove(name).is_some()
1130 }
1131
1132 pub fn restrict_tools(&mut self, allowed: &collections::HashSet<SharedString>) {
1133 self.tools.retain(|name, _| allowed.contains(name));
1134 }
1135
1136 pub fn profile(&self) -> &AgentProfileId {
1137 &self.profile_id
1138 }
1139
1140 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1141 if self.profile_id == profile_id {
1142 return;
1143 }
1144
1145 self.profile_id = profile_id;
1146
1147 // Swap to the profile's preferred model when available.
1148 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1149 self.set_model(model, cx);
1150 }
1151 }
1152
1153 pub fn cancel(&mut self, cx: &mut Context<Self>) -> Task<()> {
1154 for subagent in self.running_subagents.drain(..) {
1155 if let Some(subagent) = subagent.upgrade() {
1156 subagent.update(cx, |thread, cx| thread.cancel(cx)).detach();
1157 }
1158 }
1159
1160 let Some(running_turn) = self.running_turn.take() else {
1161 self.flush_pending_message(cx);
1162 return Task::ready(());
1163 };
1164
1165 let turn_task = running_turn.cancel();
1166
1167 cx.spawn(async move |this, cx| {
1168 turn_task.await;
1169 this.update(cx, |this, cx| {
1170 this.flush_pending_message(cx);
1171 })
1172 .ok();
1173 })
1174 }
1175
1176 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1177 let Some(last_user_message) = self.last_user_message() else {
1178 return;
1179 };
1180
1181 self.request_token_usage
1182 .insert(last_user_message.id.clone(), update);
1183 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1184 cx.notify();
1185 }
1186
1187 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1188 self.cancel(cx).detach();
1189 // Clear pending message since cancel will try to flush it asynchronously,
1190 // and we don't want that content to be added after we truncate
1191 self.pending_message.take();
1192 let Some(position) = self.messages.iter().position(
1193 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1194 ) else {
1195 return Err(anyhow!("Message not found"));
1196 };
1197
1198 for message in self.messages.drain(position..) {
1199 match message {
1200 Message::User(message) => {
1201 self.request_token_usage.remove(&message.id);
1202 }
1203 Message::Agent(_) | Message::Resume => {}
1204 }
1205 }
1206 self.clear_summary();
1207 cx.notify();
1208 Ok(())
1209 }
1210
1211 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1212 let last_user_message = self.last_user_message()?;
1213 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1214 Some(*tokens)
1215 }
1216
1217 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1218 let usage = self.latest_request_token_usage()?;
1219 let model = self.model.clone()?;
1220 Some(acp_thread::TokenUsage {
1221 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1222 used_tokens: usage.total_tokens(),
1223 output_tokens: usage.output_tokens,
1224 })
1225 }
1226
1227 /// Get the total input token count as of the message before the given message.
1228 ///
1229 /// Returns `None` if:
1230 /// - `target_id` is the first message (no previous message)
1231 /// - The previous message hasn't received a response yet (no usage data)
1232 /// - `target_id` is not found in the messages
1233 pub fn tokens_before_message(&self, target_id: &UserMessageId) -> Option<u64> {
1234 let mut previous_user_message_id: Option<&UserMessageId> = None;
1235
1236 for message in &self.messages {
1237 if let Message::User(user_msg) = message {
1238 if &user_msg.id == target_id {
1239 let prev_id = previous_user_message_id?;
1240 let usage = self.request_token_usage.get(prev_id)?;
1241 return Some(usage.input_tokens);
1242 }
1243 previous_user_message_id = Some(&user_msg.id);
1244 }
1245 }
1246 None
1247 }
1248
1249 /// Look up the active profile and resolve its preferred model if one is configured.
1250 fn resolve_profile_model(
1251 profile_id: &AgentProfileId,
1252 cx: &mut Context<Self>,
1253 ) -> Option<Arc<dyn LanguageModel>> {
1254 let selection = AgentSettings::get_global(cx)
1255 .profiles
1256 .get(profile_id)?
1257 .default_model
1258 .clone()?;
1259 Self::resolve_model_from_selection(&selection, cx)
1260 }
1261
1262 /// Translate a stored model selection into the configured model from the registry.
1263 fn resolve_model_from_selection(
1264 selection: &LanguageModelSelection,
1265 cx: &mut Context<Self>,
1266 ) -> Option<Arc<dyn LanguageModel>> {
1267 let selected = SelectedModel {
1268 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1269 model: LanguageModelId::from(selection.model.clone()),
1270 };
1271 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1272 registry
1273 .select_model(&selected, cx)
1274 .map(|configured| configured.model)
1275 })
1276 }
1277
1278 pub fn resume(
1279 &mut self,
1280 cx: &mut Context<Self>,
1281 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1282 self.messages.push(Message::Resume);
1283 cx.notify();
1284
1285 log::debug!("Total messages in thread: {}", self.messages.len());
1286 self.run_turn(cx)
1287 }
1288
    /// Sending a message results in the model streaming a response, which could include tool calls.
    /// After calling tools, the model stops and waits for any outstanding tool calls to complete and for their results to be sent back.
    /// The returned channel reports every occurrence of the model stopping before erroring or ending its turn.
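    ///
    /// A minimal sketch of a call site (`message_id` stands in for however the
    /// caller obtained a `UserMessageId`):
    ///
    /// ```ignore
    /// // `message_id` is assumed to be provided by the caller.
    /// let events_rx = thread.send(
    ///     message_id,
    ///     [UserMessageContent::Text("Summarize this file".into())],
    ///     cx,
    /// )?;
    /// ```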
1292 pub fn send<T>(
1293 &mut self,
1294 id: UserMessageId,
1295 content: impl IntoIterator<Item = T>,
1296 cx: &mut Context<Self>,
1297 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1298 where
1299 T: Into<UserMessageContent>,
1300 {
1301 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1302 log::debug!("Thread::send content: {:?}", content);
1303
1304 self.messages
1305 .push(Message::User(UserMessage { id, content }));
1306 cx.notify();
1307
1308 self.send_existing(cx)
1309 }
1310
1311 pub fn send_existing(
1312 &mut self,
1313 cx: &mut Context<Self>,
1314 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1315 let model = self.model().context("No language model configured")?;
1316
1317 log::info!("Thread::send called with model: {}", model.name().0);
1318 self.advance_prompt_id();
1319
1320 log::debug!("Total messages in thread: {}", self.messages.len());
1321 self.run_turn(cx)
1322 }
1323
1324 pub fn push_acp_user_block(
1325 &mut self,
1326 id: UserMessageId,
1327 blocks: impl IntoIterator<Item = acp::ContentBlock>,
1328 path_style: PathStyle,
1329 cx: &mut Context<Self>,
1330 ) {
1331 let content = blocks
1332 .into_iter()
1333 .map(|block| UserMessageContent::from_content_block(block, path_style))
1334 .collect::<Vec<_>>();
1335 self.messages
1336 .push(Message::User(UserMessage { id, content }));
1337 cx.notify();
1338 }
1339
1340 pub fn push_acp_agent_block(&mut self, block: acp::ContentBlock, cx: &mut Context<Self>) {
1341 let text = match block {
1342 acp::ContentBlock::Text(text_content) => text_content.text,
1343 acp::ContentBlock::Image(_) => "[image]".to_string(),
1344 acp::ContentBlock::Audio(_) => "[audio]".to_string(),
1345 acp::ContentBlock::ResourceLink(resource_link) => resource_link.uri,
1346 acp::ContentBlock::Resource(resource) => match resource.resource {
1347 acp::EmbeddedResourceResource::TextResourceContents(resource) => resource.uri,
1348 acp::EmbeddedResourceResource::BlobResourceContents(resource) => resource.uri,
1349 _ => "[resource]".to_string(),
1350 },
1351 _ => "[unknown]".to_string(),
1352 };
1353
1354 self.messages.push(Message::Agent(AgentMessage {
1355 content: vec![AgentMessageContent::Text(text)],
1356 ..Default::default()
1357 }));
1358 cx.notify();
1359 }
1360
1361 #[cfg(feature = "eval")]
1362 pub fn proceed(
1363 &mut self,
1364 cx: &mut Context<Self>,
1365 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1366 self.run_turn(cx)
1367 }
1368
1369 fn run_turn(
1370 &mut self,
1371 cx: &mut Context<Self>,
1372 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1373 // Flush the old pending message synchronously before cancelling,
1374 // to avoid a race where the detached cancel task might flush the NEW
1375 // turn's pending message instead of the old one.
1376 self.flush_pending_message(cx);
1377 self.cancel(cx).detach();
1378
1379 let model = self.model.clone().context("No language model configured")?;
1380 let profile = AgentSettings::get_global(cx)
1381 .profiles
1382 .get(&self.profile_id)
1383 .context("Profile not found")?;
1384 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1385 let event_stream = ThreadEventStream(events_tx);
1386 let message_ix = self.messages.len().saturating_sub(1);
1387 self.tool_use_limit_reached = false;
1388 self.clear_summary();
1389 let (cancellation_tx, mut cancellation_rx) = watch::channel(false);
1390 self.running_turn = Some(RunningTurn {
1391 event_stream: event_stream.clone(),
1392 tools: self.enabled_tools(profile, &model, cx),
1393 cancellation_tx,
1394 _task: cx.spawn(async move |this, cx| {
1395 log::debug!("Starting agent turn execution");
1396
1397 let turn_result = Self::run_turn_internal(
1398 &this,
1399 model,
1400 &event_stream,
1401 cancellation_rx.clone(),
1402 cx,
1403 )
1404 .await;
1405
1406 // Check if we were cancelled - if so, cancel() already took running_turn
1407 // and we shouldn't touch it (it might be a NEW turn now)
1408 let was_cancelled = *cancellation_rx.borrow();
1409 if was_cancelled {
1410 log::debug!("Turn was cancelled, skipping cleanup");
1411 return;
1412 }
1413
1414 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1415
1416 match turn_result {
1417 Ok(()) => {
1418 log::debug!("Turn execution completed");
1419 event_stream.send_stop(acp::StopReason::EndTurn);
1420 }
1421 Err(error) => {
1422 log::error!("Turn execution failed: {:?}", error);
1423 match error.downcast::<CompletionError>() {
1424 Ok(CompletionError::Refusal) => {
1425 event_stream.send_stop(acp::StopReason::Refusal);
1426 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1427 }
1428 Ok(CompletionError::MaxTokens) => {
1429 event_stream.send_stop(acp::StopReason::MaxTokens);
1430 }
1431 Ok(CompletionError::Other(error)) | Err(error) => {
1432 event_stream.send_error(error);
1433 }
1434 }
1435 }
1436 }
1437
1438 _ = this.update(cx, |this, _| this.running_turn.take());
1439 }),
1440 });
1441 Ok(events_rx)
1442 }
1443
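    /// Drives the agentic loop for a single turn: build a completion request,
    /// stream the model's response, run any tool calls it makes, then repeat
    /// with `CompletionIntent::ToolResults` until the model ends its turn, the
    /// user cancels, or an error exhausts the retry budget.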
1444 async fn run_turn_internal(
1445 this: &WeakEntity<Self>,
1446 model: Arc<dyn LanguageModel>,
1447 event_stream: &ThreadEventStream,
1448 mut cancellation_rx: watch::Receiver<bool>,
1449 cx: &mut AsyncApp,
1450 ) -> Result<()> {
1451 let mut attempt = 0;
1452 let mut intent = CompletionIntent::UserPrompt;
1453 loop {
1454 let request =
1455 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1456
1457 telemetry::event!(
1458 "Agent Thread Completion",
1459 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1460 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1461 model = model.telemetry_id(),
1462 model_provider = model.provider_id().to_string(),
1463 attempt
1464 );
1465
1466 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1467
1468 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1469 Ok(events) => (events, None),
1470 Err(err) => (stream::empty().boxed(), Some(err)),
1471 };
1472 let mut tool_results = FuturesUnordered::new();
1473 let mut cancelled = false;
1474 loop {
1475 // Race between getting the first event and cancellation
1476 let first_event = futures::select! {
1477 event = events.next().fuse() => event,
1478 _ = cancellation_rx.changed().fuse() => {
1479 if *cancellation_rx.borrow() {
1480 cancelled = true;
1481 break;
1482 }
1483 continue;
1484 }
1485 };
1486 let Some(first_event) = first_event else {
1487 break;
1488 };
1489
1490 // Collect all immediately available events to process as a batch
1491 let mut batch = vec![first_event];
1492 while let Some(event) = events.next().now_or_never().flatten() {
1493 batch.push(event);
1494 }
1495
1496 // Process the batch in a single update
1497 let batch_result = this.update(cx, |this, cx| {
1498 let mut batch_tool_results = Vec::new();
1499 let mut batch_error = None;
1500
1501 for event in batch {
1502 log::trace!("Received completion event: {:?}", event);
1503 match event {
1504 Ok(event) => {
1505 match this.handle_completion_event(
1506 event,
1507 event_stream,
1508 cancellation_rx.clone(),
1509 cx,
1510 ) {
1511 Ok(Some(task)) => batch_tool_results.push(task),
1512 Ok(None) => {}
1513 Err(err) => {
1514 batch_error = Some(err);
1515 break;
1516 }
1517 }
1518 }
1519 Err(err) => {
1520 batch_error = Some(err.into());
1521 break;
1522 }
1523 }
1524 }
1525
1526 cx.notify();
1527 (batch_tool_results, batch_error)
1528 })?;
1529
1530 tool_results.extend(batch_result.0);
1531 if let Some(err) = batch_result.1 {
1532 error = Some(err.downcast()?);
1533 break;
1534 }
1535 }
1536
1537 let end_turn = tool_results.is_empty();
1538 while let Some(tool_result) = tool_results.next().await {
1539 log::debug!("Tool finished {:?}", tool_result);
1540
1541 event_stream.update_tool_call_fields(
1542 &tool_result.tool_use_id,
1543 acp::ToolCallUpdateFields::new()
1544 .status(if tool_result.is_error {
1545 acp::ToolCallStatus::Failed
1546 } else {
1547 acp::ToolCallStatus::Completed
1548 })
1549 .raw_output(tool_result.output.clone()),
1550 );
1551 this.update(cx, |this, _cx| {
1552 this.pending_message()
1553 .tool_results
1554 .insert(tool_result.tool_use_id.clone(), tool_result);
1555 })?;
1556 }
1557
1558 this.update(cx, |this, cx| {
1559 this.flush_pending_message(cx);
1560 if this.title.is_none() && this.pending_title_generation.is_none() {
1561 this.generate_title(cx);
1562 }
1563 })?;
1564
1565 if cancelled {
1566 log::debug!("Turn cancelled by user, exiting");
1567 return Ok(());
1568 }
1569
1570 if let Some(error) = error {
1571 attempt += 1;
1572 let retry = this.update(cx, |this, cx| {
1573 let user_store = this.user_store.read(cx);
1574 this.handle_completion_error(error, attempt, user_store.plan())
1575 })??;
1576 let timer = cx.background_executor().timer(retry.duration);
1577 event_stream.send_retry(retry);
1578 timer.await;
1579 this.update(cx, |this, _cx| {
1580 if let Some(Message::Agent(message)) = this.messages.last() {
1581 if message.tool_results.is_empty() {
1582 intent = CompletionIntent::UserPrompt;
1583 this.messages.push(Message::Resume);
1584 }
1585 }
1586 })?;
1587 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1588 return Err(language_model::ToolUseLimitReachedError.into());
1589 } else if end_turn {
1590 return Ok(());
1591 } else {
1592 intent = CompletionIntent::ToolResults;
1593 attempt = 0;
1594 }
1595 }
1596 }
1597
1598 fn handle_completion_error(
1599 &mut self,
1600 error: LanguageModelCompletionError,
1601 attempt: u8,
1602 plan: Option<Plan>,
1603 ) -> Result<acp_thread::RetryStatus> {
1604 let Some(model) = self.model.as_ref() else {
1605 return Err(anyhow!(error));
1606 };
1607
1608 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1609 match plan {
1610 Some(Plan::V2(_)) => true,
1611 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1612 None => false,
1613 }
1614 } else {
1615 true
1616 };
1617
1618 if !auto_retry {
1619 return Err(anyhow!(error));
1620 }
1621
1622 let Some(strategy) = Self::retry_strategy_for(&error) else {
1623 return Err(anyhow!(error));
1624 };
1625
1626 let max_attempts = match &strategy {
1627 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1628 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1629 };
1630
1631 if attempt > max_attempts {
1632 return Err(anyhow!(error));
1633 }
1634
1635 let delay = match &strategy {
1636 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1637 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1638 Duration::from_secs(delay_secs)
1639 }
1640 RetryStrategy::Fixed { delay, .. } => *delay,
1641 };
1642 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1643
1644 Ok(acp_thread::RetryStatus {
1645 last_error: error.to_string().into(),
1646 attempt: attempt as usize,
1647 max_attempts: max_attempts as usize,
1648 started_at: Instant::now(),
1649 duration: delay,
1650 })
1651 }
1652
1653 /// A helper method that's called on every streamed completion event.
1654 /// Returns an optional tool result task, which the main agentic loop will
1655 /// send back to the model when it resolves.
1656 fn handle_completion_event(
1657 &mut self,
1658 event: LanguageModelCompletionEvent,
1659 event_stream: &ThreadEventStream,
1660 cancellation_rx: watch::Receiver<bool>,
1661 cx: &mut Context<Self>,
1662 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1663 log::trace!("Handling streamed completion event: {:?}", event);
1664 use LanguageModelCompletionEvent::*;
1665
1666 match event {
1667 StartMessage { .. } => {
1668 self.flush_pending_message(cx);
1669 self.pending_message = Some(AgentMessage::default());
1670 }
1671 Text(new_text) => self.handle_text_event(new_text, event_stream),
1672 Thinking { text, signature } => {
1673 self.handle_thinking_event(text, signature, event_stream)
1674 }
1675 RedactedThinking { data } => self.handle_redacted_thinking_event(data),
1676 ReasoningDetails(details) => {
1677 let last_message = self.pending_message();
1678 // Store the last non-empty reasoning_details (overwrites earlier ones)
1679 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1680 if let serde_json::Value::Array(ref arr) = details {
1681 if !arr.is_empty() {
1682 last_message.reasoning_details = Some(details);
1683 }
1684 } else {
1685 last_message.reasoning_details = Some(details);
1686 }
1687 }
1688 ToolUse(tool_use) => {
1689 return Ok(self.handle_tool_use_event(tool_use, event_stream, cancellation_rx, cx));
1690 }
1691 ToolUseJsonParseError {
1692 id,
1693 tool_name,
1694 raw_input,
1695 json_parse_error,
1696 } => {
1697 return Ok(Some(Task::ready(
1698 self.handle_tool_use_json_parse_error_event(
1699 id,
1700 tool_name,
1701 raw_input,
1702 json_parse_error,
1703 ),
1704 )));
1705 }
1706 UsageUpdate(usage) => {
1707 telemetry::event!(
1708 "Agent Thread Completion Usage Updated",
1709 thread_id = self.id.to_string(),
1710 prompt_id = self.prompt_id.to_string(),
1711 model = self.model.as_ref().map(|m| m.telemetry_id()),
1712 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1713 input_tokens = usage.input_tokens,
1714 output_tokens = usage.output_tokens,
1715 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1716 cache_read_input_tokens = usage.cache_read_input_tokens,
1717 );
1718 self.update_token_usage(usage, cx);
1719 }
1720 UsageUpdated { amount, limit } => {
1721 self.update_model_request_usage(amount, limit, cx);
1722 }
1723 ToolUseLimitReached => {
1724 self.tool_use_limit_reached = true;
1725 }
1726 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1727 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1728 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1729 Started | Queued { .. } => {}
1730 }
1731
1732 Ok(None)
1733 }
1734
1735 fn handle_text_event(&mut self, new_text: String, event_stream: &ThreadEventStream) {
1736 event_stream.send_text(&new_text);
1737
1738 let last_message = self.pending_message();
1739 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1740 text.push_str(&new_text);
1741 } else {
1742 last_message
1743 .content
1744 .push(AgentMessageContent::Text(new_text));
1745 }
1746 }
1747
1748 fn handle_thinking_event(
1749 &mut self,
1750 new_text: String,
1751 new_signature: Option<String>,
1752 event_stream: &ThreadEventStream,
1753 ) {
1754 event_stream.send_thinking(&new_text);
1755
1756 let last_message = self.pending_message();
1757 if let Some(AgentMessageContent::Thinking { text, signature }) =
1758 last_message.content.last_mut()
1759 {
1760 text.push_str(&new_text);
1761 *signature = new_signature.or(signature.take());
1762 } else {
1763 last_message.content.push(AgentMessageContent::Thinking {
1764 text: new_text,
1765 signature: new_signature,
1766 });
1767 }
1768 }
1769
1770 fn handle_redacted_thinking_event(&mut self, data: String) {
1771 let last_message = self.pending_message();
1772 last_message
1773 .content
1774 .push(AgentMessageContent::RedactedThinking(data));
1775 }
1776
1777 fn handle_tool_use_event(
1778 &mut self,
1779 tool_use: LanguageModelToolUse,
1780 event_stream: &ThreadEventStream,
1781 cancellation_rx: watch::Receiver<bool>,
1782 cx: &mut Context<Self>,
1783 ) -> Option<Task<LanguageModelToolResult>> {
1784 cx.notify();
1785
1786 let tool = self.tool(tool_use.name.as_ref());
1787 let mut title = SharedString::from(&tool_use.name);
1788 let mut kind = acp::ToolKind::Other;
1789 if let Some(tool) = tool.as_ref() {
1790 title = tool.initial_title(tool_use.input.clone(), cx);
1791 kind = tool.kind();
1792 }
1793
1794 // Ensure the last message ends in the current tool use
1795 let last_message = self.pending_message();
1796 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1797 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1798 if last_tool_use.id == tool_use.id {
1799 *last_tool_use = tool_use.clone();
1800 false
1801 } else {
1802 true
1803 }
1804 } else {
1805 true
1806 }
1807 });
1808
1809 if push_new_tool_use {
1810 event_stream.send_tool_call(
1811 &tool_use.id,
1812 &tool_use.name,
1813 title,
1814 kind,
1815 tool_use.input.clone(),
1816 );
1817 last_message
1818 .content
1819 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1820 } else {
1821 event_stream.update_tool_call_fields(
1822 &tool_use.id,
1823 acp::ToolCallUpdateFields::new()
1824 .title(title.as_str())
1825 .kind(kind)
1826 .raw_input(tool_use.input.clone()),
1827 );
1828 }
1829
1830 if !tool_use.is_input_complete {
1831 return None;
1832 }
1833
1834 let Some(tool) = tool else {
1835 let content = format!("No tool named {} exists", tool_use.name);
1836 return Some(Task::ready(LanguageModelToolResult {
1837 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1838 tool_use_id: tool_use.id,
1839 tool_name: tool_use.name,
1840 is_error: true,
1841 output: None,
1842 }));
1843 };
1844
1845 let fs = self.project.read(cx).fs().clone();
1846 let tool_event_stream = ToolCallEventStream::new(
1847 tool_use.id.clone(),
1848 event_stream.clone(),
1849 Some(fs),
1850 cancellation_rx,
1851 );
1852 tool_event_stream.update_fields(
1853 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1854 );
1855 let supports_images = self.model().is_some_and(|model| model.supports_images());
1856 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1857 log::debug!("Running tool {}", tool_use.name);
1858 Some(cx.foreground_executor().spawn(async move {
1859 let tool_result = tool_result.await.and_then(|output| {
1860 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1861 && !supports_images
1862 {
1863 return Err(anyhow!(
1864 "Attempted to read an image, but this model doesn't support it.",
1865 ));
1866 }
1867 Ok(output)
1868 });
1869
1870 match tool_result {
1871 Ok(output) => LanguageModelToolResult {
1872 tool_use_id: tool_use.id,
1873 tool_name: tool_use.name,
1874 is_error: false,
1875 content: output.llm_output,
1876 output: Some(output.raw_output),
1877 },
1878 Err(error) => LanguageModelToolResult {
1879 tool_use_id: tool_use.id,
1880 tool_name: tool_use.name,
1881 is_error: true,
1882 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1883 output: Some(error.to_string().into()),
1884 },
1885 }
1886 }))
1887 }
1888
1889 fn handle_tool_use_json_parse_error_event(
1890 &mut self,
1891 tool_use_id: LanguageModelToolUseId,
1892 tool_name: Arc<str>,
1893 raw_input: Arc<str>,
1894 json_parse_error: String,
1895 ) -> LanguageModelToolResult {
1896 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1897 LanguageModelToolResult {
1898 tool_use_id,
1899 tool_name,
1900 is_error: true,
1901 content: LanguageModelToolResultContent::Text(tool_output.into()),
1902 output: Some(serde_json::Value::String(raw_input.to_string())),
1903 }
1904 }
1905
1906 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1907 self.project
1908 .read(cx)
1909 .user_store()
1910 .update(cx, |user_store, cx| {
1911 user_store.update_model_request_usage(
1912 ModelRequestUsage(RequestUsage {
1913 amount: amount as i32,
1914 limit,
1915 }),
1916 cx,
1917 )
1918 });
1919 }
1920
1921 pub fn title(&self) -> SharedString {
1922 self.title.clone().unwrap_or("New Thread".into())
1923 }
1924
1925 pub fn is_generating_summary(&self) -> bool {
1926 self.pending_summary_generation.is_some()
1927 }
1928
1929 pub fn is_generating_title(&self) -> bool {
1930 self.pending_title_generation.is_some()
1931 }
1932
1933 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1934 if let Some(summary) = self.summary.as_ref() {
1935 return Task::ready(Some(summary.clone())).shared();
1936 }
1937 if let Some(task) = self.pending_summary_generation.clone() {
1938 return task;
1939 }
1940 let Some(model) = self.summarization_model.clone() else {
1941 log::error!("No summarization model available");
1942 return Task::ready(None).shared();
1943 };
1944 let mut request = LanguageModelRequest {
1945 intent: Some(CompletionIntent::ThreadContextSummarization),
1946 temperature: AgentSettings::temperature_for_model(&model, cx),
1947 ..Default::default()
1948 };
1949
1950 for message in &self.messages {
1951 request.messages.extend(message.to_request());
1952 }
1953
1954 request.messages.push(LanguageModelRequestMessage {
1955 role: Role::User,
1956 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1957 cache: false,
1958 reasoning_details: None,
1959 });
1960
1961 let task = cx
1962 .spawn(async move |this, cx| {
1963 let mut summary = String::new();
1964 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1965 while let Some(event) = messages.next().await {
1966 let event = event.log_err()?;
1967 let text = match event {
1968 LanguageModelCompletionEvent::Text(text) => text,
1969 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1970 this.update(cx, |thread, cx| {
1971 thread.update_model_request_usage(amount, limit, cx);
1972 })
1973 .ok()?;
1974 continue;
1975 }
1976 _ => continue,
1977 };
1978
1979 let mut lines = text.lines();
1980 summary.extend(lines.next());
1981 }
1982
1983 log::debug!("Setting summary: {}", summary);
1984 let summary = SharedString::from(summary);
1985
1986 this.update(cx, |this, cx| {
1987 this.summary = Some(summary.clone());
1988 this.pending_summary_generation = None;
1989 cx.notify()
1990 })
1991 .ok()?;
1992
1993 Some(summary)
1994 })
1995 .shared();
1996 self.pending_summary_generation = Some(task.clone());
1997 task
1998 }
1999
2000 pub fn generate_title(&mut self, cx: &mut Context<Self>) {
2001 let Some(model) = self.summarization_model.clone() else {
2002 return;
2003 };
2004
2005 log::debug!(
2006 "Generating title with model: {:?}",
2007 self.summarization_model.as_ref().map(|model| model.name())
2008 );
2009 let mut request = LanguageModelRequest {
2010 intent: Some(CompletionIntent::ThreadSummarization),
2011 temperature: AgentSettings::temperature_for_model(&model, cx),
2012 ..Default::default()
2013 };
2014
2015 for message in &self.messages {
2016 request.messages.extend(message.to_request());
2017 }
2018
2019 request.messages.push(LanguageModelRequestMessage {
2020 role: Role::User,
2021 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
2022 cache: false,
2023 reasoning_details: None,
2024 });
2025 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
2026 let mut title = String::new();
2027
2028 let generate = async {
2029 let mut messages = model.stream_completion(request, cx).await?;
2030 while let Some(event) = messages.next().await {
2031 let event = event?;
2032 let text = match event {
2033 LanguageModelCompletionEvent::Text(text) => text,
2034 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
2035 this.update(cx, |thread, cx| {
2036 thread.update_model_request_usage(amount, limit, cx);
2037 })?;
2038 continue;
2039 }
2040 _ => continue,
2041 };
2042
2043 let mut lines = text.lines();
2044 title.extend(lines.next());
2045
2046 // Stop if the LLM generated multiple lines.
2047 if lines.next().is_some() {
2048 break;
2049 }
2050 }
2051 anyhow::Ok(())
2052 };
2053
2054 if generate.await.context("failed to generate title").is_ok() {
2055 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
2056 }
2057 _ = this.update(cx, |this, _| this.pending_title_generation = None);
2058 }));
2059 }
2060
2061 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
2062 self.pending_title_generation = None;
2063 if Some(&title) != self.title.as_ref() {
2064 self.title = Some(title);
2065 cx.emit(TitleUpdated);
2066 cx.notify();
2067 }
2068 }
2069
2070 fn clear_summary(&mut self) {
2071 self.summary = None;
2072 self.pending_summary_generation = None;
2073 }
2074
2075 fn last_user_message(&self) -> Option<&UserMessage> {
2076 self.messages
2077 .iter()
2078 .rev()
2079 .find_map(|message| match message {
2080 Message::User(user_message) => Some(user_message),
2081 Message::Agent(_) => None,
2082 Message::Resume => None,
2083 })
2084 }
2085
2086 fn pending_message(&mut self) -> &mut AgentMessage {
2087 self.pending_message.get_or_insert_default()
2088 }
2089
2090 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
2091 let Some(mut message) = self.pending_message.take() else {
2092 return;
2093 };
2094
2095 if message.content.is_empty() {
2096 return;
2097 }
2098
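// Every tool use needs a matching tool result before the message is sent back
// to the model; backfill results for any tool calls that never completed
// (e.g. because the turn was canceled) so the conversation stays well-formed.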
2099 for content in &message.content {
2100 let AgentMessageContent::ToolUse(tool_use) = content else {
2101 continue;
2102 };
2103
2104 if !message.tool_results.contains_key(&tool_use.id) {
2105 message.tool_results.insert(
2106 tool_use.id.clone(),
2107 LanguageModelToolResult {
2108 tool_use_id: tool_use.id.clone(),
2109 tool_name: tool_use.name.clone(),
2110 is_error: true,
2111 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
2112 output: None,
2113 },
2114 );
2115 }
2116 }
2117
2118 self.messages.push(Message::Agent(message));
2119 self.updated_at = Utc::now();
2120 self.clear_summary();
2121 cx.notify()
2122 }
2123
2124 pub(crate) fn build_completion_request(
2125 &self,
2126 completion_intent: CompletionIntent,
2127 cx: &App,
2128 ) -> Result<LanguageModelRequest> {
2129 let model = self.model().context("No language model configured")?;
2130 let tools = if let Some(turn) = self.running_turn.as_ref() {
2131 turn.tools
2132 .iter()
2133 .filter_map(|(tool_name, tool)| {
2134 log::trace!("Including tool: {}", tool_name);
2135 Some(LanguageModelRequestTool {
2136 name: tool_name.to_string(),
2137 description: tool.description().to_string(),
2138 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
2139 })
2140 })
2141 .collect::<Vec<_>>()
2142 } else {
2143 Vec::new()
2144 };
2145
2146 log::debug!("Building completion request");
2147 log::debug!("Completion intent: {:?}", completion_intent);
2148 log::debug!("Completion mode: {:?}", self.completion_mode);
2149
2150 let available_tools: Vec<_> = self
2151 .running_turn
2152 .as_ref()
2153 .map(|turn| turn.tools.keys().cloned().collect())
2154 .unwrap_or_default();
2155
2156 log::debug!("Request includes {} tools", available_tools.len());
2157 let messages = self.build_request_messages(available_tools, cx);
2158 log::debug!("Request will include {} messages", messages.len());
2159
2160 let request = LanguageModelRequest {
2161 thread_id: Some(self.id.to_string()),
2162 prompt_id: Some(self.prompt_id.to_string()),
2163 intent: Some(completion_intent),
2164 mode: Some(self.completion_mode.into()),
2165 messages,
2166 tools,
2167 tool_choice: None,
2168 stop: Vec::new(),
2169 temperature: AgentSettings::temperature_for_model(model, cx),
2170 thinking_allowed: true,
2171 };
2172
2173 log::debug!("Completion request built successfully");
2174 Ok(request)
2175 }
2176
2177 fn enabled_tools(
2178 &self,
2179 profile: &AgentProfileSettings,
2180 model: &Arc<dyn LanguageModel>,
2181 cx: &App,
2182 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
2183 fn truncate(tool_name: &SharedString) -> SharedString {
2184 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
2185 let mut truncated = tool_name.to_string();
2186 truncated.truncate(MAX_TOOL_NAME_LENGTH);
2187 truncated.into()
2188 } else {
2189 tool_name.clone()
2190 }
2191 }
2192
2193 let mut tools = self
2194 .tools
2195 .iter()
2196 .filter_map(|(tool_name, tool)| {
2197 if tool.supports_provider(&model.provider_id())
2198 && profile.is_tool_enabled(tool_name)
2199 {
2200 Some((truncate(tool_name), tool.clone()))
2201 } else {
2202 None
2203 }
2204 })
2205 .collect::<BTreeMap<_, _>>();
2206
2207 let mut context_server_tools = Vec::new();
2208 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
2209 let mut duplicate_tool_names = HashSet::default();
2210 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
2211 for (tool_name, tool) in server_tools {
2212 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
2213 let tool_name = truncate(tool_name);
2214 if !seen_tools.insert(tool_name.clone()) {
2215 duplicate_tool_names.insert(tool_name.clone());
2216 }
2217 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
2218 }
2219 }
2220 }
2221
2222 // When there are duplicate tool names, disambiguate by prefixing them
2223 // with the server ID. In the rare case there isn't enough space for the
2224 // disambiguated tool name, keep only the last tool with this name.
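// For example, if servers "alpha" and "beta" both expose a "search" tool, the
// registered names become "alpha_search" and "beta_search" (with the server ID
// truncated as needed to stay within MAX_TOOL_NAME_LENGTH).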
2225 for (server_id, tool_name, tool) in context_server_tools {
2226 if duplicate_tool_names.contains(&tool_name) {
2227 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
2228 if available >= 2 {
2229 let mut disambiguated = server_id.0.to_string();
2230 disambiguated.truncate(available - 1);
2231 disambiguated.push('_');
2232 disambiguated.push_str(&tool_name);
2233 tools.insert(disambiguated.into(), tool.clone());
2234 } else {
2235 tools.insert(tool_name, tool.clone());
2236 }
2237 } else {
2238 tools.insert(tool_name, tool.clone());
2239 }
2240 }
2241
2242 tools
2243 }
2244
2245 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
2246 self.running_turn.as_ref()?.tools.get(name).cloned()
2247 }
2248
2249 pub fn has_tool(&self, name: &str) -> bool {
2250 self.running_turn
2251 .as_ref()
2252 .is_some_and(|turn| turn.tools.contains_key(name))
2253 }
2254
2255 #[cfg(any(test, feature = "test-support"))]
2256 pub fn has_registered_tool(&self, name: &str) -> bool {
2257 self.tools.contains_key(name)
2258 }
2259
2260 pub fn registered_tool_names(&self) -> Vec<SharedString> {
2261 self.tools.keys().cloned().collect()
2262 }
2263
2264 pub fn register_running_subagent(&mut self, subagent: WeakEntity<Thread>) {
2265 self.running_subagents.push(subagent);
2266 }
2267
2268 pub fn unregister_running_subagent(&mut self, subagent: &WeakEntity<Thread>) {
2269 self.running_subagents
2270 .retain(|s| s.entity_id() != subagent.entity_id());
2271 }
2272
2273 pub fn running_subagent_count(&self) -> usize {
2274 self.running_subagents
2275 .iter()
2276 .filter(|s| s.upgrade().is_some())
2277 .count()
2278 }
2279
2280 pub fn is_subagent(&self) -> bool {
2281 self.subagent_context.is_some()
2282 }
2283
2284 pub fn depth(&self) -> u8 {
2285 self.subagent_context.as_ref().map(|c| c.depth).unwrap_or(0)
2286 }
2287
2288 pub fn is_turn_complete(&self) -> bool {
2289 self.running_turn.is_none()
2290 }
2291
2292 pub fn submit_user_message(
2293 &mut self,
2294 content: impl Into<String>,
2295 cx: &mut Context<Self>,
2296 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2297 let content = content.into();
2298 self.messages.push(Message::User(UserMessage {
2299 id: UserMessageId::new(),
2300 content: vec![UserMessageContent::Text(content)],
2301 }));
2302 cx.notify();
2303 self.send_existing(cx)
2304 }
2305
2306 pub fn interrupt_for_summary(
2307 &mut self,
2308 cx: &mut Context<Self>,
2309 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2310 let context = self
2311 .subagent_context
2312 .as_ref()
2313 .context("Not a subagent thread")?;
2314 let prompt = context.context_low_prompt.clone();
2315 self.cancel(cx).detach();
2316 self.submit_user_message(prompt, cx)
2317 }
2318
2319 pub fn request_final_summary(
2320 &mut self,
2321 cx: &mut Context<Self>,
2322 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
2323 let context = self
2324 .subagent_context
2325 .as_ref()
2326 .context("Not a subagent thread")?;
2327 let prompt = context.summary_prompt.clone();
2328 self.submit_user_message(prompt, cx)
2329 }
2330
2331 fn build_request_messages(
2332 &self,
2333 available_tools: Vec<SharedString>,
2334 cx: &App,
2335 ) -> Vec<LanguageModelRequestMessage> {
2336 log::trace!(
2337 "Building request messages from {} thread messages",
2338 self.messages.len()
2339 );
2340
2341 let system_prompt = SystemPromptTemplate {
2342 project: self.project_context.read(cx),
2343 available_tools,
2344 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2345 }
2346 .render(&self.templates)
2347 .context("failed to build system prompt")
2348 .expect("Invalid template");
2349 let mut messages = vec![LanguageModelRequestMessage {
2350 role: Role::System,
2351 content: vec![system_prompt.into()],
2352 cache: false,
2353 reasoning_details: None,
2354 }];
2355 for message in &self.messages {
2356 messages.extend(message.to_request());
2357 }
2358
2359 if let Some(last_message) = messages.last_mut() {
2360 last_message.cache = true;
2361 }
2362
2363 if let Some(message) = self.pending_message.as_ref() {
2364 messages.extend(message.to_request());
2365 }
2366
2367 messages
2368 }
2369
2370 pub fn to_markdown(&self) -> String {
2371 let mut markdown = String::new();
2372 for (ix, message) in self.messages.iter().enumerate() {
2373 if ix > 0 {
2374 markdown.push('\n');
2375 }
2376 markdown.push_str(&message.to_markdown());
2377 }
2378
2379 if let Some(message) = self.pending_message.as_ref() {
2380 markdown.push('\n');
2381 markdown.push_str(&message.to_markdown());
2382 }
2383
2384 markdown
2385 }
2386
2387 fn advance_prompt_id(&mut self) {
2388 self.prompt_id = PromptId::new();
2389 }
2390
2391 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2392 use LanguageModelCompletionError::*;
2393 use http_client::StatusCode;
2394
2395 // General strategy here:
2396 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2397 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to MAX_RETRY_ATTEMPTS times, backing off exponentially or honoring any retry_after hint.
2398 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2399 match error {
2400 HttpResponseError {
2401 status_code: StatusCode::TOO_MANY_REQUESTS,
2402 ..
2403 } => Some(RetryStrategy::ExponentialBackoff {
2404 initial_delay: BASE_RETRY_DELAY,
2405 max_attempts: MAX_RETRY_ATTEMPTS,
2406 }),
2407 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2408 Some(RetryStrategy::Fixed {
2409 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2410 max_attempts: MAX_RETRY_ATTEMPTS,
2411 })
2412 }
2413 UpstreamProviderError {
2414 status,
2415 retry_after,
2416 ..
2417 } => match *status {
2418 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2419 Some(RetryStrategy::Fixed {
2420 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2421 max_attempts: MAX_RETRY_ATTEMPTS,
2422 })
2423 }
2424 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2425 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2426 // Internal Server Error could be anything, retry up to 3 times.
2427 max_attempts: 3,
2428 }),
2429 status => {
2430 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2431 // but we frequently get them in practice. See https://http.dev/529
2432 if status.as_u16() == 529 {
2433 Some(RetryStrategy::Fixed {
2434 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2435 max_attempts: MAX_RETRY_ATTEMPTS,
2436 })
2437 } else {
2438 Some(RetryStrategy::Fixed {
2439 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2440 max_attempts: 2,
2441 })
2442 }
2443 }
2444 },
2445 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2446 delay: BASE_RETRY_DELAY,
2447 max_attempts: 3,
2448 }),
2449 ApiReadResponseError { .. }
2450 | HttpSend { .. }
2451 | DeserializeResponse { .. }
2452 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2453 delay: BASE_RETRY_DELAY,
2454 max_attempts: 3,
2455 }),
2456 // Retrying these errors definitely shouldn't help.
2457 HttpResponseError {
2458 status_code:
2459 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2460 ..
2461 }
2462 | AuthenticationError { .. }
2463 | PermissionError { .. }
2464 | NoApiKey { .. }
2465 | ApiEndpointNotFound { .. }
2466 | PromptTooLarge { .. } => None,
2467 // These errors might be transient, so retry them once
2468 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2469 delay: BASE_RETRY_DELAY,
2470 max_attempts: 1,
2471 }),
2472 // Retry all other 4xx and 5xx errors up to 3 times.
2473 HttpResponseError { status_code, .. }
2474 if status_code.is_client_error() || status_code.is_server_error() =>
2475 {
2476 Some(RetryStrategy::Fixed {
2477 delay: BASE_RETRY_DELAY,
2478 max_attempts: 3,
2479 })
2480 }
2481 Other(err)
2482 if err.is::<language_model::PaymentRequiredError>()
2483 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2484 {
2485 // Retrying won't help for Payment Required or Model Request Limit errors (where
2486 // the user must upgrade to usage-based billing to get more requests, or else wait
2487 // for a significant amount of time for the request limit to reset).
2488 None
2489 }
2490 // For any other errors, retry conservatively (a couple of attempts) in case they're transient
2491 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2492 delay: BASE_RETRY_DELAY,
2493 max_attempts: 2,
2494 }),
2495 }
2496 }
2497}
2498
2499struct RunningTurn {
2500 /// Holds the task that handles agent interaction until the end of the turn.
2501 /// Survives across multiple requests as the model performs tool calls and
2502 /// we run tools and report their results.
2503 _task: Task<()>,
2504 /// The current event stream for the running turn. Used to report a final
2505 /// cancellation event if we cancel the turn.
2506 event_stream: ThreadEventStream,
2507 /// The tools that were enabled for this turn.
2508 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2509 /// Sender to signal tool cancellation. When cancel is called, this is
2510 /// set to true so all tools can detect user-initiated cancellation.
2511 cancellation_tx: watch::Sender<bool>,
2512}
2513
2514impl RunningTurn {
2515 fn cancel(mut self) -> Task<()> {
2516 log::debug!("Cancelling in progress turn");
2517 self.cancellation_tx.send(true).ok();
2518 self.event_stream.send_canceled();
2519 self._task
2520 }
2521}
2522
2523pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2524
2525impl EventEmitter<TokenUsageUpdated> for Thread {}
2526
2527pub struct TitleUpdated;
2528
2529impl EventEmitter<TitleUpdated> for Thread {}
2530
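/// A strongly-typed tool the agent can invoke during a turn.
///
/// Implementors define serializable `Input`/`Output` types plus a `run`
/// method, and call [`AgentTool::erase`] to obtain an [`AnyAgentTool`] that
/// can be registered alongside tools with different input types.
///
/// A minimal sketch of an implementation; `EchoTool`/`EchoInput` are
/// hypothetical and only illustrate the shape of the trait:
///
/// ```ignore
/// #[derive(Debug, Serialize, Deserialize, JsonSchema)]
/// struct EchoInput {
///     /// Text to echo back to the model.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoInput;
///     // Assumes `String: Into<LanguageModelToolResultContent>`.
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
/// ```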
2531pub trait AgentTool
2532where
2533 Self: 'static + Sized,
2534{
2535 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2536 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2537
2538 fn name() -> &'static str;
2539
2540 fn description() -> SharedString {
2541 let schema = schemars::schema_for!(Self::Input);
2542 SharedString::new(
2543 schema
2544 .get("description")
2545 .and_then(|description| description.as_str())
2546 .unwrap_or_default(),
2547 )
2548 }
2549
2550 fn kind() -> acp::ToolKind;
2551
2552 /// The initial tool title to display. Can be updated during the tool run.
2553 fn initial_title(
2554 &self,
2555 input: Result<Self::Input, serde_json::Value>,
2556 cx: &mut App,
2557 ) -> SharedString;
2558
2559 /// Returns the JSON schema that describes the tool's input.
2560 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2561 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2562 }
2563
2564 /// Some tools only work with a specific provider, e.g. for billing reasons.
2565 /// This lets a tool report whether it's compatible with the given provider so incompatible tools can be filtered out.
2566 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2567 true
2568 }
2569
2570 /// Runs the tool with the provided input.
2571 fn run(
2572 self: Arc<Self>,
2573 input: Self::Input,
2574 event_stream: ToolCallEventStream,
2575 cx: &mut App,
2576 ) -> Task<Result<Self::Output>>;
2577
2578 /// Emits events for a previous execution of the tool.
2579 fn replay(
2580 &self,
2581 _input: Self::Input,
2582 _output: Self::Output,
2583 _event_stream: ToolCallEventStream,
2584 _cx: &mut App,
2585 ) -> Result<()> {
2586 Ok(())
2587 }
2588
2589 fn erase(self) -> Arc<dyn AnyAgentTool> {
2590 Arc::new(Erased(Arc::new(self)))
2591 }
2592}
2593
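/// Wrapper that erases an [`AgentTool`]'s concrete input and output types so
/// it can be stored and invoked as an [`AnyAgentTool`] trait object.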
2594pub struct Erased<T>(T);
2595
2596pub struct AgentToolOutput {
2597 pub llm_output: LanguageModelToolResultContent,
2598 pub raw_output: serde_json::Value,
2599}
2600
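/// Object-safe counterpart of [`AgentTool`] that operates on raw
/// `serde_json::Value`s, so tools with different input types can share one
/// registry.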
2601pub trait AnyAgentTool {
2602 fn name(&self) -> SharedString;
2603 fn description(&self) -> SharedString;
2604 fn kind(&self) -> acp::ToolKind;
2605 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2606 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2607 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2608 true
2609 }
2610 fn run(
2611 self: Arc<Self>,
2612 input: serde_json::Value,
2613 event_stream: ToolCallEventStream,
2614 cx: &mut App,
2615 ) -> Task<Result<AgentToolOutput>>;
2616 fn replay(
2617 &self,
2618 input: serde_json::Value,
2619 output: serde_json::Value,
2620 event_stream: ToolCallEventStream,
2621 cx: &mut App,
2622 ) -> Result<()>;
2623}
2624
2625impl<T> AnyAgentTool for Erased<Arc<T>>
2626where
2627 T: AgentTool,
2628{
2629 fn name(&self) -> SharedString {
2630 T::name().into()
2631 }
2632
2633 fn description(&self) -> SharedString {
2634 T::description()
2635 }
2636
2637 fn kind(&self) -> agent_client_protocol::ToolKind {
2638 T::kind()
2639 }
2640
2641 fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
2642 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2643 self.0.initial_title(parsed_input, cx)
2644 }
2645
2646 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2647 let mut json = serde_json::to_value(T::input_schema(format))?;
2648 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2649 Ok(json)
2650 }
2651
2652 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2653 T::supports_provider(provider)
2654 }
2655
2656 fn run(
2657 self: Arc<Self>,
2658 input: serde_json::Value,
2659 event_stream: ToolCallEventStream,
2660 cx: &mut App,
2661 ) -> Task<Result<AgentToolOutput>> {
2662 cx.spawn(async move |cx| {
2663 let input = serde_json::from_value(input)?;
2664 let output = cx
2665 .update(|cx| self.0.clone().run(input, event_stream, cx))
2666 .await?;
2667 let raw_output = serde_json::to_value(&output)?;
2668 Ok(AgentToolOutput {
2669 llm_output: output.into(),
2670 raw_output,
2671 })
2672 })
2673 }
2674
2675 fn replay(
2676 &self,
2677 input: serde_json::Value,
2678 output: serde_json::Value,
2679 event_stream: ToolCallEventStream,
2680 cx: &mut App,
2681 ) -> Result<()> {
2682 let input = serde_json::from_value(input)?;
2683 let output = serde_json::from_value(output)?;
2684 self.0.replay(input, output, event_stream, cx)
2685 }
2686}
2687
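/// Cloneable sender used to stream `ThreadEvent`s to the consumer of the
/// current turn.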
2688#[derive(Clone)]
2689struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2690
2691impl ThreadEventStream {
2692 fn send_user_message(&self, message: &UserMessage) {
2693 self.0
2694 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2695 .ok();
2696 }
2697
2698 fn send_text(&self, text: &str) {
2699 self.0
2700 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2701 .ok();
2702 }
2703
2704 fn send_thinking(&self, text: &str) {
2705 self.0
2706 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2707 .ok();
2708 }
2709
2710 fn send_tool_call(
2711 &self,
2712 id: &LanguageModelToolUseId,
2713 tool_name: &str,
2714 title: SharedString,
2715 kind: acp::ToolKind,
2716 input: serde_json::Value,
2717 ) {
2718 self.0
2719 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2720 id,
2721 tool_name,
2722 title.to_string(),
2723 kind,
2724 input,
2725 ))))
2726 .ok();
2727 }
2728
2729 fn initial_tool_call(
2730 id: &LanguageModelToolUseId,
2731 tool_name: &str,
2732 title: String,
2733 kind: acp::ToolKind,
2734 input: serde_json::Value,
2735 ) -> acp::ToolCall {
2736 acp::ToolCall::new(id.to_string(), title)
2737 .kind(kind)
2738 .raw_input(input)
2739 .meta(acp_thread::meta_with_tool_name(tool_name))
2740 }
2741
2742 fn update_tool_call_fields(
2743 &self,
2744 tool_use_id: &LanguageModelToolUseId,
2745 fields: acp::ToolCallUpdateFields,
2746 ) {
2747 self.0
2748 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2749 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2750 )))
2751 .ok();
2752 }
2753
2754 fn send_retry(&self, status: acp_thread::RetryStatus) {
2755 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2756 }
2757
2758 fn send_stop(&self, reason: acp::StopReason) {
2759 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2760 }
2761
2762 fn send_canceled(&self) {
2763 self.0
2764 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2765 .ok();
2766 }
2767
2768 fn send_error(&self, error: impl Into<anyhow::Error>) {
2769 self.0.unbounded_send(Err(error.into())).ok();
2770 }
2771}
2772
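/// Handle given to a running tool for streaming status updates, diffs, and
/// authorization requests for a single tool call, and for observing
/// user-initiated cancellation.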
2773#[derive(Clone)]
2774pub struct ToolCallEventStream {
2775 tool_use_id: LanguageModelToolUseId,
2776 stream: ThreadEventStream,
2777 fs: Option<Arc<dyn Fs>>,
2778 cancellation_rx: watch::Receiver<bool>,
2779}
2780
2781impl ToolCallEventStream {
2782 #[cfg(any(test, feature = "test-support"))]
2783 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2784 let (stream, receiver, _cancellation_tx) = Self::test_with_cancellation();
2785 (stream, receiver)
2786 }
2787
2788 #[cfg(any(test, feature = "test-support"))]
2789 pub fn test_with_cancellation() -> (Self, ToolCallEventStreamReceiver, watch::Sender<bool>) {
2790 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2791 let (cancellation_tx, cancellation_rx) = watch::channel(false);
2792
2793 let stream = ToolCallEventStream::new(
2794 "test_id".into(),
2795 ThreadEventStream(events_tx),
2796 None,
2797 cancellation_rx,
2798 );
2799
2800 (
2801 stream,
2802 ToolCallEventStreamReceiver(events_rx),
2803 cancellation_tx,
2804 )
2805 }
2806
2807 /// Signals cancellation via the given sender. Only available in tests.
2808 #[cfg(any(test, feature = "test-support"))]
2809 pub fn signal_cancellation_with_sender(cancellation_tx: &mut watch::Sender<bool>) {
2810 cancellation_tx.send(true).ok();
2811 }
2812
2813 fn new(
2814 tool_use_id: LanguageModelToolUseId,
2815 stream: ThreadEventStream,
2816 fs: Option<Arc<dyn Fs>>,
2817 cancellation_rx: watch::Receiver<bool>,
2818 ) -> Self {
2819 Self {
2820 tool_use_id,
2821 stream,
2822 fs,
2823 cancellation_rx,
2824 }
2825 }
2826
2827 /// Returns a future that resolves when the user cancels the tool call.
2828 /// Tools should select on this alongside their main work to detect user cancellation.
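///
/// A hedged sketch of the intended pattern; `do_work()` is hypothetical:
///
/// ```ignore
/// futures::select_biased! {
///     _ = event_stream.cancelled_by_user().fuse() => {
///         // User canceled; abandon the remaining work.
///     }
///     result = do_work().fuse() => {
///         // Completed normally; use `result`.
///     }
/// }
/// ```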
2829 pub fn cancelled_by_user(&self) -> impl std::future::Future<Output = ()> + '_ {
2830 let mut rx = self.cancellation_rx.clone();
2831 async move {
2832 loop {
2833 if *rx.borrow() {
2834 return;
2835 }
2836 if rx.changed().await.is_err() {
2837 // Sender dropped, will never be cancelled
2838 std::future::pending::<()>().await;
2839 }
2840 }
2841 }
2842 }
2843
2844 /// Returns true if the user has cancelled this tool call.
2845 /// This is useful for checking cancellation state after an operation completes,
2846 /// to determine if the completion was due to user cancellation.
2847 pub fn was_cancelled_by_user(&self) -> bool {
2848 *self.cancellation_rx.clone().borrow()
2849 }
2850
2851 pub fn tool_use_id(&self) -> &LanguageModelToolUseId {
2852 &self.tool_use_id
2853 }
2854
2855 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2856 self.stream
2857 .update_tool_call_fields(&self.tool_use_id, fields);
2858 }
2859
2860 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2861 self.stream
2862 .0
2863 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2864 acp_thread::ToolCallUpdateDiff {
2865 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2866 diff,
2867 }
2868 .into(),
2869 )))
2870 .ok();
2871 }
2872
2873 pub fn update_subagent_thread(&self, thread: Entity<acp_thread::AcpThread>) {
2874 self.stream
2875 .0
2876 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2877 acp_thread::ToolCallUpdateSubagentThread {
2878 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2879 thread,
2880 }
2881 .into(),
2882 )))
2883 .ok();
2884 }
2885
2886 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2887 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2888 return Task::ready(Ok(()));
2889 }
2890
2891 self.authorize_required(title, cx)
2892 }
2893
2894 /// Like `authorize`, but always prompts for confirmation regardless of
2895 /// the `always_allow_tool_actions` setting. Use this when tool-specific
2896 /// permission rules (like `always_confirm` patterns) have already determined
2897 /// that confirmation is required.
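///
/// A hedged sketch of gating a destructive step on approval; the work inside
/// the spawned future is hypothetical:
///
/// ```ignore
/// let confirmed = event_stream.authorize_required("Overwrite file?", cx);
/// cx.spawn(async move |_cx| {
///     confirmed.await?;
///     // ...only perform the destructive work after approval...
///     anyhow::Ok(())
/// })
/// ```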
2898 pub fn authorize_required(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2899 let (response_tx, response_rx) = oneshot::channel();
2900 self.stream
2901 .0
2902 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2903 ToolCallAuthorization {
2904 tool_call: acp::ToolCallUpdate::new(
2905 self.tool_use_id.to_string(),
2906 acp::ToolCallUpdateFields::new().title(title.into()),
2907 ),
2908 options: vec![
2909 acp::PermissionOption::new(
2910 acp::PermissionOptionId::new("always_allow"),
2911 "Always Allow",
2912 acp::PermissionOptionKind::AllowAlways,
2913 ),
2914 acp::PermissionOption::new(
2915 acp::PermissionOptionId::new("allow"),
2916 "Allow",
2917 acp::PermissionOptionKind::AllowOnce,
2918 ),
2919 acp::PermissionOption::new(
2920 acp::PermissionOptionId::new("deny"),
2921 "Deny",
2922 acp::PermissionOptionKind::RejectOnce,
2923 ),
2924 ],
2925 response: response_tx,
2926 },
2927 )))
2928 .ok();
2929 let fs = self.fs.clone();
2930 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2931 "always_allow" => {
2932 if let Some(fs) = fs.clone() {
2933 cx.update(|cx| {
2934 update_settings_file(fs, cx, |settings, _| {
2935 settings
2936 .agent
2937 .get_or_insert_default()
2938 .set_always_allow_tool_actions(true);
2939 });
2940 });
2941 }
2942
2943 Ok(())
2944 }
2945 "allow" => Ok(()),
2946 _ => Err(anyhow!("Permission to run tool denied by user")),
2947 })
2948 }
2949}
2950
2951#[cfg(any(test, feature = "test-support"))]
2952pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2953
2954#[cfg(any(test, feature = "test-support"))]
2955impl ToolCallEventStreamReceiver {
2956 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2957 let event = self.0.next().await;
2958 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2959 auth
2960 } else {
2961 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2962 }
2963 }
2964
2965 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2966 let event = self.0.next().await;
2967 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2968 update,
2969 )))) = event
2970 {
2971 update.fields
2972 } else {
2973 panic!("Expected update fields but got: {:?}", event);
2974 }
2975 }
2976
2977 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2978 let event = self.0.next().await;
2979 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2980 update,
2981 )))) = event
2982 {
2983 update.diff
2984 } else {
2985 panic!("Expected diff but got: {:?}", event);
2986 }
2987 }
2988
2989 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2990 let event = self.0.next().await;
2991 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2992 update,
2993 )))) = event
2994 {
2995 update.terminal
2996 } else {
2997 panic!("Expected terminal but got: {:?}", event);
2998 }
2999 }
3000}
3001
3002#[cfg(any(test, feature = "test-support"))]
3003impl std::ops::Deref for ToolCallEventStreamReceiver {
3004 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
3005
3006 fn deref(&self) -> &Self::Target {
3007 &self.0
3008 }
3009}
3010
3011#[cfg(any(test, feature = "test-support"))]
3012impl std::ops::DerefMut for ToolCallEventStreamReceiver {
3013 fn deref_mut(&mut self) -> &mut Self::Target {
3014 &mut self.0
3015 }
3016}
3017
3018impl From<&str> for UserMessageContent {
3019 fn from(text: &str) -> Self {
3020 Self::Text(text.into())
3021 }
3022}
3023
3024impl UserMessageContent {
3025 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
3026 match value {
3027 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
3028 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
3029 acp::ContentBlock::Audio(_) => {
3030 // TODO
3031 Self::Text("[audio]".to_string())
3032 }
3033 acp::ContentBlock::ResourceLink(resource_link) => {
3034 match MentionUri::parse(&resource_link.uri, path_style) {
3035 Ok(uri) => Self::Mention {
3036 uri,
3037 content: String::new(),
3038 },
3039 Err(err) => {
3040 log::error!("Failed to parse mention link: {}", err);
3041 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
3042 }
3043 }
3044 }
3045 acp::ContentBlock::Resource(resource) => match resource.resource {
3046 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
3047 match MentionUri::parse(&resource.uri, path_style) {
3048 Ok(uri) => Self::Mention {
3049 uri,
3050 content: resource.text,
3051 },
3052 Err(err) => {
3053 log::error!("Failed to parse mention link: {}", err);
3054 Self::Text(
3055 MarkdownCodeBlock {
3056 tag: &resource.uri,
3057 text: &resource.text,
3058 }
3059 .to_string(),
3060 )
3061 }
3062 }
3063 }
3064 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
3065 // TODO
3066 Self::Text("[blob]".to_string())
3067 }
3068 other => {
3069 log::warn!("Unexpected content type: {:?}", other);
3070 Self::Text("[unknown]".to_string())
3071 }
3072 },
3073 other => {
3074 log::warn!("Unexpected content type: {:?}", other);
3075 Self::Text("[unknown]".to_string())
3076 }
3077 }
3078 }
3079}
3080
3081impl From<UserMessageContent> for acp::ContentBlock {
3082 fn from(content: UserMessageContent) -> Self {
3083 match content {
3084 UserMessageContent::Text(text) => text.into(),
3085 UserMessageContent::Image(image) => {
3086 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
3087 }
3088 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
3089 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
3090 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
3091 )),
3092 ),
3093 }
3094 }
3095}
3096
3097fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
3098 LanguageModelImage {
3099 source: image_content.data.into(),
3100 size: None,
3101 }
3102}