1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SystemPromptTemplate, Template, Templates, TerminalTool,
6 ThinkingTool, WebSearchTool,
7};
8use acp_thread::{MentionUri, UserMessageId};
9use action_log::ActionLog;
10
11use agent_client_protocol as acp;
12use agent_settings::{
13 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
14 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
15};
16use anyhow::{Context as _, Result, anyhow};
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage, UserStore};
19use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::stream;
23use futures::{
24 FutureExt,
25 channel::{mpsc, oneshot},
26 future::Shared,
27 stream::FuturesUnordered,
28};
29use gpui::{
30 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
31};
32use language_model::{
33 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
34 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
35 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
36 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
37 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
38 ZED_CLOUD_PROVIDER_ID,
39};
40use project::Project;
41use prompt_store::ProjectContext;
42use schemars::{JsonSchema, Schema};
43use serde::{Deserialize, Serialize};
44use settings::{LanguageModelSelection, Settings, update_settings_file};
45use smol::stream::StreamExt;
46use std::{
47 collections::BTreeMap,
48 ops::RangeInclusive,
49 path::Path,
50 rc::Rc,
51 sync::Arc,
52 time::{Duration, Instant},
53};
54use std::{fmt::Write, path::PathBuf};
55use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
56use uuid::Uuid;
57
58const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
59pub const MAX_TOOL_NAME_LENGTH: usize = 64;
60
61/// The ID of the user prompt that initiated a request.
62///
63/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
64#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
65pub struct PromptId(Arc<str>);
66
67impl PromptId {
68 pub fn new() -> Self {
69 Self(Uuid::new_v4().to_string().into())
70 }
71}
72
73impl std::fmt::Display for PromptId {
74 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
75 write!(f, "{}", self.0)
76 }
77}
78
79pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
80pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
81
82#[derive(Debug, Clone)]
83enum RetryStrategy {
84 ExponentialBackoff {
85 initial_delay: Duration,
86 max_attempts: u8,
87 },
88 Fixed {
89 delay: Duration,
90 max_attempts: u8,
91 },
92}
93
94#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
95pub enum Message {
96 User(UserMessage),
97 Agent(AgentMessage),
98 Resume,
99}
100
101impl Message {
102 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
103 match self {
104 Message::Agent(agent_message) => Some(agent_message),
105 _ => None,
106 }
107 }
108
109 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
110 match self {
111 Message::User(message) => vec![message.to_request()],
112 Message::Agent(message) => message.to_request(),
113 Message::Resume => vec![LanguageModelRequestMessage {
114 role: Role::User,
115 content: vec!["Continue where you left off".into()],
116 cache: false,
117 reasoning_details: None,
118 }],
119 }
120 }
121
122 pub fn to_markdown(&self) -> String {
123 match self {
124 Message::User(message) => message.to_markdown(),
125 Message::Agent(message) => message.to_markdown(),
126 Message::Resume => "[resume]\n".into(),
127 }
128 }
129
130 pub fn role(&self) -> Role {
131 match self {
132 Message::User(_) | Message::Resume => Role::User,
133 Message::Agent(_) => Role::Assistant,
134 }
135 }
136}
137
138#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
139pub struct UserMessage {
140 pub id: UserMessageId,
141 pub content: Vec<UserMessageContent>,
142}
143
144#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
145pub enum UserMessageContent {
146 Text(String),
147 Mention { uri: MentionUri, content: String },
148 Image(LanguageModelImage),
149}
150
151impl UserMessage {
152 pub fn to_markdown(&self) -> String {
153 let mut markdown = String::from("## User\n\n");
154
155 for content in &self.content {
156 match content {
157 UserMessageContent::Text(text) => {
158 markdown.push_str(text);
159 markdown.push('\n');
160 }
161 UserMessageContent::Image(_) => {
162 markdown.push_str("<image />\n");
163 }
164 UserMessageContent::Mention { uri, content } => {
165 if !content.is_empty() {
166 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
167 } else {
168 let _ = writeln!(&mut markdown, "{}", uri.as_link());
169 }
170 }
171 }
172 }
173
174 markdown
175 }
176
177 fn to_request(&self) -> LanguageModelRequestMessage {
178 let mut message = LanguageModelRequestMessage {
179 role: Role::User,
180 content: Vec::with_capacity(self.content.len()),
181 cache: false,
182 reasoning_details: None,
183 };
184
185 const OPEN_CONTEXT: &str = "<context>\n\
186 The following items were attached by the user. \
187 They are up-to-date and don't need to be re-read.\n\n";
188
189 const OPEN_FILES_TAG: &str = "<files>";
190 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
191 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
192 const OPEN_SELECTIONS_TAG: &str = "<selections>";
193 const OPEN_THREADS_TAG: &str = "<threads>";
194 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
195 const OPEN_RULES_TAG: &str =
196 "<rules>\nThe user has specified the following rules that should be applied:\n";
197
198 let mut file_context = OPEN_FILES_TAG.to_string();
199 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
200 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
201 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
202 let mut thread_context = OPEN_THREADS_TAG.to_string();
203 let mut fetch_context = OPEN_FETCH_TAG.to_string();
204 let mut rules_context = OPEN_RULES_TAG.to_string();
205
206 for chunk in &self.content {
207 let chunk = match chunk {
208 UserMessageContent::Text(text) => {
209 language_model::MessageContent::Text(text.clone())
210 }
211 UserMessageContent::Image(value) => {
212 language_model::MessageContent::Image(value.clone())
213 }
214 UserMessageContent::Mention { uri, content } => {
215 match uri {
216 MentionUri::File { abs_path } => {
217 write!(
218 &mut file_context,
219 "\n{}",
220 MarkdownCodeBlock {
221 tag: &codeblock_tag(abs_path, None),
222 text: &content.to_string(),
223 }
224 )
225 .ok();
226 }
227 MentionUri::PastedImage => {
228 debug_panic!("pasted image URI should not be used in mention content")
229 }
230 MentionUri::Directory { .. } => {
231 write!(&mut directory_context, "\n{}\n", content).ok();
232 }
233 MentionUri::Symbol {
234 abs_path: path,
235 line_range,
236 ..
237 } => {
238 write!(
239 &mut symbol_context,
240 "\n{}",
241 MarkdownCodeBlock {
242 tag: &codeblock_tag(path, Some(line_range)),
243 text: content
244 }
245 )
246 .ok();
247 }
248 MentionUri::Selection {
249 abs_path: path,
250 line_range,
251 ..
252 } => {
253 write!(
254 &mut selection_context,
255 "\n{}",
256 MarkdownCodeBlock {
257 tag: &codeblock_tag(
258 path.as_deref().unwrap_or("Untitled".as_ref()),
259 Some(line_range)
260 ),
261 text: content
262 }
263 )
264 .ok();
265 }
266 MentionUri::Thread { .. } => {
267 write!(&mut thread_context, "\n{}\n", content).ok();
268 }
269 MentionUri::TextThread { .. } => {
270 write!(&mut thread_context, "\n{}\n", content).ok();
271 }
272 MentionUri::Rule { .. } => {
273 write!(
274 &mut rules_context,
275 "\n{}",
276 MarkdownCodeBlock {
277 tag: "",
278 text: content
279 }
280 )
281 .ok();
282 }
283 MentionUri::Fetch { url } => {
284 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
285 }
286 }
287
288 language_model::MessageContent::Text(uri.as_link().to_string())
289 }
290 };
291
292 message.content.push(chunk);
293 }
294
295 let len_before_context = message.content.len();
296
297 if file_context.len() > OPEN_FILES_TAG.len() {
298 file_context.push_str("</files>\n");
299 message
300 .content
301 .push(language_model::MessageContent::Text(file_context));
302 }
303
304 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
305 directory_context.push_str("</directories>\n");
306 message
307 .content
308 .push(language_model::MessageContent::Text(directory_context));
309 }
310
311 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
312 symbol_context.push_str("</symbols>\n");
313 message
314 .content
315 .push(language_model::MessageContent::Text(symbol_context));
316 }
317
318 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
319 selection_context.push_str("</selections>\n");
320 message
321 .content
322 .push(language_model::MessageContent::Text(selection_context));
323 }
324
325 if thread_context.len() > OPEN_THREADS_TAG.len() {
326 thread_context.push_str("</threads>\n");
327 message
328 .content
329 .push(language_model::MessageContent::Text(thread_context));
330 }
331
332 if fetch_context.len() > OPEN_FETCH_TAG.len() {
333 fetch_context.push_str("</fetched_urls>\n");
334 message
335 .content
336 .push(language_model::MessageContent::Text(fetch_context));
337 }
338
339 if rules_context.len() > OPEN_RULES_TAG.len() {
340 rules_context.push_str("</user_rules>\n");
341 message
342 .content
343 .push(language_model::MessageContent::Text(rules_context));
344 }
345
346 if message.content.len() > len_before_context {
347 message.content.insert(
348 len_before_context,
349 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
350 );
351 message
352 .content
353 .push(language_model::MessageContent::Text("</context>".into()));
354 }
355
356 message
357 }
358}
359
360fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
361 let mut result = String::new();
362
363 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
364 let _ = write!(result, "{} ", extension);
365 }
366
367 let _ = write!(result, "{}", full_path.display());
368
369 if let Some(range) = line_range {
370 if range.start() == range.end() {
371 let _ = write!(result, ":{}", range.start() + 1);
372 } else {
373 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
374 }
375 }
376
377 result
378}
379
380impl AgentMessage {
381 pub fn to_markdown(&self) -> String {
382 let mut markdown = String::from("## Assistant\n\n");
383
384 for content in &self.content {
385 match content {
386 AgentMessageContent::Text(text) => {
387 markdown.push_str(text);
388 markdown.push('\n');
389 }
390 AgentMessageContent::Thinking { text, .. } => {
391 markdown.push_str("<think>");
392 markdown.push_str(text);
393 markdown.push_str("</think>\n");
394 }
395 AgentMessageContent::RedactedThinking(_) => {
396 markdown.push_str("<redacted_thinking />\n")
397 }
398 AgentMessageContent::ToolUse(tool_use) => {
399 markdown.push_str(&format!(
400 "**Tool Use**: {} (ID: {})\n",
401 tool_use.name, tool_use.id
402 ));
403 markdown.push_str(&format!(
404 "{}\n",
405 MarkdownCodeBlock {
406 tag: "json",
407 text: &format!("{:#}", tool_use.input)
408 }
409 ));
410 }
411 }
412 }
413
414 for tool_result in self.tool_results.values() {
415 markdown.push_str(&format!(
416 "**Tool Result**: {} (ID: {})\n\n",
417 tool_result.tool_name, tool_result.tool_use_id
418 ));
419 if tool_result.is_error {
420 markdown.push_str("**ERROR:**\n");
421 }
422
423 match &tool_result.content {
424 LanguageModelToolResultContent::Text(text) => {
425 writeln!(markdown, "{text}\n").ok();
426 }
427 LanguageModelToolResultContent::Image(_) => {
428 writeln!(markdown, "<image />\n").ok();
429 }
430 }
431
432 if let Some(output) = tool_result.output.as_ref() {
433 writeln!(
434 markdown,
435 "**Debug Output**:\n\n```json\n{}\n```\n",
436 serde_json::to_string_pretty(output).unwrap()
437 )
438 .unwrap();
439 }
440 }
441
442 markdown
443 }
444
445 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
446 let mut assistant_message = LanguageModelRequestMessage {
447 role: Role::Assistant,
448 content: Vec::with_capacity(self.content.len()),
449 cache: false,
450 reasoning_details: self.reasoning_details.clone(),
451 };
452 for chunk in &self.content {
453 match chunk {
454 AgentMessageContent::Text(text) => {
455 assistant_message
456 .content
457 .push(language_model::MessageContent::Text(text.clone()));
458 }
459 AgentMessageContent::Thinking { text, signature } => {
460 assistant_message
461 .content
462 .push(language_model::MessageContent::Thinking {
463 text: text.clone(),
464 signature: signature.clone(),
465 });
466 }
467 AgentMessageContent::RedactedThinking(value) => {
468 assistant_message.content.push(
469 language_model::MessageContent::RedactedThinking(value.clone()),
470 );
471 }
472 AgentMessageContent::ToolUse(tool_use) => {
473 if self.tool_results.contains_key(&tool_use.id) {
474 assistant_message
475 .content
476 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
477 }
478 }
479 };
480 }
481
482 let mut user_message = LanguageModelRequestMessage {
483 role: Role::User,
484 content: Vec::new(),
485 cache: false,
486 reasoning_details: None,
487 };
488
489 for tool_result in self.tool_results.values() {
490 let mut tool_result = tool_result.clone();
491 // Surprisingly, the API fails if we return an empty string here.
492 // It thinks we are sending a tool use without a tool result.
493 if tool_result.content.is_empty() {
494 tool_result.content = "<Tool returned an empty string>".into();
495 }
496 user_message
497 .content
498 .push(language_model::MessageContent::ToolResult(tool_result));
499 }
500
501 let mut messages = Vec::new();
502 if !assistant_message.content.is_empty() {
503 messages.push(assistant_message);
504 }
505 if !user_message.content.is_empty() {
506 messages.push(user_message);
507 }
508 messages
509 }
510}
511
512#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
513pub struct AgentMessage {
514 pub content: Vec<AgentMessageContent>,
515 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
516 pub reasoning_details: Option<serde_json::Value>,
517}
518
519#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
520pub enum AgentMessageContent {
521 Text(String),
522 Thinking {
523 text: String,
524 signature: Option<String>,
525 },
526 RedactedThinking(String),
527 ToolUse(LanguageModelToolUse),
528}
529
530pub trait TerminalHandle {
531 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
532 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
533 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
534 fn kill(&self, cx: &AsyncApp) -> Result<()>;
535}
536
537pub trait ThreadEnvironment {
538 fn create_terminal(
539 &self,
540 command: String,
541 cwd: Option<PathBuf>,
542 output_byte_limit: Option<u64>,
543 cx: &mut AsyncApp,
544 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
545}
546
547#[derive(Debug)]
548pub enum ThreadEvent {
549 UserMessage(UserMessage),
550 AgentText(String),
551 AgentThinking(String),
552 ToolCall(acp::ToolCall),
553 ToolCallUpdate(acp_thread::ToolCallUpdate),
554 ToolCallAuthorization(ToolCallAuthorization),
555 Retry(acp_thread::RetryStatus),
556 Stop(acp::StopReason),
557}
558
559#[derive(Debug)]
560pub struct NewTerminal {
561 pub command: String,
562 pub output_byte_limit: Option<u64>,
563 pub cwd: Option<PathBuf>,
564 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
565}
566
567#[derive(Debug)]
568pub struct ToolCallAuthorization {
569 pub tool_call: acp::ToolCallUpdate,
570 pub options: Vec<acp::PermissionOption>,
571 pub response: oneshot::Sender<acp::PermissionOptionId>,
572}
573
574#[derive(Debug, thiserror::Error)]
575enum CompletionError {
576 #[error("max tokens")]
577 MaxTokens,
578 #[error("refusal")]
579 Refusal,
580 #[error(transparent)]
581 Other(#[from] anyhow::Error),
582}
583
584pub struct Thread {
585 id: acp::SessionId,
586 prompt_id: PromptId,
587 updated_at: DateTime<Utc>,
588 title: Option<SharedString>,
589 pending_title_generation: Option<Task<()>>,
590 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
591 summary: Option<SharedString>,
592 messages: Vec<Message>,
593 user_store: Entity<UserStore>,
594 completion_mode: CompletionMode,
595 /// Holds the task that handles agent interaction until the end of the turn.
596 /// Survives across multiple requests as the model performs tool calls and
597 /// we run tools, report their results.
598 running_turn: Option<RunningTurn>,
599 pending_message: Option<AgentMessage>,
600 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
601 tool_use_limit_reached: bool,
602 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
603 #[allow(unused)]
604 cumulative_token_usage: TokenUsage,
605 #[allow(unused)]
606 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
607 context_server_registry: Entity<ContextServerRegistry>,
608 profile_id: AgentProfileId,
609 project_context: Entity<ProjectContext>,
610 templates: Arc<Templates>,
611 model: Option<Arc<dyn LanguageModel>>,
612 summarization_model: Option<Arc<dyn LanguageModel>>,
613 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
614 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
615 pub(crate) project: Entity<Project>,
616 pub(crate) action_log: Entity<ActionLog>,
617 /// Tracks the last time files were read by the agent, to detect external modifications
618 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
619}
620
621impl Thread {
622 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
623 let image = model.map_or(true, |model| model.supports_images());
624 acp::PromptCapabilities::new()
625 .image(image)
626 .embedded_context(true)
627 }
628
629 pub fn new(
630 project: Entity<Project>,
631 project_context: Entity<ProjectContext>,
632 context_server_registry: Entity<ContextServerRegistry>,
633 templates: Arc<Templates>,
634 model: Option<Arc<dyn LanguageModel>>,
635 cx: &mut Context<Self>,
636 ) -> Self {
637 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
638 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
639 let (prompt_capabilities_tx, prompt_capabilities_rx) =
640 watch::channel(Self::prompt_capabilities(model.as_deref()));
641 Self {
642 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
643 prompt_id: PromptId::new(),
644 updated_at: Utc::now(),
645 title: None,
646 pending_title_generation: None,
647 pending_summary_generation: None,
648 summary: None,
649 messages: Vec::new(),
650 user_store: project.read(cx).user_store(),
651 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
652 running_turn: None,
653 pending_message: None,
654 tools: BTreeMap::default(),
655 tool_use_limit_reached: false,
656 request_token_usage: HashMap::default(),
657 cumulative_token_usage: TokenUsage::default(),
658 initial_project_snapshot: {
659 let project_snapshot = Self::project_snapshot(project.clone(), cx);
660 cx.foreground_executor()
661 .spawn(async move { Some(project_snapshot.await) })
662 .shared()
663 },
664 context_server_registry,
665 profile_id,
666 project_context,
667 templates,
668 model,
669 summarization_model: None,
670 prompt_capabilities_tx,
671 prompt_capabilities_rx,
672 project,
673 action_log,
674 file_read_times: HashMap::default(),
675 }
676 }
677
678 pub fn id(&self) -> &acp::SessionId {
679 &self.id
680 }
681
682 pub fn replay(
683 &mut self,
684 cx: &mut Context<Self>,
685 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
686 let (tx, rx) = mpsc::unbounded();
687 let stream = ThreadEventStream(tx);
688 for message in &self.messages {
689 match message {
690 Message::User(user_message) => stream.send_user_message(user_message),
691 Message::Agent(assistant_message) => {
692 for content in &assistant_message.content {
693 match content {
694 AgentMessageContent::Text(text) => stream.send_text(text),
695 AgentMessageContent::Thinking { text, .. } => {
696 stream.send_thinking(text)
697 }
698 AgentMessageContent::RedactedThinking(_) => {}
699 AgentMessageContent::ToolUse(tool_use) => {
700 self.replay_tool_call(
701 tool_use,
702 assistant_message.tool_results.get(&tool_use.id),
703 &stream,
704 cx,
705 );
706 }
707 }
708 }
709 }
710 Message::Resume => {}
711 }
712 }
713 rx
714 }
715
716 fn replay_tool_call(
717 &self,
718 tool_use: &LanguageModelToolUse,
719 tool_result: Option<&LanguageModelToolResult>,
720 stream: &ThreadEventStream,
721 cx: &mut Context<Self>,
722 ) {
723 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
724 self.context_server_registry
725 .read(cx)
726 .servers()
727 .find_map(|(_, tools)| {
728 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
729 Some(tool.clone())
730 } else {
731 None
732 }
733 })
734 });
735
736 let Some(tool) = tool else {
737 stream
738 .0
739 .unbounded_send(Ok(ThreadEvent::ToolCall(
740 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
741 .status(acp::ToolCallStatus::Failed)
742 .raw_input(tool_use.input.clone()),
743 )))
744 .ok();
745 return;
746 };
747
748 let title = tool.initial_title(tool_use.input.clone(), cx);
749 let kind = tool.kind();
750 stream.send_tool_call(
751 &tool_use.id,
752 &tool_use.name,
753 title,
754 kind,
755 tool_use.input.clone(),
756 );
757
758 let output = tool_result
759 .as_ref()
760 .and_then(|result| result.output.clone());
761 if let Some(output) = output.clone() {
762 let tool_event_stream = ToolCallEventStream::new(
763 tool_use.id.clone(),
764 stream.clone(),
765 Some(self.project.read(cx).fs().clone()),
766 );
767 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
768 .log_err();
769 }
770
771 stream.update_tool_call_fields(
772 &tool_use.id,
773 acp::ToolCallUpdateFields::new()
774 .status(
775 tool_result
776 .as_ref()
777 .map_or(acp::ToolCallStatus::Failed, |result| {
778 if result.is_error {
779 acp::ToolCallStatus::Failed
780 } else {
781 acp::ToolCallStatus::Completed
782 }
783 }),
784 )
785 .raw_output(output),
786 );
787 }
788
789 pub fn from_db(
790 id: acp::SessionId,
791 db_thread: DbThread,
792 project: Entity<Project>,
793 project_context: Entity<ProjectContext>,
794 context_server_registry: Entity<ContextServerRegistry>,
795 templates: Arc<Templates>,
796 cx: &mut Context<Self>,
797 ) -> Self {
798 let profile_id = db_thread
799 .profile
800 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
801
802 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
803 db_thread
804 .model
805 .and_then(|model| {
806 let model = SelectedModel {
807 provider: model.provider.clone().into(),
808 model: model.model.into(),
809 };
810 registry.select_model(&model, cx)
811 })
812 .or_else(|| registry.default_model())
813 .map(|model| model.model)
814 });
815
816 if model.is_none() {
817 model = Self::resolve_profile_model(&profile_id, cx);
818 }
819 if model.is_none() {
820 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
821 registry.default_model().map(|model| model.model)
822 });
823 }
824
825 let (prompt_capabilities_tx, prompt_capabilities_rx) =
826 watch::channel(Self::prompt_capabilities(model.as_deref()));
827
828 let action_log = cx.new(|_| ActionLog::new(project.clone()));
829
830 Self {
831 id,
832 prompt_id: PromptId::new(),
833 title: if db_thread.title.is_empty() {
834 None
835 } else {
836 Some(db_thread.title.clone())
837 },
838 pending_title_generation: None,
839 pending_summary_generation: None,
840 summary: db_thread.detailed_summary,
841 messages: db_thread.messages,
842 user_store: project.read(cx).user_store(),
843 completion_mode: db_thread.completion_mode.unwrap_or_default(),
844 running_turn: None,
845 pending_message: None,
846 tools: BTreeMap::default(),
847 tool_use_limit_reached: false,
848 request_token_usage: db_thread.request_token_usage.clone(),
849 cumulative_token_usage: db_thread.cumulative_token_usage,
850 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
851 context_server_registry,
852 profile_id,
853 project_context,
854 templates,
855 model,
856 summarization_model: None,
857 project,
858 action_log,
859 updated_at: db_thread.updated_at,
860 prompt_capabilities_tx,
861 prompt_capabilities_rx,
862 file_read_times: HashMap::default(),
863 }
864 }
865
866 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
867 let initial_project_snapshot = self.initial_project_snapshot.clone();
868 let mut thread = DbThread {
869 title: self.title(),
870 messages: self.messages.clone(),
871 updated_at: self.updated_at,
872 detailed_summary: self.summary.clone(),
873 initial_project_snapshot: None,
874 cumulative_token_usage: self.cumulative_token_usage,
875 request_token_usage: self.request_token_usage.clone(),
876 model: self.model.as_ref().map(|model| DbLanguageModel {
877 provider: model.provider_id().to_string(),
878 model: model.name().0.to_string(),
879 }),
880 completion_mode: Some(self.completion_mode),
881 profile: Some(self.profile_id.clone()),
882 };
883
884 cx.background_spawn(async move {
885 let initial_project_snapshot = initial_project_snapshot.await;
886 thread.initial_project_snapshot = initial_project_snapshot;
887 thread
888 })
889 }
890
891 /// Create a snapshot of the current project state including git information and unsaved buffers.
892 fn project_snapshot(
893 project: Entity<Project>,
894 cx: &mut Context<Self>,
895 ) -> Task<Arc<ProjectSnapshot>> {
896 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
897 cx.spawn(async move |_, _| {
898 let snapshot = task.await;
899
900 Arc::new(ProjectSnapshot {
901 worktree_snapshots: snapshot.worktree_snapshots,
902 timestamp: Utc::now(),
903 })
904 })
905 }
906
907 pub fn project_context(&self) -> &Entity<ProjectContext> {
908 &self.project_context
909 }
910
911 pub fn project(&self) -> &Entity<Project> {
912 &self.project
913 }
914
915 pub fn action_log(&self) -> &Entity<ActionLog> {
916 &self.action_log
917 }
918
919 pub fn is_empty(&self) -> bool {
920 self.messages.is_empty() && self.title.is_none()
921 }
922
923 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
924 self.model.as_ref()
925 }
926
927 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
928 let old_usage = self.latest_token_usage();
929 self.model = Some(model);
930 let new_caps = Self::prompt_capabilities(self.model.as_deref());
931 let new_usage = self.latest_token_usage();
932 if old_usage != new_usage {
933 cx.emit(TokenUsageUpdated(new_usage));
934 }
935 self.prompt_capabilities_tx.send(new_caps).log_err();
936 cx.notify()
937 }
938
939 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
940 self.summarization_model.as_ref()
941 }
942
943 pub fn set_summarization_model(
944 &mut self,
945 model: Option<Arc<dyn LanguageModel>>,
946 cx: &mut Context<Self>,
947 ) {
948 self.summarization_model = model;
949 cx.notify()
950 }
951
952 pub fn completion_mode(&self) -> CompletionMode {
953 self.completion_mode
954 }
955
956 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
957 let old_usage = self.latest_token_usage();
958 self.completion_mode = mode;
959 let new_usage = self.latest_token_usage();
960 if old_usage != new_usage {
961 cx.emit(TokenUsageUpdated(new_usage));
962 }
963 cx.notify()
964 }
965
966 #[cfg(any(test, feature = "test-support"))]
967 pub fn last_message(&self) -> Option<Message> {
968 if let Some(message) = self.pending_message.clone() {
969 Some(Message::Agent(message))
970 } else {
971 self.messages.last().cloned()
972 }
973 }
974
975 pub fn add_default_tools(
976 &mut self,
977 environment: Rc<dyn ThreadEnvironment>,
978 cx: &mut Context<Self>,
979 ) {
980 let language_registry = self.project.read(cx).languages().clone();
981 self.add_tool(CopyPathTool::new(self.project.clone()));
982 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
983 self.add_tool(DeletePathTool::new(
984 self.project.clone(),
985 self.action_log.clone(),
986 ));
987 self.add_tool(DiagnosticsTool::new(self.project.clone()));
988 self.add_tool(EditFileTool::new(
989 self.project.clone(),
990 cx.weak_entity(),
991 language_registry,
992 Templates::new(),
993 ));
994 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
995 self.add_tool(FindPathTool::new(self.project.clone()));
996 self.add_tool(GrepTool::new(self.project.clone()));
997 self.add_tool(ListDirectoryTool::new(self.project.clone()));
998 self.add_tool(MovePathTool::new(self.project.clone()));
999 self.add_tool(NowTool);
1000 self.add_tool(OpenTool::new(self.project.clone()));
1001 self.add_tool(ReadFileTool::new(
1002 cx.weak_entity(),
1003 self.project.clone(),
1004 self.action_log.clone(),
1005 ));
1006 self.add_tool(SaveFileTool::new(self.project.clone()));
1007 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1008 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1009 self.add_tool(ThinkingTool);
1010 self.add_tool(WebSearchTool);
1011 }
1012
1013 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1014 self.tools.insert(T::name().into(), tool.erase());
1015 }
1016
1017 pub fn remove_tool(&mut self, name: &str) -> bool {
1018 self.tools.remove(name).is_some()
1019 }
1020
1021 pub fn profile(&self) -> &AgentProfileId {
1022 &self.profile_id
1023 }
1024
1025 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1026 if self.profile_id == profile_id {
1027 return;
1028 }
1029
1030 self.profile_id = profile_id;
1031
1032 // Swap to the profile's preferred model when available.
1033 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1034 self.set_model(model, cx);
1035 }
1036 }
1037
1038 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1039 if let Some(running_turn) = self.running_turn.take() {
1040 running_turn.cancel();
1041 }
1042 self.flush_pending_message(cx);
1043 }
1044
1045 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1046 let Some(last_user_message) = self.last_user_message() else {
1047 return;
1048 };
1049
1050 self.request_token_usage
1051 .insert(last_user_message.id.clone(), update);
1052 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1053 cx.notify();
1054 }
1055
1056 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1057 self.cancel(cx);
1058 let Some(position) = self.messages.iter().position(
1059 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1060 ) else {
1061 return Err(anyhow!("Message not found"));
1062 };
1063
1064 for message in self.messages.drain(position..) {
1065 match message {
1066 Message::User(message) => {
1067 self.request_token_usage.remove(&message.id);
1068 }
1069 Message::Agent(_) | Message::Resume => {}
1070 }
1071 }
1072 self.clear_summary();
1073 cx.notify();
1074 Ok(())
1075 }
1076
1077 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1078 let last_user_message = self.last_user_message()?;
1079 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1080 Some(*tokens)
1081 }
1082
1083 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1084 let usage = self.latest_request_token_usage()?;
1085 let model = self.model.clone()?;
1086 Some(acp_thread::TokenUsage {
1087 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1088 used_tokens: usage.total_tokens(),
1089 })
1090 }
1091
1092 /// Look up the active profile and resolve its preferred model if one is configured.
1093 fn resolve_profile_model(
1094 profile_id: &AgentProfileId,
1095 cx: &mut Context<Self>,
1096 ) -> Option<Arc<dyn LanguageModel>> {
1097 let selection = AgentSettings::get_global(cx)
1098 .profiles
1099 .get(profile_id)?
1100 .default_model
1101 .clone()?;
1102 Self::resolve_model_from_selection(&selection, cx)
1103 }
1104
1105 /// Translate a stored model selection into the configured model from the registry.
1106 fn resolve_model_from_selection(
1107 selection: &LanguageModelSelection,
1108 cx: &mut Context<Self>,
1109 ) -> Option<Arc<dyn LanguageModel>> {
1110 let selected = SelectedModel {
1111 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1112 model: LanguageModelId::from(selection.model.clone()),
1113 };
1114 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1115 registry
1116 .select_model(&selected, cx)
1117 .map(|configured| configured.model)
1118 })
1119 }
1120
1121 pub fn resume(
1122 &mut self,
1123 cx: &mut Context<Self>,
1124 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1125 self.messages.push(Message::Resume);
1126 cx.notify();
1127
1128 log::debug!("Total messages in thread: {}", self.messages.len());
1129 self.run_turn(cx)
1130 }
1131
1132 /// Sending a message results in the model streaming a response, which could include tool calls.
1133 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1134 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1135 pub fn send<T>(
1136 &mut self,
1137 id: UserMessageId,
1138 content: impl IntoIterator<Item = T>,
1139 cx: &mut Context<Self>,
1140 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1141 where
1142 T: Into<UserMessageContent>,
1143 {
1144 let model = self.model().context("No language model configured")?;
1145
1146 log::info!("Thread::send called with model: {}", model.name().0);
1147 self.advance_prompt_id();
1148
1149 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1150 log::debug!("Thread::send content: {:?}", content);
1151
1152 self.messages
1153 .push(Message::User(UserMessage { id, content }));
1154 cx.notify();
1155
1156 log::debug!("Total messages in thread: {}", self.messages.len());
1157 self.run_turn(cx)
1158 }
1159
1160 #[cfg(feature = "eval")]
1161 pub fn proceed(
1162 &mut self,
1163 cx: &mut Context<Self>,
1164 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1165 self.run_turn(cx)
1166 }
1167
1168 fn run_turn(
1169 &mut self,
1170 cx: &mut Context<Self>,
1171 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1172 self.cancel(cx);
1173
1174 let model = self.model.clone().context("No language model configured")?;
1175 let profile = AgentSettings::get_global(cx)
1176 .profiles
1177 .get(&self.profile_id)
1178 .context("Profile not found")?;
1179 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1180 let event_stream = ThreadEventStream(events_tx);
1181 let message_ix = self.messages.len().saturating_sub(1);
1182 self.tool_use_limit_reached = false;
1183 self.clear_summary();
1184 self.running_turn = Some(RunningTurn {
1185 event_stream: event_stream.clone(),
1186 tools: self.enabled_tools(profile, &model, cx),
1187 _task: cx.spawn(async move |this, cx| {
1188 log::debug!("Starting agent turn execution");
1189
1190 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1191 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1192
1193 match turn_result {
1194 Ok(()) => {
1195 log::debug!("Turn execution completed");
1196 event_stream.send_stop(acp::StopReason::EndTurn);
1197 }
1198 Err(error) => {
1199 log::error!("Turn execution failed: {:?}", error);
1200 match error.downcast::<CompletionError>() {
1201 Ok(CompletionError::Refusal) => {
1202 event_stream.send_stop(acp::StopReason::Refusal);
1203 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1204 }
1205 Ok(CompletionError::MaxTokens) => {
1206 event_stream.send_stop(acp::StopReason::MaxTokens);
1207 }
1208 Ok(CompletionError::Other(error)) | Err(error) => {
1209 event_stream.send_error(error);
1210 }
1211 }
1212 }
1213 }
1214
1215 _ = this.update(cx, |this, _| this.running_turn.take());
1216 }),
1217 });
1218 Ok(events_rx)
1219 }
1220
1221 async fn run_turn_internal(
1222 this: &WeakEntity<Self>,
1223 model: Arc<dyn LanguageModel>,
1224 event_stream: &ThreadEventStream,
1225 cx: &mut AsyncApp,
1226 ) -> Result<()> {
1227 let mut attempt = 0;
1228 let mut intent = CompletionIntent::UserPrompt;
1229 loop {
1230 let request =
1231 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1232
1233 telemetry::event!(
1234 "Agent Thread Completion",
1235 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1236 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1237 model = model.telemetry_id(),
1238 model_provider = model.provider_id().to_string(),
1239 attempt
1240 );
1241
1242 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1243
1244 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1245 Ok(events) => (events, None),
1246 Err(err) => (stream::empty().boxed(), Some(err)),
1247 };
1248 let mut tool_results = FuturesUnordered::new();
1249 while let Some(event) = events.next().await {
1250 log::trace!("Received completion event: {:?}", event);
1251 match event {
1252 Ok(event) => {
1253 tool_results.extend(this.update(cx, |this, cx| {
1254 this.handle_completion_event(event, event_stream, cx)
1255 })??);
1256 }
1257 Err(err) => {
1258 error = Some(err);
1259 break;
1260 }
1261 }
1262 }
1263
1264 let end_turn = tool_results.is_empty();
1265 while let Some(tool_result) = tool_results.next().await {
1266 log::debug!("Tool finished {:?}", tool_result);
1267
1268 event_stream.update_tool_call_fields(
1269 &tool_result.tool_use_id,
1270 acp::ToolCallUpdateFields::new()
1271 .status(if tool_result.is_error {
1272 acp::ToolCallStatus::Failed
1273 } else {
1274 acp::ToolCallStatus::Completed
1275 })
1276 .raw_output(tool_result.output.clone()),
1277 );
1278 this.update(cx, |this, _cx| {
1279 this.pending_message()
1280 .tool_results
1281 .insert(tool_result.tool_use_id.clone(), tool_result);
1282 })?;
1283 }
1284
1285 this.update(cx, |this, cx| {
1286 this.flush_pending_message(cx);
1287 if this.title.is_none() && this.pending_title_generation.is_none() {
1288 this.generate_title(cx);
1289 }
1290 })?;
1291
1292 if let Some(error) = error {
1293 attempt += 1;
1294 let retry = this.update(cx, |this, cx| {
1295 let user_store = this.user_store.read(cx);
1296 this.handle_completion_error(error, attempt, user_store.plan())
1297 })??;
1298 let timer = cx.background_executor().timer(retry.duration);
1299 event_stream.send_retry(retry);
1300 timer.await;
1301 this.update(cx, |this, _cx| {
1302 if let Some(Message::Agent(message)) = this.messages.last() {
1303 if message.tool_results.is_empty() {
1304 intent = CompletionIntent::UserPrompt;
1305 this.messages.push(Message::Resume);
1306 }
1307 }
1308 })?;
1309 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1310 return Err(language_model::ToolUseLimitReachedError.into());
1311 } else if end_turn {
1312 return Ok(());
1313 } else {
1314 intent = CompletionIntent::ToolResults;
1315 attempt = 0;
1316 }
1317 }
1318 }
1319
1320 fn handle_completion_error(
1321 &mut self,
1322 error: LanguageModelCompletionError,
1323 attempt: u8,
1324 plan: Option<Plan>,
1325 ) -> Result<acp_thread::RetryStatus> {
1326 let Some(model) = self.model.as_ref() else {
1327 return Err(anyhow!(error));
1328 };
1329
1330 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1331 match plan {
1332 Some(Plan::V2(_)) => true,
1333 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1334 None => false,
1335 }
1336 } else {
1337 true
1338 };
1339
1340 if !auto_retry {
1341 return Err(anyhow!(error));
1342 }
1343
1344 let Some(strategy) = Self::retry_strategy_for(&error) else {
1345 return Err(anyhow!(error));
1346 };
1347
1348 let max_attempts = match &strategy {
1349 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1350 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1351 };
1352
1353 if attempt > max_attempts {
1354 return Err(anyhow!(error));
1355 }
1356
1357 let delay = match &strategy {
1358 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1359 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1360 Duration::from_secs(delay_secs)
1361 }
1362 RetryStrategy::Fixed { delay, .. } => *delay,
1363 };
1364 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1365
1366 Ok(acp_thread::RetryStatus {
1367 last_error: error.to_string().into(),
1368 attempt: attempt as usize,
1369 max_attempts: max_attempts as usize,
1370 started_at: Instant::now(),
1371 duration: delay,
1372 })
1373 }
1374
1375 /// A helper method that's called on every streamed completion event.
1376 /// Returns an optional tool result task, which the main agentic loop will
1377 /// send back to the model when it resolves.
1378 fn handle_completion_event(
1379 &mut self,
1380 event: LanguageModelCompletionEvent,
1381 event_stream: &ThreadEventStream,
1382 cx: &mut Context<Self>,
1383 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1384 log::trace!("Handling streamed completion event: {:?}", event);
1385 use LanguageModelCompletionEvent::*;
1386
1387 match event {
1388 StartMessage { .. } => {
1389 self.flush_pending_message(cx);
1390 self.pending_message = Some(AgentMessage::default());
1391 }
1392 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1393 Thinking { text, signature } => {
1394 self.handle_thinking_event(text, signature, event_stream, cx)
1395 }
1396 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1397 ReasoningDetails(details) => {
1398 let last_message = self.pending_message();
1399 // Store the last non-empty reasoning_details (overwrites earlier ones)
1400 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1401 if let serde_json::Value::Array(ref arr) = details {
1402 if !arr.is_empty() {
1403 last_message.reasoning_details = Some(details);
1404 }
1405 } else {
1406 last_message.reasoning_details = Some(details);
1407 }
1408 }
1409 ToolUse(tool_use) => {
1410 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1411 }
1412 ToolUseJsonParseError {
1413 id,
1414 tool_name,
1415 raw_input,
1416 json_parse_error,
1417 } => {
1418 return Ok(Some(Task::ready(
1419 self.handle_tool_use_json_parse_error_event(
1420 id,
1421 tool_name,
1422 raw_input,
1423 json_parse_error,
1424 ),
1425 )));
1426 }
1427 UsageUpdate(usage) => {
1428 telemetry::event!(
1429 "Agent Thread Completion Usage Updated",
1430 thread_id = self.id.to_string(),
1431 prompt_id = self.prompt_id.to_string(),
1432 model = self.model.as_ref().map(|m| m.telemetry_id()),
1433 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1434 input_tokens = usage.input_tokens,
1435 output_tokens = usage.output_tokens,
1436 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1437 cache_read_input_tokens = usage.cache_read_input_tokens,
1438 );
1439 self.update_token_usage(usage, cx);
1440 }
1441 UsageUpdated { amount, limit } => {
1442 self.update_model_request_usage(amount, limit, cx);
1443 }
1444 ToolUseLimitReached => {
1445 self.tool_use_limit_reached = true;
1446 }
1447 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1448 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1449 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1450 Started | Queued { .. } => {}
1451 }
1452
1453 Ok(None)
1454 }
1455
1456 fn handle_text_event(
1457 &mut self,
1458 new_text: String,
1459 event_stream: &ThreadEventStream,
1460 cx: &mut Context<Self>,
1461 ) {
1462 event_stream.send_text(&new_text);
1463
1464 let last_message = self.pending_message();
1465 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1466 text.push_str(&new_text);
1467 } else {
1468 last_message
1469 .content
1470 .push(AgentMessageContent::Text(new_text));
1471 }
1472
1473 cx.notify();
1474 }
1475
1476 fn handle_thinking_event(
1477 &mut self,
1478 new_text: String,
1479 new_signature: Option<String>,
1480 event_stream: &ThreadEventStream,
1481 cx: &mut Context<Self>,
1482 ) {
1483 event_stream.send_thinking(&new_text);
1484
1485 let last_message = self.pending_message();
1486 if let Some(AgentMessageContent::Thinking { text, signature }) =
1487 last_message.content.last_mut()
1488 {
1489 text.push_str(&new_text);
1490 *signature = new_signature.or(signature.take());
1491 } else {
1492 last_message.content.push(AgentMessageContent::Thinking {
1493 text: new_text,
1494 signature: new_signature,
1495 });
1496 }
1497
1498 cx.notify();
1499 }
1500
1501 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1502 let last_message = self.pending_message();
1503 last_message
1504 .content
1505 .push(AgentMessageContent::RedactedThinking(data));
1506 cx.notify();
1507 }
1508
1509 fn handle_tool_use_event(
1510 &mut self,
1511 tool_use: LanguageModelToolUse,
1512 event_stream: &ThreadEventStream,
1513 cx: &mut Context<Self>,
1514 ) -> Option<Task<LanguageModelToolResult>> {
1515 cx.notify();
1516
1517 let tool = self.tool(tool_use.name.as_ref());
1518 let mut title = SharedString::from(&tool_use.name);
1519 let mut kind = acp::ToolKind::Other;
1520 if let Some(tool) = tool.as_ref() {
1521 title = tool.initial_title(tool_use.input.clone(), cx);
1522 kind = tool.kind();
1523 }
1524
1525 // Ensure the last message ends in the current tool use
1526 let last_message = self.pending_message();
1527 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1528 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1529 if last_tool_use.id == tool_use.id {
1530 *last_tool_use = tool_use.clone();
1531 false
1532 } else {
1533 true
1534 }
1535 } else {
1536 true
1537 }
1538 });
1539
1540 if push_new_tool_use {
1541 event_stream.send_tool_call(
1542 &tool_use.id,
1543 &tool_use.name,
1544 title,
1545 kind,
1546 tool_use.input.clone(),
1547 );
1548 last_message
1549 .content
1550 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1551 } else {
1552 event_stream.update_tool_call_fields(
1553 &tool_use.id,
1554 acp::ToolCallUpdateFields::new()
1555 .title(title.as_str())
1556 .kind(kind)
1557 .raw_input(tool_use.input.clone()),
1558 );
1559 }
1560
1561 if !tool_use.is_input_complete {
1562 return None;
1563 }
1564
1565 let Some(tool) = tool else {
1566 let content = format!("No tool named {} exists", tool_use.name);
1567 return Some(Task::ready(LanguageModelToolResult {
1568 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1569 tool_use_id: tool_use.id,
1570 tool_name: tool_use.name,
1571 is_error: true,
1572 output: None,
1573 }));
1574 };
1575
1576 let fs = self.project.read(cx).fs().clone();
1577 let tool_event_stream =
1578 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1579 tool_event_stream.update_fields(
1580 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1581 );
1582 let supports_images = self.model().is_some_and(|model| model.supports_images());
1583 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1584 log::debug!("Running tool {}", tool_use.name);
1585 Some(cx.foreground_executor().spawn(async move {
1586 let tool_result = tool_result.await.and_then(|output| {
1587 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1588 && !supports_images
1589 {
1590 return Err(anyhow!(
1591 "Attempted to read an image, but this model doesn't support it.",
1592 ));
1593 }
1594 Ok(output)
1595 });
1596
1597 match tool_result {
1598 Ok(output) => LanguageModelToolResult {
1599 tool_use_id: tool_use.id,
1600 tool_name: tool_use.name,
1601 is_error: false,
1602 content: output.llm_output,
1603 output: Some(output.raw_output),
1604 },
1605 Err(error) => LanguageModelToolResult {
1606 tool_use_id: tool_use.id,
1607 tool_name: tool_use.name,
1608 is_error: true,
1609 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1610 output: Some(error.to_string().into()),
1611 },
1612 }
1613 }))
1614 }
1615
1616 fn handle_tool_use_json_parse_error_event(
1617 &mut self,
1618 tool_use_id: LanguageModelToolUseId,
1619 tool_name: Arc<str>,
1620 raw_input: Arc<str>,
1621 json_parse_error: String,
1622 ) -> LanguageModelToolResult {
1623 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1624 LanguageModelToolResult {
1625 tool_use_id,
1626 tool_name,
1627 is_error: true,
1628 content: LanguageModelToolResultContent::Text(tool_output.into()),
1629 output: Some(serde_json::Value::String(raw_input.to_string())),
1630 }
1631 }
1632
1633 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1634 self.project
1635 .read(cx)
1636 .user_store()
1637 .update(cx, |user_store, cx| {
1638 user_store.update_model_request_usage(
1639 ModelRequestUsage(RequestUsage {
1640 amount: amount as i32,
1641 limit,
1642 }),
1643 cx,
1644 )
1645 });
1646 }
1647
1648 pub fn title(&self) -> SharedString {
1649 self.title.clone().unwrap_or("New Thread".into())
1650 }
1651
1652 pub fn is_generating_summary(&self) -> bool {
1653 self.pending_summary_generation.is_some()
1654 }
1655
1656 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1657 if let Some(summary) = self.summary.as_ref() {
1658 return Task::ready(Some(summary.clone())).shared();
1659 }
1660 if let Some(task) = self.pending_summary_generation.clone() {
1661 return task;
1662 }
1663 let Some(model) = self.summarization_model.clone() else {
1664 log::error!("No summarization model available");
1665 return Task::ready(None).shared();
1666 };
1667 let mut request = LanguageModelRequest {
1668 intent: Some(CompletionIntent::ThreadContextSummarization),
1669 temperature: AgentSettings::temperature_for_model(&model, cx),
1670 ..Default::default()
1671 };
1672
1673 for message in &self.messages {
1674 request.messages.extend(message.to_request());
1675 }
1676
1677 request.messages.push(LanguageModelRequestMessage {
1678 role: Role::User,
1679 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1680 cache: false,
1681 reasoning_details: None,
1682 });
1683
1684 let task = cx
1685 .spawn(async move |this, cx| {
1686 let mut summary = String::new();
1687 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1688 while let Some(event) = messages.next().await {
1689 let event = event.log_err()?;
1690 let text = match event {
1691 LanguageModelCompletionEvent::Text(text) => text,
1692 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1693 this.update(cx, |thread, cx| {
1694 thread.update_model_request_usage(amount, limit, cx);
1695 })
1696 .ok()?;
1697 continue;
1698 }
1699 _ => continue,
1700 };
1701
1702 let mut lines = text.lines();
1703 summary.extend(lines.next());
1704 }
1705
1706 log::debug!("Setting summary: {}", summary);
1707 let summary = SharedString::from(summary);
1708
1709 this.update(cx, |this, cx| {
1710 this.summary = Some(summary.clone());
1711 this.pending_summary_generation = None;
1712 cx.notify()
1713 })
1714 .ok()?;
1715
1716 Some(summary)
1717 })
1718 .shared();
1719 self.pending_summary_generation = Some(task.clone());
1720 task
1721 }
1722
1723 fn generate_title(&mut self, cx: &mut Context<Self>) {
1724 let Some(model) = self.summarization_model.clone() else {
1725 return;
1726 };
1727
1728 log::debug!(
1729 "Generating title with model: {:?}",
1730 self.summarization_model.as_ref().map(|model| model.name())
1731 );
1732 let mut request = LanguageModelRequest {
1733 intent: Some(CompletionIntent::ThreadSummarization),
1734 temperature: AgentSettings::temperature_for_model(&model, cx),
1735 ..Default::default()
1736 };
1737
1738 for message in &self.messages {
1739 request.messages.extend(message.to_request());
1740 }
1741
1742 request.messages.push(LanguageModelRequestMessage {
1743 role: Role::User,
1744 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1745 cache: false,
1746 reasoning_details: None,
1747 });
1748 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1749 let mut title = String::new();
1750
1751 let generate = async {
1752 let mut messages = model.stream_completion(request, cx).await?;
1753 while let Some(event) = messages.next().await {
1754 let event = event?;
1755 let text = match event {
1756 LanguageModelCompletionEvent::Text(text) => text,
1757 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1758 this.update(cx, |thread, cx| {
1759 thread.update_model_request_usage(amount, limit, cx);
1760 })?;
1761 continue;
1762 }
1763 _ => continue,
1764 };
1765
1766 let mut lines = text.lines();
1767 title.extend(lines.next());
1768
1769 // Stop if the LLM generated multiple lines.
1770 if lines.next().is_some() {
1771 break;
1772 }
1773 }
1774 anyhow::Ok(())
1775 };
1776
1777 if generate.await.context("failed to generate title").is_ok() {
1778 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1779 }
1780 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1781 }));
1782 }
1783
1784 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1785 self.pending_title_generation = None;
1786 if Some(&title) != self.title.as_ref() {
1787 self.title = Some(title);
1788 cx.emit(TitleUpdated);
1789 cx.notify();
1790 }
1791 }
1792
1793 fn clear_summary(&mut self) {
1794 self.summary = None;
1795 self.pending_summary_generation = None;
1796 }
1797
1798 fn last_user_message(&self) -> Option<&UserMessage> {
1799 self.messages
1800 .iter()
1801 .rev()
1802 .find_map(|message| match message {
1803 Message::User(user_message) => Some(user_message),
1804 Message::Agent(_) => None,
1805 Message::Resume => None,
1806 })
1807 }
1808
1809 fn pending_message(&mut self) -> &mut AgentMessage {
1810 self.pending_message.get_or_insert_default()
1811 }
1812
1813 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1814 let Some(mut message) = self.pending_message.take() else {
1815 return;
1816 };
1817
1818 if message.content.is_empty() {
1819 return;
1820 }
1821
1822 for content in &message.content {
1823 let AgentMessageContent::ToolUse(tool_use) = content else {
1824 continue;
1825 };
1826
1827 if !message.tool_results.contains_key(&tool_use.id) {
1828 message.tool_results.insert(
1829 tool_use.id.clone(),
1830 LanguageModelToolResult {
1831 tool_use_id: tool_use.id.clone(),
1832 tool_name: tool_use.name.clone(),
1833 is_error: true,
1834 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1835 output: None,
1836 },
1837 );
1838 }
1839 }
1840
1841 self.messages.push(Message::Agent(message));
1842 self.updated_at = Utc::now();
1843 self.clear_summary();
1844 cx.notify()
1845 }
1846
1847 pub(crate) fn build_completion_request(
1848 &self,
1849 completion_intent: CompletionIntent,
1850 cx: &App,
1851 ) -> Result<LanguageModelRequest> {
1852 let model = self.model().context("No language model configured")?;
1853 let tools = if let Some(turn) = self.running_turn.as_ref() {
1854 turn.tools
1855 .iter()
1856 .filter_map(|(tool_name, tool)| {
1857 log::trace!("Including tool: {}", tool_name);
1858 Some(LanguageModelRequestTool {
1859 name: tool_name.to_string(),
1860 description: tool.description().to_string(),
1861 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1862 })
1863 })
1864 .collect::<Vec<_>>()
1865 } else {
1866 Vec::new()
1867 };
1868
1869 log::debug!("Building completion request");
1870 log::debug!("Completion intent: {:?}", completion_intent);
1871 log::debug!("Completion mode: {:?}", self.completion_mode);
1872
1873 let available_tools: Vec<_> = self
1874 .running_turn
1875 .as_ref()
1876 .map(|turn| turn.tools.keys().cloned().collect())
1877 .unwrap_or_default();
1878
1879 log::debug!("Request includes {} tools", available_tools.len());
1880 let messages = self.build_request_messages(available_tools, cx);
1881 log::debug!("Request will include {} messages", messages.len());
1882
1883 let request = LanguageModelRequest {
1884 thread_id: Some(self.id.to_string()),
1885 prompt_id: Some(self.prompt_id.to_string()),
1886 intent: Some(completion_intent),
1887 mode: Some(self.completion_mode.into()),
1888 messages,
1889 tools,
1890 tool_choice: None,
1891 stop: Vec::new(),
1892 temperature: AgentSettings::temperature_for_model(model, cx),
1893 thinking_allowed: true,
1894 };
1895
1896 log::debug!("Completion request built successfully");
1897 Ok(request)
1898 }
1899
1900 fn enabled_tools(
1901 &self,
1902 profile: &AgentProfileSettings,
1903 model: &Arc<dyn LanguageModel>,
1904 cx: &App,
1905 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1906 fn truncate(tool_name: &SharedString) -> SharedString {
1907 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1908 let mut truncated = tool_name.to_string();
1909 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1910 truncated.into()
1911 } else {
1912 tool_name.clone()
1913 }
1914 }
1915
1916 let mut tools = self
1917 .tools
1918 .iter()
1919 .filter_map(|(tool_name, tool)| {
1920 if tool.supports_provider(&model.provider_id())
1921 && profile.is_tool_enabled(tool_name)
1922 {
1923 Some((truncate(tool_name), tool.clone()))
1924 } else {
1925 None
1926 }
1927 })
1928 .collect::<BTreeMap<_, _>>();
1929
1930 let mut context_server_tools = Vec::new();
1931 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1932 let mut duplicate_tool_names = HashSet::default();
1933 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1934 for (tool_name, tool) in server_tools {
1935 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1936 let tool_name = truncate(tool_name);
1937 if !seen_tools.insert(tool_name.clone()) {
1938 duplicate_tool_names.insert(tool_name.clone());
1939 }
1940 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1941 }
1942 }
1943 }
1944
1945 // When there are duplicate tool names, disambiguate by prefixing them
1946 // with the server ID. In the rare case there isn't enough space for the
1947 // disambiguated tool name, keep only the last tool with this name.
1948 for (server_id, tool_name, tool) in context_server_tools {
1949 if duplicate_tool_names.contains(&tool_name) {
1950 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1951 if available >= 2 {
1952 let mut disambiguated = server_id.0.to_string();
1953 disambiguated.truncate(available - 1);
1954 disambiguated.push('_');
1955 disambiguated.push_str(&tool_name);
1956 tools.insert(disambiguated.into(), tool.clone());
1957 } else {
1958 tools.insert(tool_name, tool.clone());
1959 }
1960 } else {
1961 tools.insert(tool_name, tool.clone());
1962 }
1963 }
1964
1965 tools
1966 }
1967
1968 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1969 self.running_turn.as_ref()?.tools.get(name).cloned()
1970 }
1971
1972 pub fn has_tool(&self, name: &str) -> bool {
1973 self.running_turn
1974 .as_ref()
1975 .is_some_and(|turn| turn.tools.contains_key(name))
1976 }
1977
1978 fn build_request_messages(
1979 &self,
1980 available_tools: Vec<SharedString>,
1981 cx: &App,
1982 ) -> Vec<LanguageModelRequestMessage> {
1983 log::trace!(
1984 "Building request messages from {} thread messages",
1985 self.messages.len()
1986 );
1987
1988 let system_prompt = SystemPromptTemplate {
1989 project: self.project_context.read(cx),
1990 available_tools,
1991 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
1992 }
1993 .render(&self.templates)
1994 .context("failed to build system prompt")
1995 .expect("Invalid template");
1996 let mut messages = vec![LanguageModelRequestMessage {
1997 role: Role::System,
1998 content: vec![system_prompt.into()],
1999 cache: false,
2000 reasoning_details: None,
2001 }];
2002 for message in &self.messages {
2003 messages.extend(message.to_request());
2004 }
2005
2006 if let Some(last_message) = messages.last_mut() {
2007 last_message.cache = true;
2008 }
2009
2010 if let Some(message) = self.pending_message.as_ref() {
2011 messages.extend(message.to_request());
2012 }
2013
2014 messages
2015 }
2016
2017 pub fn to_markdown(&self) -> String {
2018 let mut markdown = String::new();
2019 for (ix, message) in self.messages.iter().enumerate() {
2020 if ix > 0 {
2021 markdown.push('\n');
2022 }
2023 markdown.push_str(&message.to_markdown());
2024 }
2025
2026 if let Some(message) = self.pending_message.as_ref() {
2027 markdown.push('\n');
2028 markdown.push_str(&message.to_markdown());
2029 }
2030
2031 markdown
2032 }
2033
2034 fn advance_prompt_id(&mut self) {
2035 self.prompt_id = PromptId::new();
2036 }
2037
2038 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2039 use LanguageModelCompletionError::*;
2040 use http_client::StatusCode;
2041
2042 // General strategy here:
2043 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2044 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times with exponential backoff.
2045 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2046 match error {
2047 HttpResponseError {
2048 status_code: StatusCode::TOO_MANY_REQUESTS,
2049 ..
2050 } => Some(RetryStrategy::ExponentialBackoff {
2051 initial_delay: BASE_RETRY_DELAY,
2052 max_attempts: MAX_RETRY_ATTEMPTS,
2053 }),
2054 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2055 Some(RetryStrategy::Fixed {
2056 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2057 max_attempts: MAX_RETRY_ATTEMPTS,
2058 })
2059 }
2060 UpstreamProviderError {
2061 status,
2062 retry_after,
2063 ..
2064 } => match *status {
2065 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2066 Some(RetryStrategy::Fixed {
2067 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2068 max_attempts: MAX_RETRY_ATTEMPTS,
2069 })
2070 }
2071 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2072 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2073 // Internal Server Error could be anything, retry up to 3 times.
2074 max_attempts: 3,
2075 }),
2076 status => {
2077 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2078 // but we frequently get them in practice. See https://http.dev/529
2079 if status.as_u16() == 529 {
2080 Some(RetryStrategy::Fixed {
2081 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2082 max_attempts: MAX_RETRY_ATTEMPTS,
2083 })
2084 } else {
2085 Some(RetryStrategy::Fixed {
2086 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2087 max_attempts: 2,
2088 })
2089 }
2090 }
2091 },
2092 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2093 delay: BASE_RETRY_DELAY,
2094 max_attempts: 3,
2095 }),
2096 ApiReadResponseError { .. }
2097 | HttpSend { .. }
2098 | DeserializeResponse { .. }
2099 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2100 delay: BASE_RETRY_DELAY,
2101 max_attempts: 3,
2102 }),
2103 // Retrying these errors definitely shouldn't help.
2104 HttpResponseError {
2105 status_code:
2106 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2107 ..
2108 }
2109 | AuthenticationError { .. }
2110 | PermissionError { .. }
2111 | NoApiKey { .. }
2112 | ApiEndpointNotFound { .. }
2113 | PromptTooLarge { .. } => None,
2114 // These errors might be transient, so retry them
2115 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2116 delay: BASE_RETRY_DELAY,
2117 max_attempts: 1,
2118 }),
2119 // Retry all other 4xx and 5xx errors once.
2120 HttpResponseError { status_code, .. }
2121 if status_code.is_client_error() || status_code.is_server_error() =>
2122 {
2123 Some(RetryStrategy::Fixed {
2124 delay: BASE_RETRY_DELAY,
2125 max_attempts: 3,
2126 })
2127 }
2128 Other(err)
2129 if err.is::<language_model::PaymentRequiredError>()
2130 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2131 {
2132 // Retrying won't help for Payment Required or Model Request Limit errors (where
2133 // the user must upgrade to usage-based billing to get more requests, or else wait
2134 // for a significant amount of time for the request limit to reset).
2135 None
2136 }
2137 // Conservatively assume that any other errors are non-retryable
2138 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2139 delay: BASE_RETRY_DELAY,
2140 max_attempts: 2,
2141 }),
2142 }
2143 }
2144}
2145
2146struct RunningTurn {
2147 /// Holds the task that handles agent interaction until the end of the turn.
2148 /// Survives across multiple requests as the model performs tool calls and
2149 /// we run tools, report their results.
2150 _task: Task<()>,
2151 /// The current event stream for the running turn. Used to report a final
2152 /// cancellation event if we cancel the turn.
2153 event_stream: ThreadEventStream,
2154 /// The tools that were enabled for this turn.
2155 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2156}
2157
2158impl RunningTurn {
2159 fn cancel(self) {
2160 log::debug!("Cancelling in progress turn");
2161 self.event_stream.send_canceled();
2162 }
2163}
2164
2165pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2166
2167impl EventEmitter<TokenUsageUpdated> for Thread {}
2168
2169pub struct TitleUpdated;
2170
2171impl EventEmitter<TitleUpdated> for Thread {}
2172
2173pub trait AgentTool
2174where
2175 Self: 'static + Sized,
2176{
2177 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2178 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2179
2180 fn name() -> &'static str;
2181
2182 fn description() -> SharedString {
2183 let schema = schemars::schema_for!(Self::Input);
2184 SharedString::new(
2185 schema
2186 .get("description")
2187 .and_then(|description| description.as_str())
2188 .unwrap_or_default(),
2189 )
2190 }
2191
2192 fn kind() -> acp::ToolKind;
2193
2194 /// The initial tool title to display. Can be updated during the tool run.
2195 fn initial_title(
2196 &self,
2197 input: Result<Self::Input, serde_json::Value>,
2198 cx: &mut App,
2199 ) -> SharedString;
2200
2201 /// Returns the JSON schema that describes the tool's input.
2202 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2203 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2204 }
2205
2206 /// Some tools rely on a provider for the underlying billing or other reasons.
2207 /// Allow the tool to check if they are compatible, or should be filtered out.
2208 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2209 true
2210 }
2211
2212 /// Runs the tool with the provided input.
2213 fn run(
2214 self: Arc<Self>,
2215 input: Self::Input,
2216 event_stream: ToolCallEventStream,
2217 cx: &mut App,
2218 ) -> Task<Result<Self::Output>>;
2219
2220 /// Emits events for a previous execution of the tool.
2221 fn replay(
2222 &self,
2223 _input: Self::Input,
2224 _output: Self::Output,
2225 _event_stream: ToolCallEventStream,
2226 _cx: &mut App,
2227 ) -> Result<()> {
2228 Ok(())
2229 }
2230
2231 fn erase(self) -> Arc<dyn AnyAgentTool> {
2232 Arc::new(Erased(Arc::new(self)))
2233 }
2234}
2235
2236pub struct Erased<T>(T);
2237
2238pub struct AgentToolOutput {
2239 pub llm_output: LanguageModelToolResultContent,
2240 pub raw_output: serde_json::Value,
2241}
2242
2243pub trait AnyAgentTool {
2244 fn name(&self) -> SharedString;
2245 fn description(&self) -> SharedString;
2246 fn kind(&self) -> acp::ToolKind;
2247 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2248 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2249 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2250 true
2251 }
2252 fn run(
2253 self: Arc<Self>,
2254 input: serde_json::Value,
2255 event_stream: ToolCallEventStream,
2256 cx: &mut App,
2257 ) -> Task<Result<AgentToolOutput>>;
2258 fn replay(
2259 &self,
2260 input: serde_json::Value,
2261 output: serde_json::Value,
2262 event_stream: ToolCallEventStream,
2263 cx: &mut App,
2264 ) -> Result<()>;
2265}
2266
2267impl<T> AnyAgentTool for Erased<Arc<T>>
2268where
2269 T: AgentTool,
2270{
2271 fn name(&self) -> SharedString {
2272 T::name().into()
2273 }
2274
2275 fn description(&self) -> SharedString {
2276 T::description()
2277 }
2278
2279 fn kind(&self) -> agent_client_protocol::ToolKind {
2280 T::kind()
2281 }
2282
2283 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString {
2284 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2285 self.0.initial_title(parsed_input, _cx)
2286 }
2287
2288 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2289 let mut json = serde_json::to_value(T::input_schema(format))?;
2290 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2291 Ok(json)
2292 }
2293
2294 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2295 T::supports_provider(provider)
2296 }
2297
2298 fn run(
2299 self: Arc<Self>,
2300 input: serde_json::Value,
2301 event_stream: ToolCallEventStream,
2302 cx: &mut App,
2303 ) -> Task<Result<AgentToolOutput>> {
2304 cx.spawn(async move |cx| {
2305 let input = serde_json::from_value(input)?;
2306 let output = cx
2307 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2308 .await?;
2309 let raw_output = serde_json::to_value(&output)?;
2310 Ok(AgentToolOutput {
2311 llm_output: output.into(),
2312 raw_output,
2313 })
2314 })
2315 }
2316
2317 fn replay(
2318 &self,
2319 input: serde_json::Value,
2320 output: serde_json::Value,
2321 event_stream: ToolCallEventStream,
2322 cx: &mut App,
2323 ) -> Result<()> {
2324 let input = serde_json::from_value(input)?;
2325 let output = serde_json::from_value(output)?;
2326 self.0.replay(input, output, event_stream, cx)
2327 }
2328}
2329
2330#[derive(Clone)]
2331struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2332
2333impl ThreadEventStream {
2334 fn send_user_message(&self, message: &UserMessage) {
2335 self.0
2336 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2337 .ok();
2338 }
2339
2340 fn send_text(&self, text: &str) {
2341 self.0
2342 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2343 .ok();
2344 }
2345
2346 fn send_thinking(&self, text: &str) {
2347 self.0
2348 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2349 .ok();
2350 }
2351
2352 fn send_tool_call(
2353 &self,
2354 id: &LanguageModelToolUseId,
2355 tool_name: &str,
2356 title: SharedString,
2357 kind: acp::ToolKind,
2358 input: serde_json::Value,
2359 ) {
2360 self.0
2361 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2362 id,
2363 tool_name,
2364 title.to_string(),
2365 kind,
2366 input,
2367 ))))
2368 .ok();
2369 }
2370
2371 fn initial_tool_call(
2372 id: &LanguageModelToolUseId,
2373 tool_name: &str,
2374 title: String,
2375 kind: acp::ToolKind,
2376 input: serde_json::Value,
2377 ) -> acp::ToolCall {
2378 acp::ToolCall::new(id.to_string(), title)
2379 .kind(kind)
2380 .raw_input(input)
2381 .meta(acp::Meta::from_iter([(
2382 "tool_name".into(),
2383 tool_name.into(),
2384 )]))
2385 }
2386
2387 fn update_tool_call_fields(
2388 &self,
2389 tool_use_id: &LanguageModelToolUseId,
2390 fields: acp::ToolCallUpdateFields,
2391 ) {
2392 self.0
2393 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2394 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2395 )))
2396 .ok();
2397 }
2398
2399 fn send_retry(&self, status: acp_thread::RetryStatus) {
2400 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2401 }
2402
2403 fn send_stop(&self, reason: acp::StopReason) {
2404 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2405 }
2406
2407 fn send_canceled(&self) {
2408 self.0
2409 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2410 .ok();
2411 }
2412
2413 fn send_error(&self, error: impl Into<anyhow::Error>) {
2414 self.0.unbounded_send(Err(error.into())).ok();
2415 }
2416}
2417
2418#[derive(Clone)]
2419pub struct ToolCallEventStream {
2420 tool_use_id: LanguageModelToolUseId,
2421 stream: ThreadEventStream,
2422 fs: Option<Arc<dyn Fs>>,
2423}
2424
2425impl ToolCallEventStream {
2426 #[cfg(any(test, feature = "test-support"))]
2427 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2428 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2429
2430 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2431
2432 (stream, ToolCallEventStreamReceiver(events_rx))
2433 }
2434
2435 fn new(
2436 tool_use_id: LanguageModelToolUseId,
2437 stream: ThreadEventStream,
2438 fs: Option<Arc<dyn Fs>>,
2439 ) -> Self {
2440 Self {
2441 tool_use_id,
2442 stream,
2443 fs,
2444 }
2445 }
2446
2447 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2448 self.stream
2449 .update_tool_call_fields(&self.tool_use_id, fields);
2450 }
2451
2452 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2453 self.stream
2454 .0
2455 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2456 acp_thread::ToolCallUpdateDiff {
2457 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2458 diff,
2459 }
2460 .into(),
2461 )))
2462 .ok();
2463 }
2464
2465 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2466 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2467 return Task::ready(Ok(()));
2468 }
2469
2470 let (response_tx, response_rx) = oneshot::channel();
2471 self.stream
2472 .0
2473 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2474 ToolCallAuthorization {
2475 tool_call: acp::ToolCallUpdate::new(
2476 self.tool_use_id.to_string(),
2477 acp::ToolCallUpdateFields::new().title(title.into()),
2478 ),
2479 options: vec![
2480 acp::PermissionOption::new(
2481 acp::PermissionOptionId::new("always_allow"),
2482 "Always Allow",
2483 acp::PermissionOptionKind::AllowAlways,
2484 ),
2485 acp::PermissionOption::new(
2486 acp::PermissionOptionId::new("allow"),
2487 "Allow",
2488 acp::PermissionOptionKind::AllowOnce,
2489 ),
2490 acp::PermissionOption::new(
2491 acp::PermissionOptionId::new("deny"),
2492 "Deny",
2493 acp::PermissionOptionKind::RejectOnce,
2494 ),
2495 ],
2496 response: response_tx,
2497 },
2498 )))
2499 .ok();
2500 let fs = self.fs.clone();
2501 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2502 "always_allow" => {
2503 if let Some(fs) = fs.clone() {
2504 cx.update(|cx| {
2505 update_settings_file(fs, cx, |settings, _| {
2506 settings
2507 .agent
2508 .get_or_insert_default()
2509 .set_always_allow_tool_actions(true);
2510 });
2511 })?;
2512 }
2513
2514 Ok(())
2515 }
2516 "allow" => Ok(()),
2517 _ => Err(anyhow!("Permission to run tool denied by user")),
2518 })
2519 }
2520}
2521
2522#[cfg(any(test, feature = "test-support"))]
2523pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2524
2525#[cfg(any(test, feature = "test-support"))]
2526impl ToolCallEventStreamReceiver {
2527 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2528 let event = self.0.next().await;
2529 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2530 auth
2531 } else {
2532 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2533 }
2534 }
2535
2536 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2537 let event = self.0.next().await;
2538 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2539 update,
2540 )))) = event
2541 {
2542 update.fields
2543 } else {
2544 panic!("Expected update fields but got: {:?}", event);
2545 }
2546 }
2547
2548 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2549 let event = self.0.next().await;
2550 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2551 update,
2552 )))) = event
2553 {
2554 update.diff
2555 } else {
2556 panic!("Expected diff but got: {:?}", event);
2557 }
2558 }
2559
2560 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2561 let event = self.0.next().await;
2562 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2563 update,
2564 )))) = event
2565 {
2566 update.terminal
2567 } else {
2568 panic!("Expected terminal but got: {:?}", event);
2569 }
2570 }
2571}
2572
2573#[cfg(any(test, feature = "test-support"))]
2574impl std::ops::Deref for ToolCallEventStreamReceiver {
2575 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2576
2577 fn deref(&self) -> &Self::Target {
2578 &self.0
2579 }
2580}
2581
2582#[cfg(any(test, feature = "test-support"))]
2583impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2584 fn deref_mut(&mut self) -> &mut Self::Target {
2585 &mut self.0
2586 }
2587}
2588
2589impl From<&str> for UserMessageContent {
2590 fn from(text: &str) -> Self {
2591 Self::Text(text.into())
2592 }
2593}
2594
2595impl UserMessageContent {
2596 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2597 match value {
2598 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2599 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2600 acp::ContentBlock::Audio(_) => {
2601 // TODO
2602 Self::Text("[audio]".to_string())
2603 }
2604 acp::ContentBlock::ResourceLink(resource_link) => {
2605 match MentionUri::parse(&resource_link.uri, path_style) {
2606 Ok(uri) => Self::Mention {
2607 uri,
2608 content: String::new(),
2609 },
2610 Err(err) => {
2611 log::error!("Failed to parse mention link: {}", err);
2612 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2613 }
2614 }
2615 }
2616 acp::ContentBlock::Resource(resource) => match resource.resource {
2617 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2618 match MentionUri::parse(&resource.uri, path_style) {
2619 Ok(uri) => Self::Mention {
2620 uri,
2621 content: resource.text,
2622 },
2623 Err(err) => {
2624 log::error!("Failed to parse mention link: {}", err);
2625 Self::Text(
2626 MarkdownCodeBlock {
2627 tag: &resource.uri,
2628 text: &resource.text,
2629 }
2630 .to_string(),
2631 )
2632 }
2633 }
2634 }
2635 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2636 // TODO
2637 Self::Text("[blob]".to_string())
2638 }
2639 other => {
2640 log::warn!("Unexpected content type: {:?}", other);
2641 Self::Text("[unknown]".to_string())
2642 }
2643 },
2644 other => {
2645 log::warn!("Unexpected content type: {:?}", other);
2646 Self::Text("[unknown]".to_string())
2647 }
2648 }
2649 }
2650}
2651
2652impl From<UserMessageContent> for acp::ContentBlock {
2653 fn from(content: UserMessageContent) -> Self {
2654 match content {
2655 UserMessageContent::Text(text) => text.into(),
2656 UserMessageContent::Image(image) => {
2657 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
2658 }
2659 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
2660 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
2661 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
2662 )),
2663 ),
2664 }
2665 }
2666}
2667
2668fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2669 LanguageModelImage {
2670 source: image_content.data.into(),
2671 size: None,
2672 }
2673}