1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SpawnSubagentTool, SystemPromptTemplate, Template,
6 Templates, TerminalTool, ThinkingTool, WebSearchTool,
7};
8use acp_thread::{MentionUri, UserMessageId};
9use action_log::ActionLog;
10
11use agent_client_protocol as acp;
12use agent_settings::{
13 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
14 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
15};
16use anyhow::{Context as _, Result, anyhow};
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage, UserStore};
19use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::stream;
23use futures::{
24 FutureExt,
25 channel::{mpsc, oneshot},
26 future::Shared,
27 stream::FuturesUnordered,
28};
29use gpui::{
30 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
31};
32use language_model::{
33 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
34 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
35 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
36 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
37 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
38 ZED_CLOUD_PROVIDER_ID,
39};
40use project::Project;
41use prompt_store::ProjectContext;
42use schemars::{JsonSchema, Schema};
43use serde::{Deserialize, Serialize};
44use settings::{LanguageModelSelection, Settings, update_settings_file};
45use smol::stream::StreamExt;
46use std::{
47 collections::BTreeMap,
48 ops::RangeInclusive,
49 path::Path,
50 rc::Rc,
51 sync::Arc,
52 time::{Duration, Instant},
53};
54use std::{fmt::Write, path::PathBuf};
55use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
56use uuid::Uuid;
57
58const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
59pub const MAX_TOOL_NAME_LENGTH: usize = 64;
60
61/// The ID of the user prompt that initiated a request.
62///
63/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
64#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
65pub struct PromptId(Arc<str>);
66
67impl PromptId {
68 pub fn new() -> Self {
69 Self(Uuid::new_v4().to_string().into())
70 }
71}
72
73impl std::fmt::Display for PromptId {
74 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
75 write!(f, "{}", self.0)
76 }
77}
78
79pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
80pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
81
82#[derive(Debug, Clone)]
83enum RetryStrategy {
84 ExponentialBackoff {
85 initial_delay: Duration,
86 max_attempts: u8,
87 },
88 Fixed {
89 delay: Duration,
90 max_attempts: u8,
91 },
92}
93
94#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
95pub enum Message {
96 User(UserMessage),
97 Agent(AgentMessage),
98 Resume,
99}
100
101impl Message {
102 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
103 match self {
104 Message::Agent(agent_message) => Some(agent_message),
105 _ => None,
106 }
107 }
108
109 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
110 match self {
111 Message::User(message) => {
112 if message.content.is_empty() {
113 vec![]
114 } else {
115 vec![message.to_request()]
116 }
117 }
118 Message::Agent(message) => message.to_request(),
119 Message::Resume => vec![LanguageModelRequestMessage {
120 role: Role::User,
121 content: vec!["Continue where you left off".into()],
122 cache: false,
123 reasoning_details: None,
124 }],
125 }
126 }
127
128 pub fn to_markdown(&self) -> String {
129 match self {
130 Message::User(message) => message.to_markdown(),
131 Message::Agent(message) => message.to_markdown(),
132 Message::Resume => "[resume]\n".into(),
133 }
134 }
135
136 pub fn role(&self) -> Role {
137 match self {
138 Message::User(_) | Message::Resume => Role::User,
139 Message::Agent(_) => Role::Assistant,
140 }
141 }
142}
143
144#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
145pub struct UserMessage {
146 pub id: UserMessageId,
147 pub content: Vec<UserMessageContent>,
148}
149
150#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
151pub enum UserMessageContent {
152 Text(String),
153 Mention { uri: MentionUri, content: String },
154 Image(LanguageModelImage),
155}
156
157impl UserMessage {
158 pub fn to_markdown(&self) -> String {
159 let mut markdown = String::from("## User\n\n");
160
161 for content in &self.content {
162 match content {
163 UserMessageContent::Text(text) => {
164 markdown.push_str(text);
165 markdown.push('\n');
166 }
167 UserMessageContent::Image(_) => {
168 markdown.push_str("<image />\n");
169 }
170 UserMessageContent::Mention { uri, content } => {
171 if !content.is_empty() {
172 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
173 } else {
174 let _ = writeln!(&mut markdown, "{}", uri.as_link());
175 }
176 }
177 }
178 }
179
180 markdown
181 }
182
183 fn to_request(&self) -> LanguageModelRequestMessage {
184 let mut message = LanguageModelRequestMessage {
185 role: Role::User,
186 content: Vec::with_capacity(self.content.len()),
187 cache: false,
188 reasoning_details: None,
189 };
190
191 const OPEN_CONTEXT: &str = "<context>\n\
192 The following items were attached by the user. \
193 They are up-to-date and don't need to be re-read.\n\n";
194
195 const OPEN_FILES_TAG: &str = "<files>";
196 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
197 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
198 const OPEN_SELECTIONS_TAG: &str = "<selections>";
199 const OPEN_THREADS_TAG: &str = "<threads>";
200 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
201 const OPEN_RULES_TAG: &str =
202 "<rules>\nThe user has specified the following rules that should be applied:\n";
203
204 let mut file_context = OPEN_FILES_TAG.to_string();
205 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
206 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
207 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
208 let mut thread_context = OPEN_THREADS_TAG.to_string();
209 let mut fetch_context = OPEN_FETCH_TAG.to_string();
210 let mut rules_context = OPEN_RULES_TAG.to_string();
211
212 for chunk in &self.content {
213 let chunk = match chunk {
214 UserMessageContent::Text(text) => {
215 language_model::MessageContent::Text(text.clone())
216 }
217 UserMessageContent::Image(value) => {
218 language_model::MessageContent::Image(value.clone())
219 }
220 UserMessageContent::Mention { uri, content } => {
221 match uri {
222 MentionUri::File { abs_path } => {
223 write!(
224 &mut file_context,
225 "\n{}",
226 MarkdownCodeBlock {
227 tag: &codeblock_tag(abs_path, None),
228 text: &content.to_string(),
229 }
230 )
231 .ok();
232 }
233 MentionUri::PastedImage => {
234 debug_panic!("pasted image URI should not be used in mention content")
235 }
236 MentionUri::Directory { .. } => {
237 write!(&mut directory_context, "\n{}\n", content).ok();
238 }
239 MentionUri::Symbol {
240 abs_path: path,
241 line_range,
242 ..
243 } => {
244 write!(
245 &mut symbol_context,
246 "\n{}",
247 MarkdownCodeBlock {
248 tag: &codeblock_tag(path, Some(line_range)),
249 text: content
250 }
251 )
252 .ok();
253 }
254 MentionUri::Selection {
255 abs_path: path,
256 line_range,
257 ..
258 } => {
259 write!(
260 &mut selection_context,
261 "\n{}",
262 MarkdownCodeBlock {
263 tag: &codeblock_tag(
264 path.as_deref().unwrap_or("Untitled".as_ref()),
265 Some(line_range)
266 ),
267 text: content
268 }
269 )
270 .ok();
271 }
272 MentionUri::Thread { .. } => {
273 write!(&mut thread_context, "\n{}\n", content).ok();
274 }
275 MentionUri::TextThread { .. } => {
276 write!(&mut thread_context, "\n{}\n", content).ok();
277 }
278 MentionUri::Rule { .. } => {
279 write!(
280 &mut rules_context,
281 "\n{}",
282 MarkdownCodeBlock {
283 tag: "",
284 text: content
285 }
286 )
287 .ok();
288 }
289 MentionUri::Fetch { url } => {
290 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
291 }
292 }
293
294 language_model::MessageContent::Text(uri.as_link().to_string())
295 }
296 };
297
298 message.content.push(chunk);
299 }
300
301 let len_before_context = message.content.len();
302
303 if file_context.len() > OPEN_FILES_TAG.len() {
304 file_context.push_str("</files>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(file_context));
308 }
309
310 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
311 directory_context.push_str("</directories>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(directory_context));
315 }
316
317 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
318 symbol_context.push_str("</symbols>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(symbol_context));
322 }
323
324 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
325 selection_context.push_str("</selections>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(selection_context));
329 }
330
331 if thread_context.len() > OPEN_THREADS_TAG.len() {
332 thread_context.push_str("</threads>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(thread_context));
336 }
337
338 if fetch_context.len() > OPEN_FETCH_TAG.len() {
339 fetch_context.push_str("</fetched_urls>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(fetch_context));
343 }
344
345 if rules_context.len() > OPEN_RULES_TAG.len() {
346 rules_context.push_str("</user_rules>\n");
347 message
348 .content
349 .push(language_model::MessageContent::Text(rules_context));
350 }
351
352 if message.content.len() > len_before_context {
353 message.content.insert(
354 len_before_context,
355 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
356 );
357 message
358 .content
359 .push(language_model::MessageContent::Text("</context>".into()));
360 }
361
362 message
363 }
364}
365
366fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
367 let mut result = String::new();
368
369 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
370 let _ = write!(result, "{} ", extension);
371 }
372
373 let _ = write!(result, "{}", full_path.display());
374
375 if let Some(range) = line_range {
376 if range.start() == range.end() {
377 let _ = write!(result, ":{}", range.start() + 1);
378 } else {
379 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
380 }
381 }
382
383 result
384}
385
386impl AgentMessage {
387 pub fn to_markdown(&self) -> String {
388 let mut markdown = String::from("## Assistant\n\n");
389
390 for content in &self.content {
391 match content {
392 AgentMessageContent::Text(text) => {
393 markdown.push_str(text);
394 markdown.push('\n');
395 }
396 AgentMessageContent::Thinking { text, .. } => {
397 markdown.push_str("<think>");
398 markdown.push_str(text);
399 markdown.push_str("</think>\n");
400 }
401 AgentMessageContent::RedactedThinking(_) => {
402 markdown.push_str("<redacted_thinking />\n")
403 }
404 AgentMessageContent::ToolUse(tool_use) => {
405 markdown.push_str(&format!(
406 "**Tool Use**: {} (ID: {})\n",
407 tool_use.name, tool_use.id
408 ));
409 markdown.push_str(&format!(
410 "{}\n",
411 MarkdownCodeBlock {
412 tag: "json",
413 text: &format!("{:#}", tool_use.input)
414 }
415 ));
416 }
417 }
418 }
419
420 for tool_result in self.tool_results.values() {
421 markdown.push_str(&format!(
422 "**Tool Result**: {} (ID: {})\n\n",
423 tool_result.tool_name, tool_result.tool_use_id
424 ));
425 if tool_result.is_error {
426 markdown.push_str("**ERROR:**\n");
427 }
428
429 match &tool_result.content {
430 LanguageModelToolResultContent::Text(text) => {
431 writeln!(markdown, "{text}\n").ok();
432 }
433 LanguageModelToolResultContent::Image(_) => {
434 writeln!(markdown, "<image />\n").ok();
435 }
436 }
437
438 if let Some(output) = tool_result.output.as_ref() {
439 writeln!(
440 markdown,
441 "**Debug Output**:\n\n```json\n{}\n```\n",
442 serde_json::to_string_pretty(output).unwrap()
443 )
444 .unwrap();
445 }
446 }
447
448 markdown
449 }
450
451 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
452 let mut assistant_message = LanguageModelRequestMessage {
453 role: Role::Assistant,
454 content: Vec::with_capacity(self.content.len()),
455 cache: false,
456 reasoning_details: self.reasoning_details.clone(),
457 };
458 for chunk in &self.content {
459 match chunk {
460 AgentMessageContent::Text(text) => {
461 assistant_message
462 .content
463 .push(language_model::MessageContent::Text(text.clone()));
464 }
465 AgentMessageContent::Thinking { text, signature } => {
466 assistant_message
467 .content
468 .push(language_model::MessageContent::Thinking {
469 text: text.clone(),
470 signature: signature.clone(),
471 });
472 }
473 AgentMessageContent::RedactedThinking(value) => {
474 assistant_message.content.push(
475 language_model::MessageContent::RedactedThinking(value.clone()),
476 );
477 }
478 AgentMessageContent::ToolUse(tool_use) => {
479 if self.tool_results.contains_key(&tool_use.id) {
480 assistant_message
481 .content
482 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
483 }
484 }
485 };
486 }
487
488 let mut user_message = LanguageModelRequestMessage {
489 role: Role::User,
490 content: Vec::new(),
491 cache: false,
492 reasoning_details: None,
493 };
494
495 for tool_result in self.tool_results.values() {
496 let mut tool_result = tool_result.clone();
497 // Surprisingly, the API fails if we return an empty string here.
498 // It thinks we are sending a tool use without a tool result.
499 if tool_result.content.is_empty() {
500 tool_result.content = "<Tool returned an empty string>".into();
501 }
502 user_message
503 .content
504 .push(language_model::MessageContent::ToolResult(tool_result));
505 }
506
507 let mut messages = Vec::new();
508 if !assistant_message.content.is_empty() {
509 messages.push(assistant_message);
510 }
511 if !user_message.content.is_empty() {
512 messages.push(user_message);
513 }
514 messages
515 }
516}
517
518#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
519pub struct AgentMessage {
520 pub content: Vec<AgentMessageContent>,
521 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
522 pub reasoning_details: Option<serde_json::Value>,
523}
524
525#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
526pub enum AgentMessageContent {
527 Text(String),
528 Thinking {
529 text: String,
530 signature: Option<String>,
531 },
532 RedactedThinking(String),
533 ToolUse(LanguageModelToolUse),
534}
535
536pub trait TerminalHandle {
537 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
538 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
539 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
540 fn kill(&self, cx: &AsyncApp) -> Result<()>;
541}
542
543pub trait ThreadEnvironment {
544 fn create_terminal(
545 &self,
546 command: String,
547 cwd: Option<PathBuf>,
548 output_byte_limit: Option<u64>,
549 cx: &mut AsyncApp,
550 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
551}
552
553#[derive(Debug)]
554pub enum ThreadEvent {
555 UserMessage(UserMessage),
556 AgentText(String),
557 AgentThinking(String),
558 ToolCall(acp::ToolCall),
559 ToolCallUpdate(acp_thread::ToolCallUpdate),
560 ToolCallAuthorization(ToolCallAuthorization),
561 Retry(acp_thread::RetryStatus),
562 Stop(acp::StopReason),
563}
564
565#[derive(Debug)]
566pub struct NewTerminal {
567 pub command: String,
568 pub output_byte_limit: Option<u64>,
569 pub cwd: Option<PathBuf>,
570 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
571}
572
573#[derive(Debug)]
574pub struct ToolCallAuthorization {
575 pub tool_call: acp::ToolCallUpdate,
576 pub options: Vec<acp::PermissionOption>,
577 pub response: oneshot::Sender<acp::PermissionOptionId>,
578}
579
580#[derive(Debug, thiserror::Error)]
581enum CompletionError {
582 #[error("max tokens")]
583 MaxTokens,
584 #[error("refusal")]
585 Refusal,
586 #[error(transparent)]
587 Other(#[from] anyhow::Error),
588}
589
590pub struct Thread {
591 id: acp::SessionId,
592 prompt_id: PromptId,
593 updated_at: DateTime<Utc>,
594 title: Option<SharedString>,
595 pending_title_generation: Option<Task<()>>,
596 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
597 summary: Option<SharedString>,
598 messages: Vec<Message>,
599 user_store: Entity<UserStore>,
600 completion_mode: CompletionMode,
601 /// Holds the task that handles agent interaction until the end of the turn.
602 /// Survives across multiple requests as the model performs tool calls and
603 /// we run tools, report their results.
604 running_turn: Option<RunningTurn>,
605 pending_message: Option<AgentMessage>,
606 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
607 tool_use_limit_reached: bool,
608 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
609 #[allow(unused)]
610 cumulative_token_usage: TokenUsage,
611 #[allow(unused)]
612 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
613 context_server_registry: Entity<ContextServerRegistry>,
614 profile_id: AgentProfileId,
615 project_context: Entity<ProjectContext>,
616 templates: Arc<Templates>,
617 model: Option<Arc<dyn LanguageModel>>,
618 summarization_model: Option<Arc<dyn LanguageModel>>,
619 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
620 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
621 pub(crate) project: Entity<Project>,
622 pub(crate) action_log: Entity<ActionLog>,
623 /// Tracks the last time files were read by the agent, to detect external modifications
624 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
625}
626
627impl Thread {
628 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
629 let image = model.map_or(true, |model| model.supports_images());
630 acp::PromptCapabilities::new()
631 .image(image)
632 .embedded_context(true)
633 }
634
635 pub fn new(
636 project: Entity<Project>,
637 project_context: Entity<ProjectContext>,
638 context_server_registry: Entity<ContextServerRegistry>,
639 templates: Arc<Templates>,
640 model: Option<Arc<dyn LanguageModel>>,
641 cx: &mut Context<Self>,
642 ) -> Self {
643 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
644 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
645 let (prompt_capabilities_tx, prompt_capabilities_rx) =
646 watch::channel(Self::prompt_capabilities(model.as_deref()));
647 Self {
648 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
649 prompt_id: PromptId::new(),
650 updated_at: Utc::now(),
651 title: None,
652 pending_title_generation: None,
653 pending_summary_generation: None,
654 summary: None,
655 messages: Vec::new(),
656 user_store: project.read(cx).user_store(),
657 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
658 running_turn: None,
659 pending_message: None,
660 tools: BTreeMap::default(),
661 tool_use_limit_reached: false,
662 request_token_usage: HashMap::default(),
663 cumulative_token_usage: TokenUsage::default(),
664 initial_project_snapshot: {
665 let project_snapshot = Self::project_snapshot(project.clone(), cx);
666 cx.foreground_executor()
667 .spawn(async move { Some(project_snapshot.await) })
668 .shared()
669 },
670 context_server_registry,
671 profile_id,
672 project_context,
673 templates,
674 model,
675 summarization_model: None,
676 prompt_capabilities_tx,
677 prompt_capabilities_rx,
678 project,
679 action_log,
680 file_read_times: HashMap::default(),
681 }
682 }
683
684 pub fn id(&self) -> &acp::SessionId {
685 &self.id
686 }
687
688 pub fn replay(
689 &mut self,
690 cx: &mut Context<Self>,
691 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
692 let (tx, rx) = mpsc::unbounded();
693 let stream = ThreadEventStream(tx);
694 for message in &self.messages {
695 match message {
696 Message::User(user_message) => stream.send_user_message(user_message),
697 Message::Agent(assistant_message) => {
698 for content in &assistant_message.content {
699 match content {
700 AgentMessageContent::Text(text) => stream.send_text(text),
701 AgentMessageContent::Thinking { text, .. } => {
702 stream.send_thinking(text)
703 }
704 AgentMessageContent::RedactedThinking(_) => {}
705 AgentMessageContent::ToolUse(tool_use) => {
706 self.replay_tool_call(
707 tool_use,
708 assistant_message.tool_results.get(&tool_use.id),
709 &stream,
710 cx,
711 );
712 }
713 }
714 }
715 }
716 Message::Resume => {}
717 }
718 }
719 rx
720 }
721
722 fn replay_tool_call(
723 &self,
724 tool_use: &LanguageModelToolUse,
725 tool_result: Option<&LanguageModelToolResult>,
726 stream: &ThreadEventStream,
727 cx: &mut Context<Self>,
728 ) {
729 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
730 self.context_server_registry
731 .read(cx)
732 .servers()
733 .find_map(|(_, tools)| {
734 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
735 Some(tool.clone())
736 } else {
737 None
738 }
739 })
740 });
741
742 let Some(tool) = tool else {
743 stream
744 .0
745 .unbounded_send(Ok(ThreadEvent::ToolCall(
746 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
747 .status(acp::ToolCallStatus::Failed)
748 .raw_input(tool_use.input.clone()),
749 )))
750 .ok();
751 return;
752 };
753
754 let title = tool.initial_title(tool_use.input.clone(), cx);
755 let kind = tool.kind();
756 stream.send_tool_call(
757 &tool_use.id,
758 &tool_use.name,
759 title,
760 kind,
761 tool_use.input.clone(),
762 );
763
764 let output = tool_result
765 .as_ref()
766 .and_then(|result| result.output.clone());
767 if let Some(output) = output.clone() {
768 let tool_event_stream = ToolCallEventStream::new(
769 tool_use.id.clone(),
770 stream.clone(),
771 Some(self.project.read(cx).fs().clone()),
772 );
773 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
774 .log_err();
775 }
776
777 stream.update_tool_call_fields(
778 &tool_use.id,
779 acp::ToolCallUpdateFields::new()
780 .status(
781 tool_result
782 .as_ref()
783 .map_or(acp::ToolCallStatus::Failed, |result| {
784 if result.is_error {
785 acp::ToolCallStatus::Failed
786 } else {
787 acp::ToolCallStatus::Completed
788 }
789 }),
790 )
791 .raw_output(output),
792 );
793 }
794
795 pub fn from_db(
796 id: acp::SessionId,
797 db_thread: DbThread,
798 project: Entity<Project>,
799 project_context: Entity<ProjectContext>,
800 context_server_registry: Entity<ContextServerRegistry>,
801 templates: Arc<Templates>,
802 cx: &mut Context<Self>,
803 ) -> Self {
804 let profile_id = db_thread
805 .profile
806 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
807
808 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
809 db_thread
810 .model
811 .and_then(|model| {
812 let model = SelectedModel {
813 provider: model.provider.clone().into(),
814 model: model.model.into(),
815 };
816 registry.select_model(&model, cx)
817 })
818 .or_else(|| registry.default_model())
819 .map(|model| model.model)
820 });
821
822 if model.is_none() {
823 model = Self::resolve_profile_model(&profile_id, cx);
824 }
825 if model.is_none() {
826 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
827 registry.default_model().map(|model| model.model)
828 });
829 }
830
831 let (prompt_capabilities_tx, prompt_capabilities_rx) =
832 watch::channel(Self::prompt_capabilities(model.as_deref()));
833
834 let action_log = cx.new(|_| ActionLog::new(project.clone()));
835
836 Self {
837 id,
838 prompt_id: PromptId::new(),
839 title: if db_thread.title.is_empty() {
840 None
841 } else {
842 Some(db_thread.title.clone())
843 },
844 pending_title_generation: None,
845 pending_summary_generation: None,
846 summary: db_thread.detailed_summary,
847 messages: db_thread.messages,
848 user_store: project.read(cx).user_store(),
849 completion_mode: db_thread.completion_mode.unwrap_or_default(),
850 running_turn: None,
851 pending_message: None,
852 tools: BTreeMap::default(),
853 tool_use_limit_reached: false,
854 request_token_usage: db_thread.request_token_usage.clone(),
855 cumulative_token_usage: db_thread.cumulative_token_usage,
856 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
857 context_server_registry,
858 profile_id,
859 project_context,
860 templates,
861 model,
862 summarization_model: None,
863 project,
864 action_log,
865 updated_at: db_thread.updated_at,
866 prompt_capabilities_tx,
867 prompt_capabilities_rx,
868 file_read_times: HashMap::default(),
869 }
870 }
871
872 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
873 let initial_project_snapshot = self.initial_project_snapshot.clone();
874 let mut thread = DbThread {
875 title: self.title(),
876 messages: self.messages.clone(),
877 updated_at: self.updated_at,
878 detailed_summary: self.summary.clone(),
879 initial_project_snapshot: None,
880 cumulative_token_usage: self.cumulative_token_usage,
881 request_token_usage: self.request_token_usage.clone(),
882 model: self.model.as_ref().map(|model| DbLanguageModel {
883 provider: model.provider_id().to_string(),
884 model: model.name().0.to_string(),
885 }),
886 completion_mode: Some(self.completion_mode),
887 profile: Some(self.profile_id.clone()),
888 };
889
890 cx.background_spawn(async move {
891 let initial_project_snapshot = initial_project_snapshot.await;
892 thread.initial_project_snapshot = initial_project_snapshot;
893 thread
894 })
895 }
896
897 /// Create a snapshot of the current project state including git information and unsaved buffers.
898 fn project_snapshot(
899 project: Entity<Project>,
900 cx: &mut Context<Self>,
901 ) -> Task<Arc<ProjectSnapshot>> {
902 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
903 cx.spawn(async move |_, _| {
904 let snapshot = task.await;
905
906 Arc::new(ProjectSnapshot {
907 worktree_snapshots: snapshot.worktree_snapshots,
908 timestamp: Utc::now(),
909 })
910 })
911 }
912
913 pub fn project_context(&self) -> &Entity<ProjectContext> {
914 &self.project_context
915 }
916
917 pub fn project(&self) -> &Entity<Project> {
918 &self.project
919 }
920
921 pub fn action_log(&self) -> &Entity<ActionLog> {
922 &self.action_log
923 }
924
925 pub fn is_empty(&self) -> bool {
926 self.messages.is_empty() && self.title.is_none()
927 }
928
929 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
930 self.model.as_ref()
931 }
932
933 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
934 let old_usage = self.latest_token_usage();
935 self.model = Some(model);
936 let new_caps = Self::prompt_capabilities(self.model.as_deref());
937 let new_usage = self.latest_token_usage();
938 if old_usage != new_usage {
939 cx.emit(TokenUsageUpdated(new_usage));
940 }
941 self.prompt_capabilities_tx.send(new_caps).log_err();
942 cx.notify()
943 }
944
945 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
946 self.summarization_model.as_ref()
947 }
948
949 pub fn set_summarization_model(
950 &mut self,
951 model: Option<Arc<dyn LanguageModel>>,
952 cx: &mut Context<Self>,
953 ) {
954 self.summarization_model = model;
955 cx.notify()
956 }
957
958 pub fn completion_mode(&self) -> CompletionMode {
959 self.completion_mode
960 }
961
962 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
963 let old_usage = self.latest_token_usage();
964 self.completion_mode = mode;
965 let new_usage = self.latest_token_usage();
966 if old_usage != new_usage {
967 cx.emit(TokenUsageUpdated(new_usage));
968 }
969 cx.notify()
970 }
971
972 #[cfg(any(test, feature = "test-support"))]
973 pub fn last_message(&self) -> Option<Message> {
974 if let Some(message) = self.pending_message.clone() {
975 Some(Message::Agent(message))
976 } else {
977 self.messages.last().cloned()
978 }
979 }
980
981 pub fn add_default_tools(
982 &mut self,
983 environment: Rc<dyn ThreadEnvironment>,
984 cx: &mut Context<Self>,
985 ) {
986 let language_registry = self.project.read(cx).languages().clone();
987 self.add_tool(CopyPathTool::new(self.project.clone()));
988 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
989 self.add_tool(DeletePathTool::new(
990 self.project.clone(),
991 self.action_log.clone(),
992 ));
993 self.add_tool(DiagnosticsTool::new(self.project.clone()));
994 self.add_tool(EditFileTool::new(
995 self.project.clone(),
996 cx.weak_entity(),
997 language_registry,
998 Templates::new(),
999 ));
1000 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1001 self.add_tool(FindPathTool::new(self.project.clone()));
1002 self.add_tool(GrepTool::new(self.project.clone()));
1003 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1004 self.add_tool(MovePathTool::new(self.project.clone()));
1005 self.add_tool(NowTool);
1006 self.add_tool(OpenTool::new(self.project.clone()));
1007 self.add_tool(ReadFileTool::new(
1008 cx.weak_entity(),
1009 self.project.clone(),
1010 self.action_log.clone(),
1011 ));
1012 self.add_tool(SaveFileTool::new(self.project.clone()));
1013 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1014 self.add_tool(SpawnSubagentTool::new(None));
1015 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1016 self.add_tool(ThinkingTool);
1017 self.add_tool(WebSearchTool);
1018 }
1019
1020 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1021 self.tools.insert(T::name().into(), tool.erase());
1022 }
1023
1024 pub fn remove_tool(&mut self, name: &str) -> bool {
1025 self.tools.remove(name).is_some()
1026 }
1027
1028 pub fn profile(&self) -> &AgentProfileId {
1029 &self.profile_id
1030 }
1031
1032 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1033 if self.profile_id == profile_id {
1034 return;
1035 }
1036
1037 self.profile_id = profile_id;
1038
1039 // Swap to the profile's preferred model when available.
1040 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1041 self.set_model(model, cx);
1042 }
1043 }
1044
1045 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1046 if let Some(running_turn) = self.running_turn.take() {
1047 running_turn.cancel();
1048 }
1049 self.flush_pending_message(cx);
1050 }
1051
1052 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1053 let Some(last_user_message) = self.last_user_message() else {
1054 return;
1055 };
1056
1057 self.request_token_usage
1058 .insert(last_user_message.id.clone(), update);
1059 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1060 cx.notify();
1061 }
1062
1063 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1064 self.cancel(cx);
1065 let Some(position) = self.messages.iter().position(
1066 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1067 ) else {
1068 return Err(anyhow!("Message not found"));
1069 };
1070
1071 for message in self.messages.drain(position..) {
1072 match message {
1073 Message::User(message) => {
1074 self.request_token_usage.remove(&message.id);
1075 }
1076 Message::Agent(_) | Message::Resume => {}
1077 }
1078 }
1079 self.clear_summary();
1080 cx.notify();
1081 Ok(())
1082 }
1083
1084 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1085 let last_user_message = self.last_user_message()?;
1086 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1087 Some(*tokens)
1088 }
1089
1090 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1091 let usage = self.latest_request_token_usage()?;
1092 let model = self.model.clone()?;
1093 Some(acp_thread::TokenUsage {
1094 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1095 used_tokens: usage.total_tokens(),
1096 })
1097 }
1098
1099 /// Look up the active profile and resolve its preferred model if one is configured.
1100 fn resolve_profile_model(
1101 profile_id: &AgentProfileId,
1102 cx: &mut Context<Self>,
1103 ) -> Option<Arc<dyn LanguageModel>> {
1104 let selection = AgentSettings::get_global(cx)
1105 .profiles
1106 .get(profile_id)?
1107 .default_model
1108 .clone()?;
1109 Self::resolve_model_from_selection(&selection, cx)
1110 }
1111
1112 /// Translate a stored model selection into the configured model from the registry.
1113 fn resolve_model_from_selection(
1114 selection: &LanguageModelSelection,
1115 cx: &mut Context<Self>,
1116 ) -> Option<Arc<dyn LanguageModel>> {
1117 let selected = SelectedModel {
1118 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1119 model: LanguageModelId::from(selection.model.clone()),
1120 };
1121 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1122 registry
1123 .select_model(&selected, cx)
1124 .map(|configured| configured.model)
1125 })
1126 }
1127
1128 pub fn resume(
1129 &mut self,
1130 cx: &mut Context<Self>,
1131 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1132 self.messages.push(Message::Resume);
1133 cx.notify();
1134
1135 log::debug!("Total messages in thread: {}", self.messages.len());
1136 self.run_turn(cx)
1137 }
1138
1139 /// Sending a message results in the model streaming a response, which could include tool calls.
1140 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1141 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1142 pub fn send<T>(
1143 &mut self,
1144 id: UserMessageId,
1145 content: impl IntoIterator<Item = T>,
1146 cx: &mut Context<Self>,
1147 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1148 where
1149 T: Into<UserMessageContent>,
1150 {
1151 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1152 log::debug!("Thread::send content: {:?}", content);
1153
1154 self.messages
1155 .push(Message::User(UserMessage { id, content }));
1156 cx.notify();
1157
1158 self.send_existing(cx)
1159 }
1160
1161 pub fn send_existing(
1162 &mut self,
1163 cx: &mut Context<Self>,
1164 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1165 let model = self.model().context("No language model configured")?;
1166
1167 log::info!("Thread::send called with model: {}", model.name().0);
1168 self.advance_prompt_id();
1169
1170 log::debug!("Total messages in thread: {}", self.messages.len());
1171 self.run_turn(cx)
1172 }
1173
1174 pub fn push_acp_user_block(
1175 &mut self,
1176 id: UserMessageId,
1177 blocks: impl IntoIterator<Item = acp::ContentBlock>,
1178 path_style: PathStyle,
1179 cx: &mut Context<Self>,
1180 ) {
1181 let content = blocks
1182 .into_iter()
1183 .map(|block| UserMessageContent::from_content_block(block, path_style))
1184 .collect::<Vec<_>>();
1185 self.messages
1186 .push(Message::User(UserMessage { id, content }));
1187 cx.notify();
1188 }
1189
1190 pub fn push_acp_agent_block(&mut self, block: acp::ContentBlock, cx: &mut Context<Self>) {
1191 let text = match block {
1192 acp::ContentBlock::Text(text_content) => text_content.text,
1193 acp::ContentBlock::Image(_) => "[image]".to_string(),
1194 acp::ContentBlock::Audio(_) => "[audio]".to_string(),
1195 acp::ContentBlock::ResourceLink(resource_link) => resource_link.uri,
1196 acp::ContentBlock::Resource(resource) => match resource.resource {
1197 acp::EmbeddedResourceResource::TextResourceContents(resource) => resource.uri,
1198 acp::EmbeddedResourceResource::BlobResourceContents(resource) => resource.uri,
1199 _ => "[resource]".to_string(),
1200 },
1201 _ => "[unknown]".to_string(),
1202 };
1203
1204 self.messages.push(Message::Agent(AgentMessage {
1205 content: vec![AgentMessageContent::Text(text)],
1206 ..Default::default()
1207 }));
1208 cx.notify();
1209 }
1210
1211 #[cfg(feature = "eval")]
1212 pub fn proceed(
1213 &mut self,
1214 cx: &mut Context<Self>,
1215 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1216 self.run_turn(cx)
1217 }
1218
1219 fn run_turn(
1220 &mut self,
1221 cx: &mut Context<Self>,
1222 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1223 self.cancel(cx);
1224
1225 let model = self.model.clone().context("No language model configured")?;
1226 let profile = AgentSettings::get_global(cx)
1227 .profiles
1228 .get(&self.profile_id)
1229 .context("Profile not found")?;
1230 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1231 let event_stream = ThreadEventStream(events_tx);
1232 let message_ix = self.messages.len().saturating_sub(1);
1233 self.tool_use_limit_reached = false;
1234 self.clear_summary();
1235 self.running_turn = Some(RunningTurn {
1236 event_stream: event_stream.clone(),
1237 tools: self.enabled_tools(profile, &model, cx),
1238 _task: cx.spawn(async move |this, cx| {
1239 log::debug!("Starting agent turn execution");
1240
1241 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1242 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1243
1244 match turn_result {
1245 Ok(()) => {
1246 log::debug!("Turn execution completed");
1247 event_stream.send_stop(acp::StopReason::EndTurn);
1248 }
1249 Err(error) => {
1250 log::error!("Turn execution failed: {:?}", error);
1251 match error.downcast::<CompletionError>() {
1252 Ok(CompletionError::Refusal) => {
1253 event_stream.send_stop(acp::StopReason::Refusal);
1254 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1255 }
1256 Ok(CompletionError::MaxTokens) => {
1257 event_stream.send_stop(acp::StopReason::MaxTokens);
1258 }
1259 Ok(CompletionError::Other(error)) | Err(error) => {
1260 event_stream.send_error(error);
1261 }
1262 }
1263 }
1264 }
1265
1266 _ = this.update(cx, |this, _| this.running_turn.take());
1267 }),
1268 });
1269 Ok(events_rx)
1270 }
1271
1272 async fn run_turn_internal(
1273 this: &WeakEntity<Self>,
1274 model: Arc<dyn LanguageModel>,
1275 event_stream: &ThreadEventStream,
1276 cx: &mut AsyncApp,
1277 ) -> Result<()> {
1278 let mut attempt = 0;
1279 let mut intent = CompletionIntent::UserPrompt;
1280 loop {
1281 let request =
1282 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1283
1284 telemetry::event!(
1285 "Agent Thread Completion",
1286 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1287 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1288 model = model.telemetry_id(),
1289 model_provider = model.provider_id().to_string(),
1290 attempt
1291 );
1292
1293 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1294
1295 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1296 Ok(events) => (events, None),
1297 Err(err) => (stream::empty().boxed(), Some(err)),
1298 };
1299 let mut tool_results = FuturesUnordered::new();
1300 while let Some(event) = events.next().await {
1301 log::trace!("Received completion event: {:?}", event);
1302 match event {
1303 Ok(event) => {
1304 tool_results.extend(this.update(cx, |this, cx| {
1305 this.handle_completion_event(event, event_stream, cx)
1306 })??);
1307 }
1308 Err(err) => {
1309 error = Some(err);
1310 break;
1311 }
1312 }
1313 }
1314
1315 let end_turn = tool_results.is_empty();
1316 while let Some(tool_result) = tool_results.next().await {
1317 log::debug!("Tool finished {:?}", tool_result);
1318
1319 event_stream.update_tool_call_fields(
1320 &tool_result.tool_use_id,
1321 acp::ToolCallUpdateFields::new()
1322 .status(if tool_result.is_error {
1323 acp::ToolCallStatus::Failed
1324 } else {
1325 acp::ToolCallStatus::Completed
1326 })
1327 .raw_output(tool_result.output.clone()),
1328 );
1329 this.update(cx, |this, _cx| {
1330 this.pending_message()
1331 .tool_results
1332 .insert(tool_result.tool_use_id.clone(), tool_result);
1333 })?;
1334 }
1335
1336 this.update(cx, |this, cx| {
1337 this.flush_pending_message(cx);
1338 if this.title.is_none() && this.pending_title_generation.is_none() {
1339 this.generate_title(cx);
1340 }
1341 })?;
1342
1343 if let Some(error) = error {
1344 attempt += 1;
1345 let retry = this.update(cx, |this, cx| {
1346 let user_store = this.user_store.read(cx);
1347 this.handle_completion_error(error, attempt, user_store.plan())
1348 })??;
1349 let timer = cx.background_executor().timer(retry.duration);
1350 event_stream.send_retry(retry);
1351 timer.await;
1352 this.update(cx, |this, _cx| {
1353 if let Some(Message::Agent(message)) = this.messages.last() {
1354 if message.tool_results.is_empty() {
1355 intent = CompletionIntent::UserPrompt;
1356 this.messages.push(Message::Resume);
1357 }
1358 }
1359 })?;
1360 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1361 return Err(language_model::ToolUseLimitReachedError.into());
1362 } else if end_turn {
1363 return Ok(());
1364 } else {
1365 intent = CompletionIntent::ToolResults;
1366 attempt = 0;
1367 }
1368 }
1369 }
1370
1371 fn handle_completion_error(
1372 &mut self,
1373 error: LanguageModelCompletionError,
1374 attempt: u8,
1375 plan: Option<Plan>,
1376 ) -> Result<acp_thread::RetryStatus> {
1377 let Some(model) = self.model.as_ref() else {
1378 return Err(anyhow!(error));
1379 };
1380
1381 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1382 match plan {
1383 Some(Plan::V2(_)) => true,
1384 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1385 None => false,
1386 }
1387 } else {
1388 true
1389 };
1390
1391 if !auto_retry {
1392 return Err(anyhow!(error));
1393 }
1394
1395 let Some(strategy) = Self::retry_strategy_for(&error) else {
1396 return Err(anyhow!(error));
1397 };
1398
1399 let max_attempts = match &strategy {
1400 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1401 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1402 };
1403
1404 if attempt > max_attempts {
1405 return Err(anyhow!(error));
1406 }
1407
1408 let delay = match &strategy {
1409 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1410 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1411 Duration::from_secs(delay_secs)
1412 }
1413 RetryStrategy::Fixed { delay, .. } => *delay,
1414 };
1415 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1416
1417 Ok(acp_thread::RetryStatus {
1418 last_error: error.to_string().into(),
1419 attempt: attempt as usize,
1420 max_attempts: max_attempts as usize,
1421 started_at: Instant::now(),
1422 duration: delay,
1423 })
1424 }
1425
1426 /// A helper method that's called on every streamed completion event.
1427 /// Returns an optional tool result task, which the main agentic loop will
1428 /// send back to the model when it resolves.
1429 fn handle_completion_event(
1430 &mut self,
1431 event: LanguageModelCompletionEvent,
1432 event_stream: &ThreadEventStream,
1433 cx: &mut Context<Self>,
1434 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1435 log::trace!("Handling streamed completion event: {:?}", event);
1436 use LanguageModelCompletionEvent::*;
1437
1438 match event {
1439 StartMessage { .. } => {
1440 self.flush_pending_message(cx);
1441 self.pending_message = Some(AgentMessage::default());
1442 }
1443 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1444 Thinking { text, signature } => {
1445 self.handle_thinking_event(text, signature, event_stream, cx)
1446 }
1447 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1448 ReasoningDetails(details) => {
1449 let last_message = self.pending_message();
1450 // Store the last non-empty reasoning_details (overwrites earlier ones)
1451 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1452 if let serde_json::Value::Array(ref arr) = details {
1453 if !arr.is_empty() {
1454 last_message.reasoning_details = Some(details);
1455 }
1456 } else {
1457 last_message.reasoning_details = Some(details);
1458 }
1459 }
1460 ToolUse(tool_use) => {
1461 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1462 }
1463 ToolUseJsonParseError {
1464 id,
1465 tool_name,
1466 raw_input,
1467 json_parse_error,
1468 } => {
1469 return Ok(Some(Task::ready(
1470 self.handle_tool_use_json_parse_error_event(
1471 id,
1472 tool_name,
1473 raw_input,
1474 json_parse_error,
1475 ),
1476 )));
1477 }
1478 UsageUpdate(usage) => {
1479 telemetry::event!(
1480 "Agent Thread Completion Usage Updated",
1481 thread_id = self.id.to_string(),
1482 prompt_id = self.prompt_id.to_string(),
1483 model = self.model.as_ref().map(|m| m.telemetry_id()),
1484 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1485 input_tokens = usage.input_tokens,
1486 output_tokens = usage.output_tokens,
1487 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1488 cache_read_input_tokens = usage.cache_read_input_tokens,
1489 );
1490 self.update_token_usage(usage, cx);
1491 }
1492 UsageUpdated { amount, limit } => {
1493 self.update_model_request_usage(amount, limit, cx);
1494 }
1495 ToolUseLimitReached => {
1496 self.tool_use_limit_reached = true;
1497 }
1498 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1499 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1500 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1501 Started | Queued { .. } => {}
1502 }
1503
1504 Ok(None)
1505 }
1506
1507 fn handle_text_event(
1508 &mut self,
1509 new_text: String,
1510 event_stream: &ThreadEventStream,
1511 cx: &mut Context<Self>,
1512 ) {
1513 event_stream.send_text(&new_text);
1514
1515 let last_message = self.pending_message();
1516 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1517 text.push_str(&new_text);
1518 } else {
1519 last_message
1520 .content
1521 .push(AgentMessageContent::Text(new_text));
1522 }
1523
1524 cx.notify();
1525 }
1526
1527 fn handle_thinking_event(
1528 &mut self,
1529 new_text: String,
1530 new_signature: Option<String>,
1531 event_stream: &ThreadEventStream,
1532 cx: &mut Context<Self>,
1533 ) {
1534 event_stream.send_thinking(&new_text);
1535
1536 let last_message = self.pending_message();
1537 if let Some(AgentMessageContent::Thinking { text, signature }) =
1538 last_message.content.last_mut()
1539 {
1540 text.push_str(&new_text);
1541 *signature = new_signature.or(signature.take());
1542 } else {
1543 last_message.content.push(AgentMessageContent::Thinking {
1544 text: new_text,
1545 signature: new_signature,
1546 });
1547 }
1548
1549 cx.notify();
1550 }
1551
1552 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1553 let last_message = self.pending_message();
1554 last_message
1555 .content
1556 .push(AgentMessageContent::RedactedThinking(data));
1557 cx.notify();
1558 }
1559
1560 fn handle_tool_use_event(
1561 &mut self,
1562 tool_use: LanguageModelToolUse,
1563 event_stream: &ThreadEventStream,
1564 cx: &mut Context<Self>,
1565 ) -> Option<Task<LanguageModelToolResult>> {
1566 cx.notify();
1567
1568 let tool = self.tool(tool_use.name.as_ref());
1569 let mut title = SharedString::from(&tool_use.name);
1570 let mut kind = acp::ToolKind::Other;
1571 if let Some(tool) = tool.as_ref() {
1572 title = tool.initial_title(tool_use.input.clone(), cx);
1573 kind = tool.kind();
1574 }
1575
1576 // Ensure the last message ends in the current tool use
1577 let last_message = self.pending_message();
1578 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1579 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1580 if last_tool_use.id == tool_use.id {
1581 *last_tool_use = tool_use.clone();
1582 false
1583 } else {
1584 true
1585 }
1586 } else {
1587 true
1588 }
1589 });
1590
1591 if push_new_tool_use {
1592 event_stream.send_tool_call(
1593 &tool_use.id,
1594 &tool_use.name,
1595 title,
1596 kind,
1597 tool_use.input.clone(),
1598 );
1599 last_message
1600 .content
1601 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1602 } else {
1603 event_stream.update_tool_call_fields(
1604 &tool_use.id,
1605 acp::ToolCallUpdateFields::new()
1606 .title(title.as_str())
1607 .kind(kind)
1608 .raw_input(tool_use.input.clone()),
1609 );
1610 }
1611
1612 if !tool_use.is_input_complete {
1613 return None;
1614 }
1615
1616 let Some(tool) = tool else {
1617 let content = format!("No tool named {} exists", tool_use.name);
1618 return Some(Task::ready(LanguageModelToolResult {
1619 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1620 tool_use_id: tool_use.id,
1621 tool_name: tool_use.name,
1622 is_error: true,
1623 output: None,
1624 }));
1625 };
1626
1627 let fs = self.project.read(cx).fs().clone();
1628 let tool_event_stream =
1629 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1630 tool_event_stream.update_fields(
1631 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1632 );
1633 let supports_images = self.model().is_some_and(|model| model.supports_images());
1634 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1635 log::debug!("Running tool {}", tool_use.name);
1636 Some(cx.foreground_executor().spawn(async move {
1637 let tool_result = tool_result.await.and_then(|output| {
1638 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1639 && !supports_images
1640 {
1641 return Err(anyhow!(
1642 "Attempted to read an image, but this model doesn't support it.",
1643 ));
1644 }
1645 Ok(output)
1646 });
1647
1648 match tool_result {
1649 Ok(output) => LanguageModelToolResult {
1650 tool_use_id: tool_use.id,
1651 tool_name: tool_use.name,
1652 is_error: false,
1653 content: output.llm_output,
1654 output: Some(output.raw_output),
1655 },
1656 Err(error) => LanguageModelToolResult {
1657 tool_use_id: tool_use.id,
1658 tool_name: tool_use.name,
1659 is_error: true,
1660 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1661 output: Some(error.to_string().into()),
1662 },
1663 }
1664 }))
1665 }
1666
1667 fn handle_tool_use_json_parse_error_event(
1668 &mut self,
1669 tool_use_id: LanguageModelToolUseId,
1670 tool_name: Arc<str>,
1671 raw_input: Arc<str>,
1672 json_parse_error: String,
1673 ) -> LanguageModelToolResult {
1674 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1675 LanguageModelToolResult {
1676 tool_use_id,
1677 tool_name,
1678 is_error: true,
1679 content: LanguageModelToolResultContent::Text(tool_output.into()),
1680 output: Some(serde_json::Value::String(raw_input.to_string())),
1681 }
1682 }
1683
1684 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1685 self.project
1686 .read(cx)
1687 .user_store()
1688 .update(cx, |user_store, cx| {
1689 user_store.update_model_request_usage(
1690 ModelRequestUsage(RequestUsage {
1691 amount: amount as i32,
1692 limit,
1693 }),
1694 cx,
1695 )
1696 });
1697 }
1698
1699 pub fn title(&self) -> SharedString {
1700 self.title.clone().unwrap_or("New Thread".into())
1701 }
1702
1703 pub fn is_generating_summary(&self) -> bool {
1704 self.pending_summary_generation.is_some()
1705 }
1706
1707 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1708 if let Some(summary) = self.summary.as_ref() {
1709 return Task::ready(Some(summary.clone())).shared();
1710 }
1711 if let Some(task) = self.pending_summary_generation.clone() {
1712 return task;
1713 }
1714 let Some(model) = self.summarization_model.clone() else {
1715 log::error!("No summarization model available");
1716 return Task::ready(None).shared();
1717 };
1718 let mut request = LanguageModelRequest {
1719 intent: Some(CompletionIntent::ThreadContextSummarization),
1720 temperature: AgentSettings::temperature_for_model(&model, cx),
1721 ..Default::default()
1722 };
1723
1724 for message in &self.messages {
1725 request.messages.extend(message.to_request());
1726 }
1727
1728 request.messages.push(LanguageModelRequestMessage {
1729 role: Role::User,
1730 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1731 cache: false,
1732 reasoning_details: None,
1733 });
1734
1735 let task = cx
1736 .spawn(async move |this, cx| {
1737 let mut summary = String::new();
1738 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1739 while let Some(event) = messages.next().await {
1740 let event = event.log_err()?;
1741 let text = match event {
1742 LanguageModelCompletionEvent::Text(text) => text,
1743 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1744 this.update(cx, |thread, cx| {
1745 thread.update_model_request_usage(amount, limit, cx);
1746 })
1747 .ok()?;
1748 continue;
1749 }
1750 _ => continue,
1751 };
1752
1753 let mut lines = text.lines();
1754 summary.extend(lines.next());
1755 }
1756
1757 log::debug!("Setting summary: {}", summary);
1758 let summary = SharedString::from(summary);
1759
1760 this.update(cx, |this, cx| {
1761 this.summary = Some(summary.clone());
1762 this.pending_summary_generation = None;
1763 cx.notify()
1764 })
1765 .ok()?;
1766
1767 Some(summary)
1768 })
1769 .shared();
1770 self.pending_summary_generation = Some(task.clone());
1771 task
1772 }
1773
1774 fn generate_title(&mut self, cx: &mut Context<Self>) {
1775 let Some(model) = self.summarization_model.clone() else {
1776 return;
1777 };
1778
1779 log::debug!(
1780 "Generating title with model: {:?}",
1781 self.summarization_model.as_ref().map(|model| model.name())
1782 );
1783 let mut request = LanguageModelRequest {
1784 intent: Some(CompletionIntent::ThreadSummarization),
1785 temperature: AgentSettings::temperature_for_model(&model, cx),
1786 ..Default::default()
1787 };
1788
1789 for message in &self.messages {
1790 request.messages.extend(message.to_request());
1791 }
1792
1793 request.messages.push(LanguageModelRequestMessage {
1794 role: Role::User,
1795 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1796 cache: false,
1797 reasoning_details: None,
1798 });
1799 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1800 let mut title = String::new();
1801
1802 let generate = async {
1803 let mut messages = model.stream_completion(request, cx).await?;
1804 while let Some(event) = messages.next().await {
1805 let event = event?;
1806 let text = match event {
1807 LanguageModelCompletionEvent::Text(text) => text,
1808 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1809 this.update(cx, |thread, cx| {
1810 thread.update_model_request_usage(amount, limit, cx);
1811 })?;
1812 continue;
1813 }
1814 _ => continue,
1815 };
1816
1817 let mut lines = text.lines();
1818 title.extend(lines.next());
1819
1820 // Stop if the LLM generated multiple lines.
1821 if lines.next().is_some() {
1822 break;
1823 }
1824 }
1825 anyhow::Ok(())
1826 };
1827
1828 if generate.await.context("failed to generate title").is_ok() {
1829 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1830 }
1831 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1832 }));
1833 }
1834
1835 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1836 self.pending_title_generation = None;
1837 if Some(&title) != self.title.as_ref() {
1838 self.title = Some(title);
1839 cx.emit(TitleUpdated);
1840 cx.notify();
1841 }
1842 }
1843
1844 fn clear_summary(&mut self) {
1845 self.summary = None;
1846 self.pending_summary_generation = None;
1847 }
1848
1849 fn last_user_message(&self) -> Option<&UserMessage> {
1850 self.messages
1851 .iter()
1852 .rev()
1853 .find_map(|message| match message {
1854 Message::User(user_message) => Some(user_message),
1855 Message::Agent(_) => None,
1856 Message::Resume => None,
1857 })
1858 }
1859
1860 fn pending_message(&mut self) -> &mut AgentMessage {
1861 self.pending_message.get_or_insert_default()
1862 }
1863
1864 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1865 let Some(mut message) = self.pending_message.take() else {
1866 return;
1867 };
1868
1869 if message.content.is_empty() {
1870 return;
1871 }
1872
1873 for content in &message.content {
1874 let AgentMessageContent::ToolUse(tool_use) = content else {
1875 continue;
1876 };
1877
1878 if !message.tool_results.contains_key(&tool_use.id) {
1879 message.tool_results.insert(
1880 tool_use.id.clone(),
1881 LanguageModelToolResult {
1882 tool_use_id: tool_use.id.clone(),
1883 tool_name: tool_use.name.clone(),
1884 is_error: true,
1885 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1886 output: None,
1887 },
1888 );
1889 }
1890 }
1891
1892 self.messages.push(Message::Agent(message));
1893 self.updated_at = Utc::now();
1894 self.clear_summary();
1895 cx.notify()
1896 }
1897
1898 pub(crate) fn build_completion_request(
1899 &self,
1900 completion_intent: CompletionIntent,
1901 cx: &App,
1902 ) -> Result<LanguageModelRequest> {
1903 let model = self.model().context("No language model configured")?;
1904 let tools = if let Some(turn) = self.running_turn.as_ref() {
1905 turn.tools
1906 .iter()
1907 .filter_map(|(tool_name, tool)| {
1908 log::trace!("Including tool: {}", tool_name);
1909 Some(LanguageModelRequestTool {
1910 name: tool_name.to_string(),
1911 description: tool.description().to_string(),
1912 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1913 })
1914 })
1915 .collect::<Vec<_>>()
1916 } else {
1917 Vec::new()
1918 };
1919
1920 log::debug!("Building completion request");
1921 log::debug!("Completion intent: {:?}", completion_intent);
1922 log::debug!("Completion mode: {:?}", self.completion_mode);
1923
1924 let available_tools: Vec<_> = self
1925 .running_turn
1926 .as_ref()
1927 .map(|turn| turn.tools.keys().cloned().collect())
1928 .unwrap_or_default();
1929
1930 log::debug!("Request includes {} tools", available_tools.len());
1931 let messages = self.build_request_messages(available_tools, cx);
1932 log::debug!("Request will include {} messages", messages.len());
1933
1934 let request = LanguageModelRequest {
1935 thread_id: Some(self.id.to_string()),
1936 prompt_id: Some(self.prompt_id.to_string()),
1937 intent: Some(completion_intent),
1938 mode: Some(self.completion_mode.into()),
1939 messages,
1940 tools,
1941 tool_choice: None,
1942 stop: Vec::new(),
1943 temperature: AgentSettings::temperature_for_model(model, cx),
1944 thinking_allowed: true,
1945 };
1946
1947 log::debug!("Completion request built successfully");
1948 Ok(request)
1949 }
1950
1951 fn enabled_tools(
1952 &self,
1953 profile: &AgentProfileSettings,
1954 model: &Arc<dyn LanguageModel>,
1955 cx: &App,
1956 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1957 fn truncate(tool_name: &SharedString) -> SharedString {
1958 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1959 let mut truncated = tool_name.to_string();
1960 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1961 truncated.into()
1962 } else {
1963 tool_name.clone()
1964 }
1965 }
1966
1967 let mut tools = self
1968 .tools
1969 .iter()
1970 .filter_map(|(tool_name, tool)| {
1971 if tool.supports_provider(&model.provider_id())
1972 && profile.is_tool_enabled(tool_name)
1973 {
1974 Some((truncate(tool_name), tool.clone()))
1975 } else {
1976 None
1977 }
1978 })
1979 .collect::<BTreeMap<_, _>>();
1980
1981 let mut context_server_tools = Vec::new();
1982 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1983 let mut duplicate_tool_names = HashSet::default();
1984 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1985 for (tool_name, tool) in server_tools {
1986 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1987 let tool_name = truncate(tool_name);
1988 if !seen_tools.insert(tool_name.clone()) {
1989 duplicate_tool_names.insert(tool_name.clone());
1990 }
1991 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1992 }
1993 }
1994 }
1995
1996 // When there are duplicate tool names, disambiguate by prefixing them
1997 // with the server ID. In the rare case there isn't enough space for the
1998 // disambiguated tool name, keep only the last tool with this name.
1999 for (server_id, tool_name, tool) in context_server_tools {
2000 if duplicate_tool_names.contains(&tool_name) {
2001 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
2002 if available >= 2 {
2003 let mut disambiguated = server_id.0.to_string();
2004 disambiguated.truncate(available - 1);
2005 disambiguated.push('_');
2006 disambiguated.push_str(&tool_name);
2007 tools.insert(disambiguated.into(), tool.clone());
2008 } else {
2009 tools.insert(tool_name, tool.clone());
2010 }
2011 } else {
2012 tools.insert(tool_name, tool.clone());
2013 }
2014 }
2015
2016 tools
2017 }
2018
2019 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
2020 self.running_turn.as_ref()?.tools.get(name).cloned()
2021 }
2022
2023 pub fn has_tool(&self, name: &str) -> bool {
2024 self.running_turn
2025 .as_ref()
2026 .is_some_and(|turn| turn.tools.contains_key(name))
2027 }
2028
2029 fn build_request_messages(
2030 &self,
2031 available_tools: Vec<SharedString>,
2032 cx: &App,
2033 ) -> Vec<LanguageModelRequestMessage> {
2034 log::trace!(
2035 "Building request messages from {} thread messages",
2036 self.messages.len()
2037 );
2038
2039 let system_prompt = SystemPromptTemplate {
2040 project: self.project_context.read(cx),
2041 available_tools,
2042 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2043 }
2044 .render(&self.templates)
2045 .context("failed to build system prompt")
2046 .expect("Invalid template");
2047 let mut messages = vec![LanguageModelRequestMessage {
2048 role: Role::System,
2049 content: vec![system_prompt.into()],
2050 cache: false,
2051 reasoning_details: None,
2052 }];
2053 for message in &self.messages {
2054 messages.extend(message.to_request());
2055 }
2056
2057 if let Some(last_message) = messages.last_mut() {
2058 last_message.cache = true;
2059 }
2060
2061 if let Some(message) = self.pending_message.as_ref() {
2062 messages.extend(message.to_request());
2063 }
2064
2065 messages
2066 }
2067
2068 pub fn to_markdown(&self) -> String {
2069 let mut markdown = String::new();
2070 for (ix, message) in self.messages.iter().enumerate() {
2071 if ix > 0 {
2072 markdown.push('\n');
2073 }
2074 markdown.push_str(&message.to_markdown());
2075 }
2076
2077 if let Some(message) = self.pending_message.as_ref() {
2078 markdown.push('\n');
2079 markdown.push_str(&message.to_markdown());
2080 }
2081
2082 markdown
2083 }
2084
2085 fn advance_prompt_id(&mut self) {
2086 self.prompt_id = PromptId::new();
2087 }
2088
2089 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2090 use LanguageModelCompletionError::*;
2091 use http_client::StatusCode;
2092
2093 // General strategy here:
2094 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2095 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times with exponential backoff.
2096 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2097 match error {
2098 HttpResponseError {
2099 status_code: StatusCode::TOO_MANY_REQUESTS,
2100 ..
2101 } => Some(RetryStrategy::ExponentialBackoff {
2102 initial_delay: BASE_RETRY_DELAY,
2103 max_attempts: MAX_RETRY_ATTEMPTS,
2104 }),
2105 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2106 Some(RetryStrategy::Fixed {
2107 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2108 max_attempts: MAX_RETRY_ATTEMPTS,
2109 })
2110 }
2111 UpstreamProviderError {
2112 status,
2113 retry_after,
2114 ..
2115 } => match *status {
2116 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2117 Some(RetryStrategy::Fixed {
2118 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2119 max_attempts: MAX_RETRY_ATTEMPTS,
2120 })
2121 }
2122 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2123 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2124 // Internal Server Error could be anything, retry up to 3 times.
2125 max_attempts: 3,
2126 }),
2127 status => {
2128 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2129 // but we frequently get them in practice. See https://http.dev/529
2130 if status.as_u16() == 529 {
2131 Some(RetryStrategy::Fixed {
2132 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2133 max_attempts: MAX_RETRY_ATTEMPTS,
2134 })
2135 } else {
2136 Some(RetryStrategy::Fixed {
2137 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2138 max_attempts: 2,
2139 })
2140 }
2141 }
2142 },
2143 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2144 delay: BASE_RETRY_DELAY,
2145 max_attempts: 3,
2146 }),
2147 ApiReadResponseError { .. }
2148 | HttpSend { .. }
2149 | DeserializeResponse { .. }
2150 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2151 delay: BASE_RETRY_DELAY,
2152 max_attempts: 3,
2153 }),
2154 // Retrying these errors definitely shouldn't help.
2155 HttpResponseError {
2156 status_code:
2157 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2158 ..
2159 }
2160 | AuthenticationError { .. }
2161 | PermissionError { .. }
2162 | NoApiKey { .. }
2163 | ApiEndpointNotFound { .. }
2164 | PromptTooLarge { .. } => None,
2165 // These errors might be transient, so retry them
2166 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2167 delay: BASE_RETRY_DELAY,
2168 max_attempts: 1,
2169 }),
2170 // Retry all other 4xx and 5xx errors once.
2171 HttpResponseError { status_code, .. }
2172 if status_code.is_client_error() || status_code.is_server_error() =>
2173 {
2174 Some(RetryStrategy::Fixed {
2175 delay: BASE_RETRY_DELAY,
2176 max_attempts: 3,
2177 })
2178 }
2179 Other(err)
2180 if err.is::<language_model::PaymentRequiredError>()
2181 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2182 {
2183 // Retrying won't help for Payment Required or Model Request Limit errors (where
2184 // the user must upgrade to usage-based billing to get more requests, or else wait
2185 // for a significant amount of time for the request limit to reset).
2186 None
2187 }
2188 // Conservatively assume that any other errors are non-retryable
2189 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2190 delay: BASE_RETRY_DELAY,
2191 max_attempts: 2,
2192 }),
2193 }
2194 }
2195}
2196
2197struct RunningTurn {
2198 /// Holds the task that handles agent interaction until the end of the turn.
2199 /// Survives across multiple requests as the model performs tool calls and
2200 /// we run tools, report their results.
2201 _task: Task<()>,
2202 /// The current event stream for the running turn. Used to report a final
2203 /// cancellation event if we cancel the turn.
2204 event_stream: ThreadEventStream,
2205 /// The tools that were enabled for this turn.
2206 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2207}
2208
2209impl RunningTurn {
2210 fn cancel(self) {
2211 log::debug!("Cancelling in progress turn");
2212 self.event_stream.send_canceled();
2213 }
2214}
2215
2216pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2217
2218impl EventEmitter<TokenUsageUpdated> for Thread {}
2219
2220pub struct TitleUpdated;
2221
2222impl EventEmitter<TitleUpdated> for Thread {}
2223
2224pub trait AgentTool
2225where
2226 Self: 'static + Sized,
2227{
2228 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2229 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2230
2231 fn name() -> &'static str;
2232
2233 fn description() -> SharedString {
2234 let schema = schemars::schema_for!(Self::Input);
2235 SharedString::new(
2236 schema
2237 .get("description")
2238 .and_then(|description| description.as_str())
2239 .unwrap_or_default(),
2240 )
2241 }
2242
2243 fn kind() -> acp::ToolKind;
2244
2245 /// The initial tool title to display. Can be updated during the tool run.
2246 fn initial_title(
2247 &self,
2248 input: Result<Self::Input, serde_json::Value>,
2249 cx: &mut App,
2250 ) -> SharedString;
2251
2252 /// Returns the JSON schema that describes the tool's input.
2253 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2254 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2255 }
2256
2257 /// Some tools rely on a provider for the underlying billing or other reasons.
2258 /// Allow the tool to check if they are compatible, or should be filtered out.
2259 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2260 true
2261 }
2262
2263 /// Runs the tool with the provided input.
2264 fn run(
2265 self: Arc<Self>,
2266 input: Self::Input,
2267 event_stream: ToolCallEventStream,
2268 cx: &mut App,
2269 ) -> Task<Result<Self::Output>>;
2270
2271 /// Emits events for a previous execution of the tool.
2272 fn replay(
2273 &self,
2274 _input: Self::Input,
2275 _output: Self::Output,
2276 _event_stream: ToolCallEventStream,
2277 _cx: &mut App,
2278 ) -> Result<()> {
2279 Ok(())
2280 }
2281
2282 fn erase(self) -> Arc<dyn AnyAgentTool> {
2283 Arc::new(Erased(Arc::new(self)))
2284 }
2285}
2286
2287pub struct Erased<T>(T);
2288
2289pub struct AgentToolOutput {
2290 pub llm_output: LanguageModelToolResultContent,
2291 pub raw_output: serde_json::Value,
2292}
2293
2294pub trait AnyAgentTool {
2295 fn name(&self) -> SharedString;
2296 fn description(&self) -> SharedString;
2297 fn kind(&self) -> acp::ToolKind;
2298 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2299 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2300 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2301 true
2302 }
2303 fn run(
2304 self: Arc<Self>,
2305 input: serde_json::Value,
2306 event_stream: ToolCallEventStream,
2307 cx: &mut App,
2308 ) -> Task<Result<AgentToolOutput>>;
2309 fn replay(
2310 &self,
2311 input: serde_json::Value,
2312 output: serde_json::Value,
2313 event_stream: ToolCallEventStream,
2314 cx: &mut App,
2315 ) -> Result<()>;
2316}
2317
2318impl<T> AnyAgentTool for Erased<Arc<T>>
2319where
2320 T: AgentTool,
2321{
2322 fn name(&self) -> SharedString {
2323 T::name().into()
2324 }
2325
2326 fn description(&self) -> SharedString {
2327 T::description()
2328 }
2329
2330 fn kind(&self) -> agent_client_protocol::ToolKind {
2331 T::kind()
2332 }
2333
2334 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString {
2335 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2336 self.0.initial_title(parsed_input, _cx)
2337 }
2338
2339 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2340 let mut json = serde_json::to_value(T::input_schema(format))?;
2341 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2342 Ok(json)
2343 }
2344
2345 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2346 T::supports_provider(provider)
2347 }
2348
2349 fn run(
2350 self: Arc<Self>,
2351 input: serde_json::Value,
2352 event_stream: ToolCallEventStream,
2353 cx: &mut App,
2354 ) -> Task<Result<AgentToolOutput>> {
2355 cx.spawn(async move |cx| {
2356 let input = serde_json::from_value(input)?;
2357 let output = cx
2358 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2359 .await?;
2360 let raw_output = serde_json::to_value(&output)?;
2361 Ok(AgentToolOutput {
2362 llm_output: output.into(),
2363 raw_output,
2364 })
2365 })
2366 }
2367
2368 fn replay(
2369 &self,
2370 input: serde_json::Value,
2371 output: serde_json::Value,
2372 event_stream: ToolCallEventStream,
2373 cx: &mut App,
2374 ) -> Result<()> {
2375 let input = serde_json::from_value(input)?;
2376 let output = serde_json::from_value(output)?;
2377 self.0.replay(input, output, event_stream, cx)
2378 }
2379}
2380
2381#[derive(Clone)]
2382struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2383
2384impl ThreadEventStream {
2385 fn send_user_message(&self, message: &UserMessage) {
2386 self.0
2387 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2388 .ok();
2389 }
2390
2391 fn send_text(&self, text: &str) {
2392 self.0
2393 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2394 .ok();
2395 }
2396
2397 fn send_thinking(&self, text: &str) {
2398 self.0
2399 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2400 .ok();
2401 }
2402
2403 fn send_tool_call(
2404 &self,
2405 id: &LanguageModelToolUseId,
2406 tool_name: &str,
2407 title: SharedString,
2408 kind: acp::ToolKind,
2409 input: serde_json::Value,
2410 ) {
2411 self.0
2412 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2413 id,
2414 tool_name,
2415 title.to_string(),
2416 kind,
2417 input,
2418 ))))
2419 .ok();
2420 }
2421
2422 fn initial_tool_call(
2423 id: &LanguageModelToolUseId,
2424 tool_name: &str,
2425 title: String,
2426 kind: acp::ToolKind,
2427 input: serde_json::Value,
2428 ) -> acp::ToolCall {
2429 acp::ToolCall::new(id.to_string(), title)
2430 .kind(kind)
2431 .raw_input(input)
2432 .meta(acp::Meta::from_iter([(
2433 "tool_name".into(),
2434 tool_name.into(),
2435 )]))
2436 }
2437
2438 fn update_tool_call_fields(
2439 &self,
2440 tool_use_id: &LanguageModelToolUseId,
2441 fields: acp::ToolCallUpdateFields,
2442 ) {
2443 self.0
2444 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2445 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2446 )))
2447 .ok();
2448 }
2449
2450 fn send_retry(&self, status: acp_thread::RetryStatus) {
2451 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2452 }
2453
2454 fn send_stop(&self, reason: acp::StopReason) {
2455 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2456 }
2457
2458 fn send_canceled(&self) {
2459 self.0
2460 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2461 .ok();
2462 }
2463
2464 fn send_error(&self, error: impl Into<anyhow::Error>) {
2465 self.0.unbounded_send(Err(error.into())).ok();
2466 }
2467}
2468
2469#[derive(Clone)]
2470pub struct ToolCallEventStream {
2471 tool_use_id: LanguageModelToolUseId,
2472 stream: ThreadEventStream,
2473 fs: Option<Arc<dyn Fs>>,
2474}
2475
2476impl ToolCallEventStream {
2477 #[cfg(any(test, feature = "test-support"))]
2478 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2479 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2480
2481 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2482
2483 (stream, ToolCallEventStreamReceiver(events_rx))
2484 }
2485
2486 fn new(
2487 tool_use_id: LanguageModelToolUseId,
2488 stream: ThreadEventStream,
2489 fs: Option<Arc<dyn Fs>>,
2490 ) -> Self {
2491 Self {
2492 tool_use_id,
2493 stream,
2494 fs,
2495 }
2496 }
2497
2498 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2499 self.stream
2500 .update_tool_call_fields(&self.tool_use_id, fields);
2501 }
2502
2503 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2504 self.stream
2505 .0
2506 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2507 acp_thread::ToolCallUpdateDiff {
2508 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2509 diff,
2510 }
2511 .into(),
2512 )))
2513 .ok();
2514 }
2515
2516 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2517 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2518 return Task::ready(Ok(()));
2519 }
2520
2521 let (response_tx, response_rx) = oneshot::channel();
2522 self.stream
2523 .0
2524 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2525 ToolCallAuthorization {
2526 tool_call: acp::ToolCallUpdate::new(
2527 self.tool_use_id.to_string(),
2528 acp::ToolCallUpdateFields::new().title(title.into()),
2529 ),
2530 options: vec![
2531 acp::PermissionOption::new(
2532 acp::PermissionOptionId::new("always_allow"),
2533 "Always Allow",
2534 acp::PermissionOptionKind::AllowAlways,
2535 ),
2536 acp::PermissionOption::new(
2537 acp::PermissionOptionId::new("allow"),
2538 "Allow",
2539 acp::PermissionOptionKind::AllowOnce,
2540 ),
2541 acp::PermissionOption::new(
2542 acp::PermissionOptionId::new("deny"),
2543 "Deny",
2544 acp::PermissionOptionKind::RejectOnce,
2545 ),
2546 ],
2547 response: response_tx,
2548 },
2549 )))
2550 .ok();
2551 let fs = self.fs.clone();
2552 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2553 "always_allow" => {
2554 if let Some(fs) = fs.clone() {
2555 cx.update(|cx| {
2556 update_settings_file(fs, cx, |settings, _| {
2557 settings
2558 .agent
2559 .get_or_insert_default()
2560 .set_always_allow_tool_actions(true);
2561 });
2562 })?;
2563 }
2564
2565 Ok(())
2566 }
2567 "allow" => Ok(()),
2568 _ => Err(anyhow!("Permission to run tool denied by user")),
2569 })
2570 }
2571}
2572
2573#[cfg(any(test, feature = "test-support"))]
2574pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2575
2576#[cfg(any(test, feature = "test-support"))]
2577impl ToolCallEventStreamReceiver {
2578 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2579 let event = self.0.next().await;
2580 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2581 auth
2582 } else {
2583 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2584 }
2585 }
2586
2587 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2588 let event = self.0.next().await;
2589 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2590 update,
2591 )))) = event
2592 {
2593 update.fields
2594 } else {
2595 panic!("Expected update fields but got: {:?}", event);
2596 }
2597 }
2598
2599 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2600 let event = self.0.next().await;
2601 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2602 update,
2603 )))) = event
2604 {
2605 update.diff
2606 } else {
2607 panic!("Expected diff but got: {:?}", event);
2608 }
2609 }
2610
2611 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2612 let event = self.0.next().await;
2613 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2614 update,
2615 )))) = event
2616 {
2617 update.terminal
2618 } else {
2619 panic!("Expected terminal but got: {:?}", event);
2620 }
2621 }
2622}
2623
2624#[cfg(any(test, feature = "test-support"))]
2625impl std::ops::Deref for ToolCallEventStreamReceiver {
2626 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2627
2628 fn deref(&self) -> &Self::Target {
2629 &self.0
2630 }
2631}
2632
2633#[cfg(any(test, feature = "test-support"))]
2634impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2635 fn deref_mut(&mut self) -> &mut Self::Target {
2636 &mut self.0
2637 }
2638}
2639
2640impl From<&str> for UserMessageContent {
2641 fn from(text: &str) -> Self {
2642 Self::Text(text.into())
2643 }
2644}
2645
2646impl UserMessageContent {
2647 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2648 match value {
2649 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2650 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2651 acp::ContentBlock::Audio(_) => {
2652 // TODO
2653 Self::Text("[audio]".to_string())
2654 }
2655 acp::ContentBlock::ResourceLink(resource_link) => {
2656 match MentionUri::parse(&resource_link.uri, path_style) {
2657 Ok(uri) => Self::Mention {
2658 uri,
2659 content: String::new(),
2660 },
2661 Err(err) => {
2662 log::error!("Failed to parse mention link: {}", err);
2663 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2664 }
2665 }
2666 }
2667 acp::ContentBlock::Resource(resource) => match resource.resource {
2668 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2669 match MentionUri::parse(&resource.uri, path_style) {
2670 Ok(uri) => Self::Mention {
2671 uri,
2672 content: resource.text,
2673 },
2674 Err(err) => {
2675 log::error!("Failed to parse mention link: {}", err);
2676 Self::Text(
2677 MarkdownCodeBlock {
2678 tag: &resource.uri,
2679 text: &resource.text,
2680 }
2681 .to_string(),
2682 )
2683 }
2684 }
2685 }
2686 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2687 // TODO
2688 Self::Text("[blob]".to_string())
2689 }
2690 other => {
2691 log::warn!("Unexpected content type: {:?}", other);
2692 Self::Text("[unknown]".to_string())
2693 }
2694 },
2695 other => {
2696 log::warn!("Unexpected content type: {:?}", other);
2697 Self::Text("[unknown]".to_string())
2698 }
2699 }
2700 }
2701}
2702
2703impl From<UserMessageContent> for acp::ContentBlock {
2704 fn from(content: UserMessageContent) -> Self {
2705 match content {
2706 UserMessageContent::Text(text) => text.into(),
2707 UserMessageContent::Image(image) => {
2708 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
2709 }
2710 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
2711 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
2712 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
2713 )),
2714 ),
2715 }
2716 }
2717}
2718
2719fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2720 LanguageModelImage {
2721 source: image_content.data.into(),
2722 size: None,
2723 }
2724}