use crate::{
    ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
    DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
    ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
    SystemPromptTemplate, Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
};
use acp_thread::{MentionUri, UserMessageId};
use action_log::ActionLog;

use agent_client_protocol as acp;
use agent_settings::{
    AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
    SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
};
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{ModelRequestUsage, RequestUsage, UserStore};
use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, Plan, UsageLimit};
use collections::{HashMap, HashSet, IndexMap};
use fs::Fs;
use futures::stream;
use futures::{
    FutureExt,
    channel::{mpsc, oneshot},
    future::Shared,
    stream::FuturesUnordered,
};
use gpui::{
    App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
};
use language_model::{
    LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
    LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
    LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
    LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
    ZED_CLOUD_PROVIDER_ID,
};
use project::Project;
use prompt_store::ProjectContext;
use schemars::{JsonSchema, Schema};
use serde::{Deserialize, Serialize};
use settings::{LanguageModelSelection, Settings, update_settings_file};
use smol::stream::StreamExt;
use std::{
    collections::BTreeMap,
    ops::RangeInclusive,
    path::Path,
    rc::Rc,
    sync::Arc,
    time::{Duration, Instant},
};
use std::{fmt::Write, path::PathBuf};
use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
use uuid::Uuid;

const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
pub const MAX_TOOL_NAME_LENGTH: usize = 64;

/// The ID of the user prompt that initiated a request.
///
/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct PromptId(Arc<str>);

impl PromptId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for PromptId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);

#[derive(Debug, Clone)]
enum RetryStrategy {
    ExponentialBackoff {
        initial_delay: Duration,
        max_attempts: u8,
    },
    Fixed {
        delay: Duration,
        max_attempts: u8,
    },
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum Message {
    User(UserMessage),
    Agent(AgentMessage),
    Resume,
}

impl Message {
    pub fn as_agent_message(&self) -> Option<&AgentMessage> {
        match self {
            Message::Agent(agent_message) => Some(agent_message),
            _ => None,
        }
    }

    pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
        match self {
            Message::User(message) => vec![message.to_request()],
            Message::Agent(message) => message.to_request(),
            Message::Resume => vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Continue where you left off".into()],
                cache: false,
            }],
        }
    }

    pub fn to_markdown(&self) -> String {
        match self {
            Message::User(message) => message.to_markdown(),
            Message::Agent(message) => message.to_markdown(),
            Message::Resume => "[resume]\n".into(),
        }
    }

    pub fn role(&self) -> Role {
        match self {
            Message::User(_) | Message::Resume => Role::User,
            Message::Agent(_) => Role::Assistant,
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct UserMessage {
    pub id: UserMessageId,
    pub content: Vec<UserMessageContent>,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum UserMessageContent {
    Text(String),
    Mention { uri: MentionUri, content: String },
    Image(LanguageModelImage),
}

impl UserMessage {
    pub fn to_markdown(&self) -> String {
        let mut markdown = String::from("## User\n\n");

        for content in &self.content {
            match content {
                UserMessageContent::Text(text) => {
                    markdown.push_str(text);
                    markdown.push('\n');
                }
                UserMessageContent::Image(_) => {
                    markdown.push_str("<image />\n");
                }
                UserMessageContent::Mention { uri, content } => {
                    if !content.is_empty() {
                        let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
                    } else {
                        let _ = writeln!(&mut markdown, "{}", uri.as_link());
                    }
                }
            }
        }

        markdown
    }

    fn to_request(&self) -> LanguageModelRequestMessage {
        let mut message = LanguageModelRequestMessage {
            role: Role::User,
            content: Vec::with_capacity(self.content.len()),
            cache: false,
        };

        const OPEN_CONTEXT: &str = "<context>\n\
            The following items were attached by the user. \
            They are up-to-date and don't need to be re-read.\n\n";

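        // Illustrative sketch (not part of the original source): when any
        // mentions are present, the assembled request content ends up shaped
        // roughly like this, with only the non-empty sections included:
        //
        //   <original text chunks, mentions replaced by their links>
        //   <context>
        //   <files>...</files>
        //   <directories>...</directories>
        //   ...
        //   </context>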
        const OPEN_FILES_TAG: &str = "<files>";
        const OPEN_DIRECTORIES_TAG: &str = "<directories>";
        const OPEN_SYMBOLS_TAG: &str = "<symbols>";
        const OPEN_SELECTIONS_TAG: &str = "<selections>";
        const OPEN_THREADS_TAG: &str = "<threads>";
        const OPEN_FETCH_TAG: &str = "<fetched_urls>";
        const OPEN_RULES_TAG: &str =
            "<user_rules>\nThe user has specified the following rules that should be applied:\n";

        let mut file_context = OPEN_FILES_TAG.to_string();
        let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
        let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
        let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
        let mut thread_context = OPEN_THREADS_TAG.to_string();
        let mut fetch_context = OPEN_FETCH_TAG.to_string();
        let mut rules_context = OPEN_RULES_TAG.to_string();

        for chunk in &self.content {
            let chunk = match chunk {
                UserMessageContent::Text(text) => {
                    language_model::MessageContent::Text(text.clone())
                }
                UserMessageContent::Image(value) => {
                    language_model::MessageContent::Image(value.clone())
                }
                UserMessageContent::Mention { uri, content } => {
                    match uri {
                        MentionUri::File { abs_path } => {
                            write!(
                                &mut file_context,
                                "\n{}",
                                MarkdownCodeBlock {
                                    tag: &codeblock_tag(abs_path, None),
                                    text: &content.to_string(),
                                }
                            )
                            .ok();
                        }
                        MentionUri::PastedImage => {
                            debug_panic!("pasted image URI should not be used in mention content")
                        }
                        MentionUri::Directory { .. } => {
                            write!(&mut directory_context, "\n{}\n", content).ok();
                        }
                        MentionUri::Symbol {
                            abs_path: path,
                            line_range,
                            ..
                        } => {
                            write!(
                                &mut symbol_context,
                                "\n{}",
                                MarkdownCodeBlock {
                                    tag: &codeblock_tag(path, Some(line_range)),
                                    text: content
                                }
                            )
                            .ok();
                        }
                        MentionUri::Selection {
                            abs_path: path,
                            line_range,
                            ..
                        } => {
                            write!(
                                &mut selection_context,
                                "\n{}",
                                MarkdownCodeBlock {
                                    tag: &codeblock_tag(
                                        path.as_deref().unwrap_or("Untitled".as_ref()),
                                        Some(line_range)
                                    ),
                                    text: content
                                }
                            )
                            .ok();
                        }
                        MentionUri::Thread { .. } => {
                            write!(&mut thread_context, "\n{}\n", content).ok();
                        }
                        MentionUri::TextThread { .. } => {
                            write!(&mut thread_context, "\n{}\n", content).ok();
                        }
                        MentionUri::Rule { .. } => {
                            write!(
                                &mut rules_context,
                                "\n{}",
                                MarkdownCodeBlock {
                                    tag: "",
                                    text: content
                                }
                            )
                            .ok();
                        }
                        MentionUri::Fetch { url } => {
                            write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
                        }
                    }

                    language_model::MessageContent::Text(uri.as_link().to_string())
                }
            };

            message.content.push(chunk);
        }

        let len_before_context = message.content.len();

        if file_context.len() > OPEN_FILES_TAG.len() {
            file_context.push_str("</files>\n");
            message
                .content
                .push(language_model::MessageContent::Text(file_context));
        }

        if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
            directory_context.push_str("</directories>\n");
            message
                .content
                .push(language_model::MessageContent::Text(directory_context));
        }

        if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
            symbol_context.push_str("</symbols>\n");
            message
                .content
                .push(language_model::MessageContent::Text(symbol_context));
        }

        if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
            selection_context.push_str("</selections>\n");
            message
                .content
                .push(language_model::MessageContent::Text(selection_context));
        }

        if thread_context.len() > OPEN_THREADS_TAG.len() {
            thread_context.push_str("</threads>\n");
            message
                .content
                .push(language_model::MessageContent::Text(thread_context));
        }

        if fetch_context.len() > OPEN_FETCH_TAG.len() {
            fetch_context.push_str("</fetched_urls>\n");
            message
                .content
                .push(language_model::MessageContent::Text(fetch_context));
        }

        if rules_context.len() > OPEN_RULES_TAG.len() {
            rules_context.push_str("</user_rules>\n");
            message
                .content
                .push(language_model::MessageContent::Text(rules_context));
        }

        if message.content.len() > len_before_context {
            message.content.insert(
                len_before_context,
                language_model::MessageContent::Text(OPEN_CONTEXT.into()),
            );
            message
                .content
                .push(language_model::MessageContent::Text("</context>".into()));
        }

        message
    }
}

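/// Builds the info string used for fenced code blocks around mentioned content.
/// Illustrative example (an assumption, not from the original source):
/// `codeblock_tag(Path::new("/tmp/main.rs"), Some(&(9..=19)))` would produce
/// `"rs /tmp/main.rs:10-20"`, converting the 0-based range to 1-based line numbers.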
fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
    let mut result = String::new();

    if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
        let _ = write!(result, "{} ", extension);
    }

    let _ = write!(result, "{}", full_path.display());

    if let Some(range) = line_range {
        if range.start() == range.end() {
            let _ = write!(result, ":{}", range.start() + 1);
        } else {
            let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
        }
    }

    result
}

impl AgentMessage {
    pub fn to_markdown(&self) -> String {
        let mut markdown = String::from("## Assistant\n\n");

        for content in &self.content {
            match content {
                AgentMessageContent::Text(text) => {
                    markdown.push_str(text);
                    markdown.push('\n');
                }
                AgentMessageContent::Thinking { text, .. } => {
                    markdown.push_str("<think>");
                    markdown.push_str(text);
                    markdown.push_str("</think>\n");
                }
                AgentMessageContent::RedactedThinking(_) => {
                    markdown.push_str("<redacted_thinking />\n")
                }
                AgentMessageContent::ToolUse(tool_use) => {
                    markdown.push_str(&format!(
                        "**Tool Use**: {} (ID: {})\n",
                        tool_use.name, tool_use.id
                    ));
                    markdown.push_str(&format!(
                        "{}\n",
                        MarkdownCodeBlock {
                            tag: "json",
                            text: &format!("{:#}", tool_use.input)
                        }
                    ));
                }
            }
        }

        for tool_result in self.tool_results.values() {
            markdown.push_str(&format!(
                "**Tool Result**: {} (ID: {})\n\n",
                tool_result.tool_name, tool_result.tool_use_id
            ));
            if tool_result.is_error {
                markdown.push_str("**ERROR:**\n");
            }

            match &tool_result.content {
                LanguageModelToolResultContent::Text(text) => {
                    writeln!(markdown, "{text}\n").ok();
                }
                LanguageModelToolResultContent::Image(_) => {
                    writeln!(markdown, "<image />\n").ok();
                }
            }

            if let Some(output) = tool_result.output.as_ref() {
                writeln!(
                    markdown,
                    "**Debug Output**:\n\n```json\n{}\n```\n",
                    serde_json::to_string_pretty(output).unwrap()
                )
                .unwrap();
            }
        }

        markdown
    }

    pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
        let mut assistant_message = LanguageModelRequestMessage {
            role: Role::Assistant,
            content: Vec::with_capacity(self.content.len()),
            cache: false,
        };
        for chunk in &self.content {
            match chunk {
                AgentMessageContent::Text(text) => {
                    assistant_message
                        .content
                        .push(language_model::MessageContent::Text(text.clone()));
                }
                AgentMessageContent::Thinking { text, signature } => {
                    assistant_message
                        .content
                        .push(language_model::MessageContent::Thinking {
                            text: text.clone(),
                            signature: signature.clone(),
                        });
                }
                AgentMessageContent::RedactedThinking(value) => {
                    assistant_message.content.push(
                        language_model::MessageContent::RedactedThinking(value.clone()),
                    );
                }
                AgentMessageContent::ToolUse(tool_use) => {
                    if self.tool_results.contains_key(&tool_use.id) {
                        assistant_message
                            .content
                            .push(language_model::MessageContent::ToolUse(tool_use.clone()));
                    }
                }
            };
        }

        let mut user_message = LanguageModelRequestMessage {
            role: Role::User,
            content: Vec::new(),
            cache: false,
        };

        for tool_result in self.tool_results.values() {
            let mut tool_result = tool_result.clone();
            // Surprisingly, the API fails if we return an empty string here.
            // It thinks we are sending a tool use without a tool result.
            if tool_result.content.is_empty() {
                tool_result.content = "<Tool returned an empty string>".into();
            }
            user_message
                .content
                .push(language_model::MessageContent::ToolResult(tool_result));
        }

        let mut messages = Vec::new();
        if !assistant_message.content.is_empty() {
            messages.push(assistant_message);
        }
        if !user_message.content.is_empty() {
            messages.push(user_message);
        }
        messages
    }
}

#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct AgentMessage {
    pub content: Vec<AgentMessageContent>,
    pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum AgentMessageContent {
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    RedactedThinking(String),
    ToolUse(LanguageModelToolUse),
}

pub trait TerminalHandle {
    fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
    fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
    fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
}

pub trait ThreadEnvironment {
    fn create_terminal(
        &self,
        command: String,
        cwd: Option<PathBuf>,
        output_byte_limit: Option<u64>,
        cx: &mut AsyncApp,
    ) -> Task<Result<Rc<dyn TerminalHandle>>>;
}

#[derive(Debug)]
pub enum ThreadEvent {
    UserMessage(UserMessage),
    AgentText(String),
    AgentThinking(String),
    ToolCall(acp::ToolCall),
    ToolCallUpdate(acp_thread::ToolCallUpdate),
    ToolCallAuthorization(ToolCallAuthorization),
    Retry(acp_thread::RetryStatus),
    Stop(acp::StopReason),
}

#[derive(Debug)]
pub struct NewTerminal {
    pub command: String,
    pub output_byte_limit: Option<u64>,
    pub cwd: Option<PathBuf>,
    pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
}

#[derive(Debug)]
pub struct ToolCallAuthorization {
    pub tool_call: acp::ToolCallUpdate,
    pub options: Vec<acp::PermissionOption>,
    pub response: oneshot::Sender<acp::PermissionOptionId>,
}

#[derive(Debug, thiserror::Error)]
enum CompletionError {
    #[error("max tokens")]
    MaxTokens,
    #[error("refusal")]
    Refusal,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub struct Thread {
    id: acp::SessionId,
    prompt_id: PromptId,
    updated_at: DateTime<Utc>,
    title: Option<SharedString>,
    pending_title_generation: Option<Task<()>>,
    pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
    summary: Option<SharedString>,
    messages: Vec<Message>,
    user_store: Entity<UserStore>,
    completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// It survives across multiple requests as the model performs tool calls and
    /// we run those tools and report their results.
    running_turn: Option<RunningTurn>,
    pending_message: Option<AgentMessage>,
    tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
    tool_use_limit_reached: bool,
    request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
    #[allow(unused)]
    cumulative_token_usage: TokenUsage,
    #[allow(unused)]
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    context_server_registry: Entity<ContextServerRegistry>,
    profile_id: AgentProfileId,
    project_context: Entity<ProjectContext>,
    templates: Arc<Templates>,
    model: Option<Arc<dyn LanguageModel>>,
    summarization_model: Option<Arc<dyn LanguageModel>>,
    prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
    pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
    pub(crate) project: Entity<Project>,
    pub(crate) action_log: Entity<ActionLog>,
    /// Tracks the last time files were read by the agent, to detect external modifications.
    pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
}

impl Thread {
    fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
        let image = model.map_or(true, |model| model.supports_images());
        acp::PromptCapabilities {
            meta: None,
            image,
            audio: false,
            embedded_context: true,
        }
    }

    pub fn new(
        project: Entity<Project>,
        project_context: Entity<ProjectContext>,
        context_server_registry: Entity<ContextServerRegistry>,
        templates: Arc<Templates>,
        model: Option<Arc<dyn LanguageModel>>,
        cx: &mut Context<Self>,
    ) -> Self {
        let profile_id = AgentSettings::get_global(cx).default_profile.clone();
        let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
        let (prompt_capabilities_tx, prompt_capabilities_rx) =
            watch::channel(Self::prompt_capabilities(model.as_deref()));
        Self {
            id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
            prompt_id: PromptId::new(),
            updated_at: Utc::now(),
            title: None,
            pending_title_generation: None,
            pending_summary_generation: None,
            summary: None,
            messages: Vec::new(),
            user_store: project.read(cx).user_store(),
            completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
            running_turn: None,
            pending_message: None,
            tools: BTreeMap::default(),
            tool_use_limit_reached: false,
            request_token_usage: HashMap::default(),
            cumulative_token_usage: TokenUsage::default(),
            initial_project_snapshot: {
                let project_snapshot = Self::project_snapshot(project.clone(), cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            context_server_registry,
            profile_id,
            project_context,
            templates,
            model,
            summarization_model: None,
            prompt_capabilities_tx,
            prompt_capabilities_rx,
            project,
            action_log,
            file_read_times: HashMap::default(),
        }
    }

    pub fn id(&self) -> &acp::SessionId {
        &self.id
    }

    pub fn replay(
        &mut self,
        cx: &mut Context<Self>,
    ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
        let (tx, rx) = mpsc::unbounded();
        let stream = ThreadEventStream(tx);
        for message in &self.messages {
            match message {
                Message::User(user_message) => stream.send_user_message(user_message),
                Message::Agent(assistant_message) => {
                    for content in &assistant_message.content {
                        match content {
                            AgentMessageContent::Text(text) => stream.send_text(text),
                            AgentMessageContent::Thinking { text, .. } => {
                                stream.send_thinking(text)
                            }
                            AgentMessageContent::RedactedThinking(_) => {}
                            AgentMessageContent::ToolUse(tool_use) => {
                                self.replay_tool_call(
                                    tool_use,
                                    assistant_message.tool_results.get(&tool_use.id),
                                    &stream,
                                    cx,
                                );
                            }
                        }
                    }
                }
                Message::Resume => {}
            }
        }
        rx
    }

    fn replay_tool_call(
        &self,
        tool_use: &LanguageModelToolUse,
        tool_result: Option<&LanguageModelToolResult>,
        stream: &ThreadEventStream,
        cx: &mut Context<Self>,
    ) {
        let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
            self.context_server_registry
                .read(cx)
                .servers()
                .find_map(|(_, tools)| {
                    if let Some(tool) = tools.get(tool_use.name.as_ref()) {
                        Some(tool.clone())
                    } else {
                        None
                    }
                })
        });

        let Some(tool) = tool else {
            stream
                .0
                .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
                    meta: None,
                    id: acp::ToolCallId(tool_use.id.to_string().into()),
                    title: tool_use.name.to_string(),
                    kind: acp::ToolKind::Other,
                    status: acp::ToolCallStatus::Failed,
                    content: Vec::new(),
                    locations: Vec::new(),
                    raw_input: Some(tool_use.input.clone()),
                    raw_output: None,
                })))
                .ok();
            return;
        };

        let title = tool.initial_title(tool_use.input.clone(), cx);
        let kind = tool.kind();
        stream.send_tool_call(
            &tool_use.id,
            &tool_use.name,
            title,
            kind,
            tool_use.input.clone(),
        );

        let output = tool_result
            .as_ref()
            .and_then(|result| result.output.clone());
        if let Some(output) = output.clone() {
            let tool_event_stream = ToolCallEventStream::new(
                tool_use.id.clone(),
                stream.clone(),
                Some(self.project.read(cx).fs().clone()),
            );
            tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
                .log_err();
        }

        stream.update_tool_call_fields(
            &tool_use.id,
            acp::ToolCallUpdateFields {
                status: Some(
                    tool_result
                        .as_ref()
                        .map_or(acp::ToolCallStatus::Failed, |result| {
                            if result.is_error {
                                acp::ToolCallStatus::Failed
                            } else {
                                acp::ToolCallStatus::Completed
                            }
                        }),
                ),
                raw_output: output,
                ..Default::default()
            },
        );
    }

    pub fn from_db(
        id: acp::SessionId,
        db_thread: DbThread,
        project: Entity<Project>,
        project_context: Entity<ProjectContext>,
        context_server_registry: Entity<ContextServerRegistry>,
        templates: Arc<Templates>,
        cx: &mut Context<Self>,
    ) -> Self {
        let profile_id = db_thread
            .profile
            .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());

        let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
            db_thread
                .model
                .and_then(|model| {
                    let model = SelectedModel {
                        provider: model.provider.clone().into(),
                        model: model.model.into(),
                    };
                    registry.select_model(&model, cx)
                })
                .or_else(|| registry.default_model())
                .map(|model| model.model)
        });

        if model.is_none() {
            model = Self::resolve_profile_model(&profile_id, cx);
        }
        if model.is_none() {
            model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
                registry.default_model().map(|model| model.model)
            });
        }

        let (prompt_capabilities_tx, prompt_capabilities_rx) =
            watch::channel(Self::prompt_capabilities(model.as_deref()));

        let action_log = cx.new(|_| ActionLog::new(project.clone()));

        Self {
            id,
            prompt_id: PromptId::new(),
            title: if db_thread.title.is_empty() {
                None
            } else {
                Some(db_thread.title.clone())
            },
            pending_title_generation: None,
            pending_summary_generation: None,
            summary: db_thread.detailed_summary,
            messages: db_thread.messages,
            user_store: project.read(cx).user_store(),
            completion_mode: db_thread.completion_mode.unwrap_or_default(),
            running_turn: None,
            pending_message: None,
            tools: BTreeMap::default(),
            tool_use_limit_reached: false,
            request_token_usage: db_thread.request_token_usage.clone(),
            cumulative_token_usage: db_thread.cumulative_token_usage,
            initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
            context_server_registry,
            profile_id,
            project_context,
            templates,
            model,
            summarization_model: None,
            project,
            action_log,
            updated_at: db_thread.updated_at,
            prompt_capabilities_tx,
            prompt_capabilities_rx,
            file_read_times: HashMap::default(),
        }
    }

    pub fn to_db(&self, cx: &App) -> Task<DbThread> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        let mut thread = DbThread {
            title: self.title(),
            messages: self.messages.clone(),
            updated_at: self.updated_at,
            detailed_summary: self.summary.clone(),
            initial_project_snapshot: None,
            cumulative_token_usage: self.cumulative_token_usage,
            request_token_usage: self.request_token_usage.clone(),
            model: self.model.as_ref().map(|model| DbLanguageModel {
                provider: model.provider_id().to_string(),
                model: model.name().0.to_string(),
            }),
            completion_mode: Some(self.completion_mode),
            profile: Some(self.profile_id.clone()),
        };

        cx.background_spawn(async move {
            let initial_project_snapshot = initial_project_snapshot.await;
            thread.initial_project_snapshot = initial_project_snapshot;
            thread
        })
    }

    /// Create a snapshot of the current project state including git information and unsaved buffers.
    fn project_snapshot(
        project: Entity<Project>,
        cx: &mut Context<Self>,
    ) -> Task<Arc<ProjectSnapshot>> {
        let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
        cx.spawn(async move |_, _| {
            let snapshot = task.await;

            Arc::new(ProjectSnapshot {
                worktree_snapshots: snapshot.worktree_snapshots,
                timestamp: Utc::now(),
            })
        })
    }

    pub fn project_context(&self) -> &Entity<ProjectContext> {
        &self.project_context
    }

    pub fn project(&self) -> &Entity<Project> {
        &self.project
    }

    pub fn action_log(&self) -> &Entity<ActionLog> {
        &self.action_log
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty() && self.title.is_none()
    }

    pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
        self.model.as_ref()
    }

    pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
        let old_usage = self.latest_token_usage();
        self.model = Some(model);
        let new_caps = Self::prompt_capabilities(self.model.as_deref());
        let new_usage = self.latest_token_usage();
        if old_usage != new_usage {
            cx.emit(TokenUsageUpdated(new_usage));
        }
        self.prompt_capabilities_tx.send(new_caps).log_err();
        cx.notify()
    }

    pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
        self.summarization_model.as_ref()
    }

    pub fn set_summarization_model(
        &mut self,
        model: Option<Arc<dyn LanguageModel>>,
        cx: &mut Context<Self>,
    ) {
        self.summarization_model = model;
        cx.notify()
    }

    pub fn completion_mode(&self) -> CompletionMode {
        self.completion_mode
    }

    pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
        let old_usage = self.latest_token_usage();
        self.completion_mode = mode;
        let new_usage = self.latest_token_usage();
        if old_usage != new_usage {
            cx.emit(TokenUsageUpdated(new_usage));
        }
        cx.notify()
    }

    #[cfg(any(test, feature = "test-support"))]
    pub fn last_message(&self) -> Option<Message> {
        if let Some(message) = self.pending_message.clone() {
            Some(Message::Agent(message))
        } else {
            self.messages.last().cloned()
        }
    }

    pub fn add_default_tools(
        &mut self,
        environment: Rc<dyn ThreadEnvironment>,
        cx: &mut Context<Self>,
    ) {
        let language_registry = self.project.read(cx).languages().clone();
        self.add_tool(CopyPathTool::new(self.project.clone()));
        self.add_tool(CreateDirectoryTool::new(self.project.clone()));
        self.add_tool(DeletePathTool::new(
            self.project.clone(),
            self.action_log.clone(),
        ));
        self.add_tool(DiagnosticsTool::new(self.project.clone()));
        self.add_tool(EditFileTool::new(
            self.project.clone(),
            cx.weak_entity(),
            language_registry,
            Templates::new(),
        ));
        self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
        self.add_tool(FindPathTool::new(self.project.clone()));
        self.add_tool(GrepTool::new(self.project.clone()));
        self.add_tool(ListDirectoryTool::new(self.project.clone()));
        self.add_tool(MovePathTool::new(self.project.clone()));
        self.add_tool(NowTool);
        self.add_tool(OpenTool::new(self.project.clone()));
        self.add_tool(ReadFileTool::new(
            cx.weak_entity(),
            self.project.clone(),
            self.action_log.clone(),
        ));
        self.add_tool(TerminalTool::new(self.project.clone(), environment));
        self.add_tool(ThinkingTool);
        self.add_tool(WebSearchTool);
    }

    pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
        self.tools.insert(T::name().into(), tool.erase());
    }

    pub fn remove_tool(&mut self, name: &str) -> bool {
        self.tools.remove(name).is_some()
    }

    pub fn profile(&self) -> &AgentProfileId {
        &self.profile_id
    }

    pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
        if self.profile_id == profile_id {
            return;
        }

        self.profile_id = profile_id;

        // Swap to the profile's preferred model when available.
        if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
            self.set_model(model, cx);
        }
    }

    pub fn cancel(&mut self, cx: &mut Context<Self>) {
        if let Some(running_turn) = self.running_turn.take() {
            running_turn.cancel();
        }
        self.flush_pending_message(cx);
    }

    fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
        let Some(last_user_message) = self.last_user_message() else {
            return;
        };

        self.request_token_usage
            .insert(last_user_message.id.clone(), update);
        cx.emit(TokenUsageUpdated(self.latest_token_usage()));
        cx.notify();
    }

    pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
        self.cancel(cx);
        let Some(position) = self.messages.iter().position(
            |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
        ) else {
            return Err(anyhow!("Message not found"));
        };

        for message in self.messages.drain(position..) {
            match message {
                Message::User(message) => {
                    self.request_token_usage.remove(&message.id);
                }
                Message::Agent(_) | Message::Resume => {}
            }
        }
        self.clear_summary();
        cx.notify();
        Ok(())
    }

    pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
        let last_user_message = self.last_user_message()?;
        let tokens = self.request_token_usage.get(&last_user_message.id)?;
        Some(*tokens)
    }

    pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
        let usage = self.latest_request_token_usage()?;
        let model = self.model.clone()?;
        Some(acp_thread::TokenUsage {
            max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
            used_tokens: usage.total_tokens(),
        })
    }

    /// Look up the active profile and resolve its preferred model if one is configured.
    fn resolve_profile_model(
        profile_id: &AgentProfileId,
        cx: &mut Context<Self>,
    ) -> Option<Arc<dyn LanguageModel>> {
        let selection = AgentSettings::get_global(cx)
            .profiles
            .get(profile_id)?
            .default_model
            .clone()?;
        Self::resolve_model_from_selection(&selection, cx)
    }

    /// Translate a stored model selection into the configured model from the registry.
    fn resolve_model_from_selection(
        selection: &LanguageModelSelection,
        cx: &mut Context<Self>,
    ) -> Option<Arc<dyn LanguageModel>> {
        let selected = SelectedModel {
            provider: LanguageModelProviderId::from(selection.provider.0.clone()),
            model: LanguageModelId::from(selection.model.clone()),
        };
        LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
            registry
                .select_model(&selected, cx)
                .map(|configured| configured.model)
        })
    }

    pub fn resume(
        &mut self,
        cx: &mut Context<Self>,
    ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
        self.messages.push(Message::Resume);
        cx.notify();

        log::debug!("Total messages in thread: {}", self.messages.len());
        self.run_turn(cx)
    }

    /// Sending a message results in the model streaming a response, which could include tool calls.
    /// After calling tools, the model stops and waits for any outstanding tool calls to be completed and their results sent.
    /// The returned channel reports each occurrence in which the model stops before erroring or ending its turn.
    pub fn send<T>(
        &mut self,
        id: UserMessageId,
        content: impl IntoIterator<Item = T>,
        cx: &mut Context<Self>,
    ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
    where
        T: Into<UserMessageContent>,
    {
        let model = self.model().context("No language model configured")?;

        log::info!("Thread::send called with model: {}", model.name().0);
        self.advance_prompt_id();

        let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
        log::debug!("Thread::send content: {:?}", content);

        self.messages
            .push(Message::User(UserMessage { id, content }));
        cx.notify();

        log::debug!("Total messages in thread: {}", self.messages.len());
        self.run_turn(cx)
    }

    #[cfg(feature = "eval")]
    pub fn proceed(
        &mut self,
        cx: &mut Context<Self>,
    ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
        self.run_turn(cx)
    }

    fn run_turn(
        &mut self,
        cx: &mut Context<Self>,
    ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
        self.cancel(cx);

        let model = self.model.clone().context("No language model configured")?;
        let profile = AgentSettings::get_global(cx)
            .profiles
            .get(&self.profile_id)
            .context("Profile not found")?;
        let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
        let event_stream = ThreadEventStream(events_tx);
        let message_ix = self.messages.len().saturating_sub(1);
        self.tool_use_limit_reached = false;
        self.clear_summary();
        self.running_turn = Some(RunningTurn {
            event_stream: event_stream.clone(),
            tools: self.enabled_tools(profile, &model, cx),
            _task: cx.spawn(async move |this, cx| {
                log::debug!("Starting agent turn execution");

                let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
                _ = this.update(cx, |this, cx| this.flush_pending_message(cx));

                match turn_result {
                    Ok(()) => {
                        log::debug!("Turn execution completed");
                        event_stream.send_stop(acp::StopReason::EndTurn);
                    }
                    Err(error) => {
                        log::error!("Turn execution failed: {:?}", error);
                        match error.downcast::<CompletionError>() {
                            Ok(CompletionError::Refusal) => {
                                event_stream.send_stop(acp::StopReason::Refusal);
                                _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
                            }
                            Ok(CompletionError::MaxTokens) => {
                                event_stream.send_stop(acp::StopReason::MaxTokens);
                            }
                            Ok(CompletionError::Other(error)) | Err(error) => {
                                event_stream.send_error(error);
                            }
                        }
                    }
                }

                _ = this.update(cx, |this, _| this.running_turn.take());
            }),
        });
        Ok(events_rx)
    }

    async fn run_turn_internal(
        this: &WeakEntity<Self>,
        model: Arc<dyn LanguageModel>,
        event_stream: &ThreadEventStream,
        cx: &mut AsyncApp,
    ) -> Result<()> {
        let mut attempt = 0;
        let mut intent = CompletionIntent::UserPrompt;
        loop {
            let request =
                this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;

            telemetry::event!(
                "Agent Thread Completion",
                thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
                prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
                model = model.telemetry_id(),
                model_provider = model.provider_id().to_string(),
                attempt
            );

            log::debug!("Calling model.stream_completion, attempt {}", attempt);

            let (mut events, mut error) = match model.stream_completion(request, cx).await {
                Ok(events) => (events, None),
                Err(err) => (stream::empty().boxed(), Some(err)),
            };
            let mut tool_results = FuturesUnordered::new();
            while let Some(event) = events.next().await {
                log::trace!("Received completion event: {:?}", event);
                match event {
                    Ok(event) => {
                        tool_results.extend(this.update(cx, |this, cx| {
                            this.handle_completion_event(event, event_stream, cx)
                        })??);
                    }
                    Err(err) => {
                        error = Some(err);
                        break;
                    }
                }
            }

            let end_turn = tool_results.is_empty();
            while let Some(tool_result) = tool_results.next().await {
                log::debug!("Tool finished {:?}", tool_result);

                event_stream.update_tool_call_fields(
                    &tool_result.tool_use_id,
                    acp::ToolCallUpdateFields {
                        status: Some(if tool_result.is_error {
                            acp::ToolCallStatus::Failed
                        } else {
                            acp::ToolCallStatus::Completed
                        }),
                        raw_output: tool_result.output.clone(),
                        ..Default::default()
                    },
                );
                this.update(cx, |this, _cx| {
                    this.pending_message()
                        .tool_results
                        .insert(tool_result.tool_use_id.clone(), tool_result);
                })?;
            }

            this.update(cx, |this, cx| {
                this.flush_pending_message(cx);
                if this.title.is_none() && this.pending_title_generation.is_none() {
                    this.generate_title(cx);
                }
            })?;

            if let Some(error) = error {
                attempt += 1;
                let retry = this.update(cx, |this, cx| {
                    let user_store = this.user_store.read(cx);
                    this.handle_completion_error(error, attempt, user_store.plan())
                })??;
                let timer = cx.background_executor().timer(retry.duration);
                event_stream.send_retry(retry);
                timer.await;
                this.update(cx, |this, _cx| {
                    if let Some(Message::Agent(message)) = this.messages.last() {
                        if message.tool_results.is_empty() {
                            intent = CompletionIntent::UserPrompt;
                            this.messages.push(Message::Resume);
                        }
                    }
                })?;
            } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
                return Err(language_model::ToolUseLimitReachedError.into());
            } else if end_turn {
                return Ok(());
            } else {
                intent = CompletionIntent::ToolResults;
                attempt = 0;
            }
        }
    }

    fn handle_completion_error(
        &mut self,
        error: LanguageModelCompletionError,
        attempt: u8,
        plan: Option<Plan>,
    ) -> Result<acp_thread::RetryStatus> {
        let Some(model) = self.model.as_ref() else {
            return Err(anyhow!(error));
        };

        let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
            match plan {
                Some(Plan::V2(_)) => true,
                Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
                None => false,
            }
        } else {
            true
        };

        if !auto_retry {
            return Err(anyhow!(error));
        }

        let Some(strategy) = Self::retry_strategy_for(&error) else {
            return Err(anyhow!(error));
        };

        let max_attempts = match &strategy {
            RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
            RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
        };

        if attempt > max_attempts {
            return Err(anyhow!(error));
        }

        let delay = match &strategy {
            RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
                let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
                Duration::from_secs(delay_secs)
            }
            RetryStrategy::Fixed { delay, .. } => *delay,
        };
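        // Illustrative progression (an assumption, assuming a strategy built
        // from BASE_RETRY_DELAY = 5s and MAX_RETRY_ATTEMPTS = 4): exponential
        // backoff waits 5s, 10s, 20s, then 40s across attempts 1..=4, while a
        // fixed strategy waits the same `delay` on every attempt.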
        log::debug!("Retry attempt {attempt} with delay {delay:?}");

        Ok(acp_thread::RetryStatus {
            last_error: error.to_string().into(),
            attempt: attempt as usize,
            max_attempts: max_attempts as usize,
            started_at: Instant::now(),
            duration: delay,
        })
    }

    /// A helper method that's called on every streamed completion event.
    /// Returns an optional tool result task, which the main agentic loop will
    /// send back to the model when it resolves.
    fn handle_completion_event(
        &mut self,
        event: LanguageModelCompletionEvent,
        event_stream: &ThreadEventStream,
        cx: &mut Context<Self>,
    ) -> Result<Option<Task<LanguageModelToolResult>>> {
        log::trace!("Handling streamed completion event: {:?}", event);
        use LanguageModelCompletionEvent::*;

        match event {
            StartMessage { .. } => {
                self.flush_pending_message(cx);
                self.pending_message = Some(AgentMessage::default());
            }
            Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
            Thinking { text, signature } => {
                self.handle_thinking_event(text, signature, event_stream, cx)
            }
            RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
            ToolUse(tool_use) => {
                return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
            }
            ToolUseJsonParseError {
                id,
                tool_name,
                raw_input,
                json_parse_error,
            } => {
                return Ok(Some(Task::ready(
                    self.handle_tool_use_json_parse_error_event(
                        id,
                        tool_name,
                        raw_input,
                        json_parse_error,
                    ),
                )));
            }
            UsageUpdate(usage) => {
                telemetry::event!(
                    "Agent Thread Completion Usage Updated",
                    thread_id = self.id.to_string(),
                    prompt_id = self.prompt_id.to_string(),
                    model = self.model.as_ref().map(|m| m.telemetry_id()),
                    model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
                    input_tokens = usage.input_tokens,
                    output_tokens = usage.output_tokens,
                    cache_creation_input_tokens = usage.cache_creation_input_tokens,
                    cache_read_input_tokens = usage.cache_read_input_tokens,
                );
                self.update_token_usage(usage, cx);
            }
            StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => {
                self.update_model_request_usage(amount, limit, cx);
            }
            StatusUpdate(
                CompletionRequestStatus::Started
                | CompletionRequestStatus::Queued { .. }
                | CompletionRequestStatus::Failed { .. },
            ) => {}
            StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => {
                self.tool_use_limit_reached = true;
            }
            Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
            Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
            Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
        }

        Ok(None)
    }

    fn handle_text_event(
        &mut self,
        new_text: String,
        event_stream: &ThreadEventStream,
        cx: &mut Context<Self>,
    ) {
        event_stream.send_text(&new_text);

        let last_message = self.pending_message();
        if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
            text.push_str(&new_text);
        } else {
            last_message
                .content
                .push(AgentMessageContent::Text(new_text));
        }

        cx.notify();
    }

    fn handle_thinking_event(
        &mut self,
        new_text: String,
        new_signature: Option<String>,
        event_stream: &ThreadEventStream,
        cx: &mut Context<Self>,
    ) {
        event_stream.send_thinking(&new_text);

        let last_message = self.pending_message();
        if let Some(AgentMessageContent::Thinking { text, signature }) =
            last_message.content.last_mut()
        {
            text.push_str(&new_text);
            *signature = new_signature.or(signature.take());
        } else {
            last_message.content.push(AgentMessageContent::Thinking {
                text: new_text,
                signature: new_signature,
            });
        }

        cx.notify();
    }

    fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
        let last_message = self.pending_message();
        last_message
            .content
            .push(AgentMessageContent::RedactedThinking(data));
        cx.notify();
    }

    fn handle_tool_use_event(
        &mut self,
        tool_use: LanguageModelToolUse,
        event_stream: &ThreadEventStream,
        cx: &mut Context<Self>,
    ) -> Option<Task<LanguageModelToolResult>> {
        cx.notify();

        let tool = self.tool(tool_use.name.as_ref());
        let mut title = SharedString::from(&tool_use.name);
        let mut kind = acp::ToolKind::Other;
        if let Some(tool) = tool.as_ref() {
            title = tool.initial_title(tool_use.input.clone(), cx);
            kind = tool.kind();
        }

        // Ensure the last message ends in the current tool use
        let last_message = self.pending_message();
        let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
            if let AgentMessageContent::ToolUse(last_tool_use) = content {
                if last_tool_use.id == tool_use.id {
                    *last_tool_use = tool_use.clone();
                    false
                } else {
                    true
                }
            } else {
                true
            }
        });

        if push_new_tool_use {
            event_stream.send_tool_call(
                &tool_use.id,
                &tool_use.name,
                title,
                kind,
                tool_use.input.clone(),
            );
            last_message
                .content
                .push(AgentMessageContent::ToolUse(tool_use.clone()));
        } else {
            event_stream.update_tool_call_fields(
                &tool_use.id,
                acp::ToolCallUpdateFields {
                    title: Some(title.into()),
                    kind: Some(kind),
                    raw_input: Some(tool_use.input.clone()),
                    ..Default::default()
                },
            );
        }

        if !tool_use.is_input_complete {
            return None;
        }

        let Some(tool) = tool else {
            let content = format!("No tool named {} exists", tool_use.name);
            return Some(Task::ready(LanguageModelToolResult {
                content: LanguageModelToolResultContent::Text(Arc::from(content)),
                tool_use_id: tool_use.id,
                tool_name: tool_use.name,
                is_error: true,
                output: None,
            }));
        };

        let fs = self.project.read(cx).fs().clone();
        let tool_event_stream =
            ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
        tool_event_stream.update_fields(acp::ToolCallUpdateFields {
            status: Some(acp::ToolCallStatus::InProgress),
            ..Default::default()
        });
        let supports_images = self.model().is_some_and(|model| model.supports_images());
        let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
        log::debug!("Running tool {}", tool_use.name);
        Some(cx.foreground_executor().spawn(async move {
            let tool_result = tool_result.await.and_then(|output| {
                if let LanguageModelToolResultContent::Image(_) = &output.llm_output
                    && !supports_images
                {
                    return Err(anyhow!(
                        "Attempted to read an image, but this model doesn't support it.",
                    ));
                }
                Ok(output)
            });

            match tool_result {
                Ok(output) => LanguageModelToolResult {
                    tool_use_id: tool_use.id,
                    tool_name: tool_use.name,
                    is_error: false,
                    content: output.llm_output,
                    output: Some(output.raw_output),
                },
                Err(error) => LanguageModelToolResult {
                    tool_use_id: tool_use.id,
                    tool_name: tool_use.name,
                    is_error: true,
                    content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
                    output: Some(error.to_string().into()),
                },
            }
        }))
    }

    fn handle_tool_use_json_parse_error_event(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    ) -> LanguageModelToolResult {
        let tool_output = format!("Error parsing input JSON: {json_parse_error}");
        LanguageModelToolResult {
            tool_use_id,
            tool_name,
            is_error: true,
            content: LanguageModelToolResultContent::Text(tool_output.into()),
            output: Some(serde_json::Value::String(raw_input.to_string())),
        }
    }

    fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
        self.project
            .read(cx)
            .user_store()
            .update(cx, |user_store, cx| {
                user_store.update_model_request_usage(
                    ModelRequestUsage(RequestUsage {
                        amount: amount as i32,
                        limit,
                    }),
                    cx,
                )
            });
    }

    pub fn title(&self) -> SharedString {
        self.title.clone().unwrap_or("New Thread".into())
    }

    pub fn is_generating_summary(&self) -> bool {
        self.pending_summary_generation.is_some()
    }

    pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
        if let Some(summary) = self.summary.as_ref() {
            return Task::ready(Some(summary.clone())).shared();
        }
        if let Some(task) = self.pending_summary_generation.clone() {
            return task;
        }
        let Some(model) = self.summarization_model.clone() else {
            log::error!("No summarization model available");
            return Task::ready(None).shared();
        };
        let mut request = LanguageModelRequest {
            intent: Some(CompletionIntent::ThreadContextSummarization),
            temperature: AgentSettings::temperature_for_model(&model, cx),
            ..Default::default()
        };

        for message in &self.messages {
            request.messages.extend(message.to_request());
        }

        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
            cache: false,
        });

        let task = cx
            .spawn(async move |this, cx| {
                let mut summary = String::new();
                let mut messages = model.stream_completion(request, cx).await.log_err()?;
                while let Some(event) = messages.next().await {
                    let event = event.log_err()?;
                    let text = match event {
                        LanguageModelCompletionEvent::Text(text) => text,
                        LanguageModelCompletionEvent::StatusUpdate(
                            CompletionRequestStatus::UsageUpdated { amount, limit },
                        ) => {
                            this.update(cx, |thread, cx| {
                                thread.update_model_request_usage(amount, limit, cx);
                            })
                            .ok()?;
                            continue;
                        }
                        _ => continue,
                    };

                    let mut lines = text.lines();
                    summary.extend(lines.next());
                }

                log::debug!("Setting summary: {}", summary);
                let summary = SharedString::from(summary);

                this.update(cx, |this, cx| {
                    this.summary = Some(summary.clone());
                    this.pending_summary_generation = None;
                    cx.notify()
                })
                .ok()?;

                Some(summary)
            })
            .shared();
        self.pending_summary_generation = Some(task.clone());
        task
    }

    fn generate_title(&mut self, cx: &mut Context<Self>) {
        let Some(model) = self.summarization_model.clone() else {
            return;
        };

        log::debug!(
            "Generating title with model: {:?}",
            self.summarization_model.as_ref().map(|model| model.name())
        );
        let mut request = LanguageModelRequest {
            intent: Some(CompletionIntent::ThreadSummarization),
            temperature: AgentSettings::temperature_for_model(&model, cx),
            ..Default::default()
        };

        for message in &self.messages {
            request.messages.extend(message.to_request());
        }

        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![SUMMARIZE_THREAD_PROMPT.into()],
            cache: false,
        });
        self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
            let mut title = String::new();

            let generate = async {
                let mut messages = model.stream_completion(request, cx).await?;
                while let Some(event) = messages.next().await {
                    let event = event?;
                    let text = match event {
                        LanguageModelCompletionEvent::Text(text) => text,
                        LanguageModelCompletionEvent::StatusUpdate(
                            CompletionRequestStatus::UsageUpdated { amount, limit },
                        ) => {
                            this.update(cx, |thread, cx| {
                                thread.update_model_request_usage(amount, limit, cx);
                            })?;
                            continue;
                        }
                        _ => continue,
                    };

                    let mut lines = text.lines();
                    title.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }
                anyhow::Ok(())
            };

            if generate.await.context("failed to generate title").is_ok() {
                _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
            }
            _ = this.update(cx, |this, _| this.pending_title_generation = None);
        }));
    }

    pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
        self.pending_title_generation = None;
        if Some(&title) != self.title.as_ref() {
            self.title = Some(title);
            cx.emit(TitleUpdated);
            cx.notify();
        }
    }

    fn clear_summary(&mut self) {
        self.summary = None;
        self.pending_summary_generation = None;
    }

    fn last_user_message(&self) -> Option<&UserMessage> {
        self.messages
            .iter()
            .rev()
            .find_map(|message| match message {
                Message::User(user_message) => Some(user_message),
                Message::Agent(_) => None,
                Message::Resume => None,
            })
    }

    fn pending_message(&mut self) -> &mut AgentMessage {
        self.pending_message.get_or_insert_default()
    }

    fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
        let Some(mut message) = self.pending_message.take() else {
            return;
        };

        if message.content.is_empty() {
            return;
        }

        for content in &message.content {
            let AgentMessageContent::ToolUse(tool_use) = content else {
                continue;
            };

            if !message.tool_results.contains_key(&tool_use.id) {
                message.tool_results.insert(
                    tool_use.id.clone(),
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name.clone(),
                        is_error: true,
                        content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
                        output: None,
                    },
                );
            }
        }

        self.messages.push(Message::Agent(message));
        self.updated_at = Utc::now();
        self.clear_summary();
        cx.notify()
    }

1848 pub(crate) fn build_completion_request(
1849 &self,
1850 completion_intent: CompletionIntent,
1851 cx: &App,
1852 ) -> Result<LanguageModelRequest> {
1853 let model = self.model().context("No language model configured")?;
1854 let tools = if let Some(turn) = self.running_turn.as_ref() {
1855 turn.tools
1856 .iter()
1857 .filter_map(|(tool_name, tool)| {
1858 log::trace!("Including tool: {}", tool_name);
1859 Some(LanguageModelRequestTool {
1860 name: tool_name.to_string(),
1861 description: tool.description().to_string(),
1862 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1863 })
1864 })
1865 .collect::<Vec<_>>()
1866 } else {
1867 Vec::new()
1868 };
1869
1870 log::debug!("Building completion request");
1871 log::debug!("Completion intent: {:?}", completion_intent);
1872 log::debug!("Completion mode: {:?}", self.completion_mode);
1873
1874 let available_tools: Vec<_> = self
1875 .running_turn
1876 .as_ref()
1877 .map(|turn| turn.tools.keys().cloned().collect())
1878 .unwrap_or_default();
1879
1880 log::debug!("Request includes {} tools", available_tools.len());
1881 let messages = self.build_request_messages(available_tools, cx);
1882 log::debug!("Request will include {} messages", messages.len());
1883
1884 let request = LanguageModelRequest {
1885 thread_id: Some(self.id.to_string()),
1886 prompt_id: Some(self.prompt_id.to_string()),
1887 intent: Some(completion_intent),
1888 mode: Some(self.completion_mode.into()),
1889 messages,
1890 tools,
1891 tool_choice: None,
1892 stop: Vec::new(),
1893 temperature: AgentSettings::temperature_for_model(model, cx),
1894 thinking_allowed: true,
1895 };
1896
1897 log::debug!("Completion request built successfully");
1898 Ok(request)
1899 }
1900
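    /// Collects the tools enabled by the given profile for the given model,
    /// merging in context-server tools and disambiguating duplicate names by
    /// prefixing them with the server ID.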
1901 fn enabled_tools(
1902 &self,
1903 profile: &AgentProfileSettings,
1904 model: &Arc<dyn LanguageModel>,
1905 cx: &App,
1906 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1907 fn truncate(tool_name: &SharedString) -> SharedString {
1908 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1909 let mut truncated = tool_name.to_string();
1910 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1911 truncated.into()
1912 } else {
1913 tool_name.clone()
1914 }
1915 }
1916
1917 let mut tools = self
1918 .tools
1919 .iter()
1920 .filter_map(|(tool_name, tool)| {
1921 if tool.supports_provider(&model.provider_id())
1922 && profile.is_tool_enabled(tool_name)
1923 {
1924 Some((truncate(tool_name), tool.clone()))
1925 } else {
1926 None
1927 }
1928 })
1929 .collect::<BTreeMap<_, _>>();
1930
1931 let mut context_server_tools = Vec::new();
1932 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1933 let mut duplicate_tool_names = HashSet::default();
1934 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1935 for (tool_name, tool) in server_tools {
1936 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1937 let tool_name = truncate(tool_name);
1938 if !seen_tools.insert(tool_name.clone()) {
1939 duplicate_tool_names.insert(tool_name.clone());
1940 }
1941 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1942 }
1943 }
1944 }
1945
1946 // When there are duplicate tool names, disambiguate by prefixing them
1947 // with the server ID. In the rare case there isn't enough space for the
1948 // disambiguated tool name, keep only the last tool with this name.
1949 for (server_id, tool_name, tool) in context_server_tools {
1950 if duplicate_tool_names.contains(&tool_name) {
1951 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1952 if available >= 2 {
1953 let mut disambiguated = server_id.0.to_string();
1954 disambiguated.truncate(available - 1);
1955 disambiguated.push('_');
1956 disambiguated.push_str(&tool_name);
1957 tools.insert(disambiguated.into(), tool.clone());
1958 } else {
1959 tools.insert(tool_name, tool.clone());
1960 }
1961 } else {
1962 tools.insert(tool_name, tool.clone());
1963 }
1964 }
1965
1966 tools
1967 }
1968
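    /// Looks up a tool by name among the tools enabled for the currently
    /// running turn, if any.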
1969 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1970 self.running_turn.as_ref()?.tools.get(name).cloned()
1971 }
1972
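    /// Flattens the thread into the message list sent to the model: a rendered
    /// system prompt, every persisted message (the last of which is marked as
    /// a cache point), and finally the pending agent message, if any.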
1973 fn build_request_messages(
1974 &self,
1975 available_tools: Vec<SharedString>,
1976 cx: &App,
1977 ) -> Vec<LanguageModelRequestMessage> {
1978 log::trace!(
1979 "Building request messages from {} thread messages",
1980 self.messages.len()
1981 );
1982
1983 let system_prompt = SystemPromptTemplate {
1984 project: self.project_context.read(cx),
1985 available_tools,
1986 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
1987 }
1988 .render(&self.templates)
1989 .context("failed to build system prompt")
1990 .expect("Invalid template");
1991 let mut messages = vec![LanguageModelRequestMessage {
1992 role: Role::System,
1993 content: vec![system_prompt.into()],
1994 cache: false,
1995 }];
1996 for message in &self.messages {
1997 messages.extend(message.to_request());
1998 }
1999
2000 if let Some(last_message) = messages.last_mut() {
2001 last_message.cache = true;
2002 }
2003
2004 if let Some(message) = self.pending_message.as_ref() {
2005 messages.extend(message.to_request());
2006 }
2007
2008 messages
2009 }
2010
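    /// Renders the entire thread, including any pending agent message, as Markdown.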
2011 pub fn to_markdown(&self) -> String {
2012 let mut markdown = String::new();
2013 for (ix, message) in self.messages.iter().enumerate() {
2014 if ix > 0 {
2015 markdown.push('\n');
2016 }
2017 markdown.push_str(&message.to_markdown());
2018 }
2019
2020 if let Some(message) = self.pending_message.as_ref() {
2021 markdown.push('\n');
2022 markdown.push_str(&message.to_markdown());
2023 }
2024
2025 markdown
2026 }
2027
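    /// Generates a fresh `PromptId` for the next user-initiated request.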
2028 fn advance_prompt_id(&mut self) {
2029 self.prompt_id = PromptId::new();
2030 }
2031
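    /// Maps a completion error to a retry strategy, or `None` when retrying
    /// cannot help.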
2032 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2033 use LanguageModelCompletionError::*;
2034 use http_client::StatusCode;
2035
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   backing off exponentially or honoring the server-provided retry-after delay.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2040 match error {
2041 HttpResponseError {
2042 status_code: StatusCode::TOO_MANY_REQUESTS,
2043 ..
2044 } => Some(RetryStrategy::ExponentialBackoff {
2045 initial_delay: BASE_RETRY_DELAY,
2046 max_attempts: MAX_RETRY_ATTEMPTS,
2047 }),
2048 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2049 Some(RetryStrategy::Fixed {
2050 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2051 max_attempts: MAX_RETRY_ATTEMPTS,
2052 })
2053 }
2054 UpstreamProviderError {
2055 status,
2056 retry_after,
2057 ..
2058 } => match *status {
2059 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2060 Some(RetryStrategy::Fixed {
2061 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2062 max_attempts: MAX_RETRY_ATTEMPTS,
2063 })
2064 }
2065 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2066 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2067 // Internal Server Error could be anything, retry up to 3 times.
2068 max_attempts: 3,
2069 }),
2070 status => {
2071 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
                    // but we frequently see it in practice. See https://http.dev/529
2073 if status.as_u16() == 529 {
2074 Some(RetryStrategy::Fixed {
2075 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2076 max_attempts: MAX_RETRY_ATTEMPTS,
2077 })
2078 } else {
2079 Some(RetryStrategy::Fixed {
2080 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2081 max_attempts: 2,
2082 })
2083 }
2084 }
2085 },
2086 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2087 delay: BASE_RETRY_DELAY,
2088 max_attempts: 3,
2089 }),
2090 ApiReadResponseError { .. }
2091 | HttpSend { .. }
2092 | DeserializeResponse { .. }
2093 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2094 delay: BASE_RETRY_DELAY,
2095 max_attempts: 3,
2096 }),
2097 // Retrying these errors definitely shouldn't help.
2098 HttpResponseError {
2099 status_code:
2100 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2101 ..
2102 }
2103 | AuthenticationError { .. }
2104 | PermissionError { .. }
2105 | NoApiKey { .. }
2106 | ApiEndpointNotFound { .. }
2107 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once
2109 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2110 delay: BASE_RETRY_DELAY,
2111 max_attempts: 1,
2112 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2114 HttpResponseError { status_code, .. }
2115 if status_code.is_client_error() || status_code.is_server_error() =>
2116 {
2117 Some(RetryStrategy::Fixed {
2118 delay: BASE_RETRY_DELAY,
2119 max_attempts: 3,
2120 })
2121 }
2122 Other(err)
2123 if err.is::<language_model::PaymentRequiredError>()
2124 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2125 {
2126 // Retrying won't help for Payment Required or Model Request Limit errors (where
2127 // the user must upgrade to usage-based billing to get more requests, or else wait
2128 // for a significant amount of time for the request limit to reset).
2129 None
2130 }
            // Conservatively retry any other errors a couple of times before giving up.
2132 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2133 delay: BASE_RETRY_DELAY,
2134 max_attempts: 2,
2135 }),
2136 }
2137 }
2138}
2139
2140struct RunningTurn {
2141 /// Holds the task that handles agent interaction until the end of the turn.
2142 /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results.
2144 _task: Task<()>,
2145 /// The current event stream for the running turn. Used to report a final
2146 /// cancellation event if we cancel the turn.
2147 event_stream: ThreadEventStream,
2148 /// The tools that were enabled for this turn.
2149 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2150}
2151
2152impl RunningTurn {
2153 fn cancel(self) {
2154 log::debug!("Cancelling in progress turn");
2155 self.event_stream.send_canceled();
2156 }
2157}
2158
2159pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2160
2161impl EventEmitter<TokenUsageUpdated> for Thread {}
2162
2163pub struct TitleUpdated;
2164
2165impl EventEmitter<TitleUpdated> for Thread {}
2166
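/// A statically typed tool that the agent can expose to the language model.
///
/// The input type's JSON schema is sent to the model, and the schema's
/// top-level description doubles as the tool description (see the default
/// `description` implementation below).
///
/// A minimal sketch of an implementation. The `EchoTool` type and its input
/// are hypothetical and only illustrate the shape of the trait; a real tool
/// would pick the `acp::ToolKind` that best matches what it does:
///
/// ```ignore
/// use std::sync::Arc;
///
/// struct EchoTool;
///
/// /// Echoes the provided text back to the model.
/// #[derive(serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
/// struct EchoToolInput {
///     /// The text to echo.
///     text: String,
/// }
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     // Using the result content type directly keeps the example simple.
///     type Output = LanguageModelToolResultContent;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(LanguageModelToolResultContent::Text(input.text.into())))
///     }
/// }
///
/// // Type-erase the tool before registering it with a thread.
/// let tool: Arc<dyn AnyAgentTool> = EchoTool.erase();
/// ```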
2167pub trait AgentTool
2168where
2169 Self: 'static + Sized,
2170{
2171 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2172 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2173
2174 fn name() -> &'static str;
2175
2176 fn description() -> SharedString {
2177 let schema = schemars::schema_for!(Self::Input);
2178 SharedString::new(
2179 schema
2180 .get("description")
2181 .and_then(|description| description.as_str())
2182 .unwrap_or_default(),
2183 )
2184 }
2185
2186 fn kind() -> acp::ToolKind;
2187
2188 /// The initial tool title to display. Can be updated during the tool run.
2189 fn initial_title(
2190 &self,
2191 input: Result<Self::Input, serde_json::Value>,
2192 cx: &mut App,
2193 ) -> SharedString;
2194
2195 /// Returns the JSON schema that describes the tool's input.
2196 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2197 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2198 }
2199
    /// Some tools rely on a specific provider, e.g. for billing.
    /// This lets a tool declare whether it is compatible with the given
    /// provider or should be filtered out.
2202 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2203 true
2204 }
2205
2206 /// Runs the tool with the provided input.
2207 fn run(
2208 self: Arc<Self>,
2209 input: Self::Input,
2210 event_stream: ToolCallEventStream,
2211 cx: &mut App,
2212 ) -> Task<Result<Self::Output>>;
2213
2214 /// Emits events for a previous execution of the tool.
2215 fn replay(
2216 &self,
2217 _input: Self::Input,
2218 _output: Self::Output,
2219 _event_stream: ToolCallEventStream,
2220 _cx: &mut App,
2221 ) -> Result<()> {
2222 Ok(())
2223 }
2224
2225 fn erase(self) -> Arc<dyn AnyAgentTool> {
2226 Arc::new(Erased(Arc::new(self)))
2227 }
2228}
2229
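/// Wrapper that erases an [`AgentTool`]'s associated types so it can be stored
/// and invoked as an [`AnyAgentTool`] using `serde_json::Value` for input and
/// output.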
2230pub struct Erased<T>(T);
2231
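/// The result of a type-erased tool run: the content handed back to the model
/// plus the raw JSON form of the tool's output.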
2232pub struct AgentToolOutput {
2233 pub llm_output: LanguageModelToolResultContent,
2234 pub raw_output: serde_json::Value,
2235}
2236
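/// Object-safe counterpart of [`AgentTool`], with inputs and outputs passed as
/// untyped `serde_json::Value`s.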
2237pub trait AnyAgentTool {
2238 fn name(&self) -> SharedString;
2239 fn description(&self) -> SharedString;
2240 fn kind(&self) -> acp::ToolKind;
2241 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2242 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2243 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2244 true
2245 }
2246 fn run(
2247 self: Arc<Self>,
2248 input: serde_json::Value,
2249 event_stream: ToolCallEventStream,
2250 cx: &mut App,
2251 ) -> Task<Result<AgentToolOutput>>;
2252 fn replay(
2253 &self,
2254 input: serde_json::Value,
2255 output: serde_json::Value,
2256 event_stream: ToolCallEventStream,
2257 cx: &mut App,
2258 ) -> Result<()>;
2259}
2260
2261impl<T> AnyAgentTool for Erased<Arc<T>>
2262where
2263 T: AgentTool,
2264{
2265 fn name(&self) -> SharedString {
2266 T::name().into()
2267 }
2268
2269 fn description(&self) -> SharedString {
2270 T::description()
2271 }
2272
2273 fn kind(&self) -> agent_client_protocol::ToolKind {
2274 T::kind()
2275 }
2276
    fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
        let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
        self.0.initial_title(parsed_input, cx)
2280 }
2281
2282 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2283 let mut json = serde_json::to_value(T::input_schema(format))?;
2284 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2285 Ok(json)
2286 }
2287
2288 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2289 T::supports_provider(provider)
2290 }
2291
2292 fn run(
2293 self: Arc<Self>,
2294 input: serde_json::Value,
2295 event_stream: ToolCallEventStream,
2296 cx: &mut App,
2297 ) -> Task<Result<AgentToolOutput>> {
2298 cx.spawn(async move |cx| {
2299 let input = serde_json::from_value(input)?;
2300 let output = cx
2301 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2302 .await?;
2303 let raw_output = serde_json::to_value(&output)?;
2304 Ok(AgentToolOutput {
2305 llm_output: output.into(),
2306 raw_output,
2307 })
2308 })
2309 }
2310
2311 fn replay(
2312 &self,
2313 input: serde_json::Value,
2314 output: serde_json::Value,
2315 event_stream: ToolCallEventStream,
2316 cx: &mut App,
2317 ) -> Result<()> {
2318 let input = serde_json::from_value(input)?;
2319 let output = serde_json::from_value(output)?;
2320 self.0.replay(input, output, event_stream, cx)
2321 }
2322}
2323
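/// Sender half of the per-turn event channel. Each helper emits a single
/// `ThreadEvent`, silently dropping it if the receiving end has gone away.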
2324#[derive(Clone)]
2325struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2326
2327impl ThreadEventStream {
2328 fn send_user_message(&self, message: &UserMessage) {
2329 self.0
2330 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2331 .ok();
2332 }
2333
2334 fn send_text(&self, text: &str) {
2335 self.0
2336 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2337 .ok();
2338 }
2339
2340 fn send_thinking(&self, text: &str) {
2341 self.0
2342 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2343 .ok();
2344 }
2345
2346 fn send_tool_call(
2347 &self,
2348 id: &LanguageModelToolUseId,
2349 tool_name: &str,
2350 title: SharedString,
2351 kind: acp::ToolKind,
2352 input: serde_json::Value,
2353 ) {
2354 self.0
2355 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2356 id,
2357 tool_name,
2358 title.to_string(),
2359 kind,
2360 input,
2361 ))))
2362 .ok();
2363 }
2364
2365 fn initial_tool_call(
2366 id: &LanguageModelToolUseId,
2367 tool_name: &str,
2368 title: String,
2369 kind: acp::ToolKind,
2370 input: serde_json::Value,
2371 ) -> acp::ToolCall {
2372 acp::ToolCall {
2373 meta: Some(serde_json::json!({
2374 "tool_name": tool_name
2375 })),
2376 id: acp::ToolCallId(id.to_string().into()),
2377 title,
2378 kind,
2379 status: acp::ToolCallStatus::Pending,
2380 content: vec![],
2381 locations: vec![],
2382 raw_input: Some(input),
2383 raw_output: None,
2384 }
2385 }
2386
2387 fn update_tool_call_fields(
2388 &self,
2389 tool_use_id: &LanguageModelToolUseId,
2390 fields: acp::ToolCallUpdateFields,
2391 ) {
2392 self.0
2393 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2394 acp::ToolCallUpdate {
2395 meta: None,
2396 id: acp::ToolCallId(tool_use_id.to_string().into()),
2397 fields,
2398 }
2399 .into(),
2400 )))
2401 .ok();
2402 }
2403
2404 fn send_retry(&self, status: acp_thread::RetryStatus) {
2405 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2406 }
2407
2408 fn send_stop(&self, reason: acp::StopReason) {
2409 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2410 }
2411
2412 fn send_canceled(&self) {
2413 self.0
2414 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2415 .ok();
2416 }
2417
2418 fn send_error(&self, error: impl Into<anyhow::Error>) {
2419 self.0.unbounded_send(Err(error.into())).ok();
2420 }
2421}
2422
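/// Event stream handed to an individual tool invocation; every event it emits
/// is tagged with the originating `tool_use_id`.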
2423#[derive(Clone)]
2424pub struct ToolCallEventStream {
2425 tool_use_id: LanguageModelToolUseId,
2426 stream: ThreadEventStream,
2427 fs: Option<Arc<dyn Fs>>,
2428}
2429
2430impl ToolCallEventStream {
2431 #[cfg(any(test, feature = "test-support"))]
2432 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2433 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2434
2435 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2436
2437 (stream, ToolCallEventStreamReceiver(events_rx))
2438 }
2439
2440 fn new(
2441 tool_use_id: LanguageModelToolUseId,
2442 stream: ThreadEventStream,
2443 fs: Option<Arc<dyn Fs>>,
2444 ) -> Self {
2445 Self {
2446 tool_use_id,
2447 stream,
2448 fs,
2449 }
2450 }
2451
2452 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2453 self.stream
2454 .update_tool_call_fields(&self.tool_use_id, fields);
2455 }
2456
2457 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2458 self.stream
2459 .0
2460 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2461 acp_thread::ToolCallUpdateDiff {
2462 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2463 diff,
2464 }
2465 .into(),
2466 )))
2467 .ok();
2468 }
2469
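    /// Asks the user for permission to run the tool, resolving once they
    /// respond. Choosing "Always Allow" persists the preference to settings
    /// when a filesystem handle is available. If `always_allow_tool_actions`
    /// is already enabled, the request is skipped and permission is granted
    /// immediately.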
2470 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2471 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2472 return Task::ready(Ok(()));
2473 }
2474
2475 let (response_tx, response_rx) = oneshot::channel();
2476 self.stream
2477 .0
2478 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2479 ToolCallAuthorization {
2480 tool_call: acp::ToolCallUpdate {
2481 meta: None,
2482 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2483 fields: acp::ToolCallUpdateFields {
2484 title: Some(title.into()),
2485 ..Default::default()
2486 },
2487 },
2488 options: vec![
2489 acp::PermissionOption {
2490 id: acp::PermissionOptionId("always_allow".into()),
2491 name: "Always Allow".into(),
2492 kind: acp::PermissionOptionKind::AllowAlways,
2493 meta: None,
2494 },
2495 acp::PermissionOption {
2496 id: acp::PermissionOptionId("allow".into()),
2497 name: "Allow".into(),
2498 kind: acp::PermissionOptionKind::AllowOnce,
2499 meta: None,
2500 },
2501 acp::PermissionOption {
2502 id: acp::PermissionOptionId("deny".into()),
2503 name: "Deny".into(),
2504 kind: acp::PermissionOptionKind::RejectOnce,
2505 meta: None,
2506 },
2507 ],
2508 response: response_tx,
2509 },
2510 )))
2511 .ok();
2512 let fs = self.fs.clone();
2513 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2514 "always_allow" => {
2515 if let Some(fs) = fs.clone() {
2516 cx.update(|cx| {
2517 update_settings_file(fs, cx, |settings, _| {
2518 settings
2519 .agent
2520 .get_or_insert_default()
2521 .set_always_allow_tool_actions(true);
2522 });
2523 })?;
2524 }
2525
2526 Ok(())
2527 }
2528 "allow" => Ok(()),
2529 _ => Err(anyhow!("Permission to run tool denied by user")),
2530 })
2531 }
2532}
2533
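/// Test-only receiver for asserting on the events produced by a
/// [`ToolCallEventStream`].
///
/// A rough sketch of how it might be used in a test; the tool under test and
/// the expected title are hypothetical:
///
/// ```ignore
/// let (stream, mut events) = ToolCallEventStream::test();
/// // Hand `stream` to the tool under test, then assert on what it emitted.
/// let fields = events.expect_update_fields().await;
/// assert_eq!(fields.title, Some("Echo".into()));
/// ```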
2534#[cfg(any(test, feature = "test-support"))]
2535pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2536
2537#[cfg(any(test, feature = "test-support"))]
2538impl ToolCallEventStreamReceiver {
2539 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2540 let event = self.0.next().await;
2541 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2542 auth
2543 } else {
2544 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2545 }
2546 }
2547
2548 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2549 let event = self.0.next().await;
2550 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2551 update,
2552 )))) = event
2553 {
2554 update.fields
2555 } else {
2556 panic!("Expected update fields but got: {:?}", event);
2557 }
2558 }
2559
2560 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2561 let event = self.0.next().await;
2562 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2563 update,
2564 )))) = event
2565 {
2566 update.diff
2567 } else {
2568 panic!("Expected diff but got: {:?}", event);
2569 }
2570 }
2571
2572 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2573 let event = self.0.next().await;
2574 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2575 update,
2576 )))) = event
2577 {
2578 update.terminal
2579 } else {
2580 panic!("Expected terminal but got: {:?}", event);
2581 }
2582 }
2583}
2584
2585#[cfg(any(test, feature = "test-support"))]
2586impl std::ops::Deref for ToolCallEventStreamReceiver {
2587 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2588
2589 fn deref(&self) -> &Self::Target {
2590 &self.0
2591 }
2592}
2593
2594#[cfg(any(test, feature = "test-support"))]
2595impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2596 fn deref_mut(&mut self) -> &mut Self::Target {
2597 &mut self.0
2598 }
2599}
2600
2601impl From<&str> for UserMessageContent {
2602 fn from(text: &str) -> Self {
2603 Self::Text(text.into())
2604 }
2605}
2606
2607impl UserMessageContent {
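    /// Converts an ACP content block into thread-native content. Resource
    /// links and embedded text resources become mentions when their URI
    /// parses; audio and binary blobs currently degrade to placeholder text.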
2608 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2609 match value {
2610 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2611 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2612 acp::ContentBlock::Audio(_) => {
2613 // TODO
2614 Self::Text("[audio]".to_string())
2615 }
2616 acp::ContentBlock::ResourceLink(resource_link) => {
2617 match MentionUri::parse(&resource_link.uri, path_style) {
2618 Ok(uri) => Self::Mention {
2619 uri,
2620 content: String::new(),
2621 },
2622 Err(err) => {
2623 log::error!("Failed to parse mention link: {}", err);
2624 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2625 }
2626 }
2627 }
2628 acp::ContentBlock::Resource(resource) => match resource.resource {
2629 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2630 match MentionUri::parse(&resource.uri, path_style) {
2631 Ok(uri) => Self::Mention {
2632 uri,
2633 content: resource.text,
2634 },
2635 Err(err) => {
2636 log::error!("Failed to parse mention link: {}", err);
2637 Self::Text(
2638 MarkdownCodeBlock {
2639 tag: &resource.uri,
2640 text: &resource.text,
2641 }
2642 .to_string(),
2643 )
2644 }
2645 }
2646 }
2647 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2648 // TODO
2649 Self::Text("[blob]".to_string())
2650 }
2651 },
2652 }
2653 }
2654}
2655
2656impl From<UserMessageContent> for acp::ContentBlock {
2657 fn from(content: UserMessageContent) -> Self {
2658 match content {
2659 UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
2660 text,
2661 annotations: None,
2662 meta: None,
2663 }),
2664 UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
2665 data: image.source.to_string(),
2666 mime_type: "image/png".to_string(),
2667 meta: None,
2668 annotations: None,
2669 uri: None,
2670 }),
2671 UserMessageContent::Mention { uri, content } => {
2672 acp::ContentBlock::Resource(acp::EmbeddedResource {
2673 meta: None,
2674 resource: acp::EmbeddedResourceResource::TextResourceContents(
2675 acp::TextResourceContents {
2676 meta: None,
2677 mime_type: None,
2678 text: content,
2679 uri: uri.to_uri().to_string(),
2680 },
2681 ),
2682 annotations: None,
2683 })
2684 }
2685 }
2686 }
2687}
2688
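/// Converts an ACP image block into a `LanguageModelImage`. No dimensions are
/// available here, so a zero size is used as a placeholder (see the TODO
/// below).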
2689fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2690 LanguageModelImage {
2691 source: image_content.data.into(),
2692 // TODO: make this optional?
2693 size: gpui::Size::new(0.into(), 0.into()),
2694 }
2695}