1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 SystemPromptTemplate, Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6};
7use acp_thread::{MentionUri, UserMessageId};
8use action_log::ActionLog;
9
10use agent_client_protocol as acp;
11use agent_settings::{
12 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
13 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
14};
15use anyhow::{Context as _, Result, anyhow};
16use chrono::{DateTime, Utc};
17use client::{ModelRequestUsage, RequestUsage, UserStore};
18use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
19use collections::{HashMap, HashSet, IndexMap};
20use fs::Fs;
21use futures::stream;
22use futures::{
23 FutureExt,
24 channel::{mpsc, oneshot},
25 future::Shared,
26 stream::FuturesUnordered,
27};
28use gpui::{
29 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
30};
31use language_model::{
32 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
33 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
34 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
35 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
36 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
37 ZED_CLOUD_PROVIDER_ID,
38};
39use project::Project;
40use prompt_store::ProjectContext;
41use schemars::{JsonSchema, Schema};
42use serde::{Deserialize, Serialize};
43use settings::{LanguageModelSelection, Settings, update_settings_file};
44use smol::stream::StreamExt;
45use std::{
46 collections::BTreeMap,
47 ops::RangeInclusive,
48 path::Path,
49 rc::Rc,
50 sync::Arc,
51 time::{Duration, Instant},
52};
53use std::{fmt::Write, path::PathBuf};
54use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
55use uuid::Uuid;
56
57const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
58pub const MAX_TOOL_NAME_LENGTH: usize = 64;
59
60/// The ID of the user prompt that initiated a request.
61///
62/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
63#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
64pub struct PromptId(Arc<str>);
65
66impl PromptId {
67 pub fn new() -> Self {
68 Self(Uuid::new_v4().to_string().into())
69 }
70}
71
72impl std::fmt::Display for PromptId {
73 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
74 write!(f, "{}", self.0)
75 }
76}
77
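// Automatic retry policy for failed completions: at most MAX_RETRY_ATTEMPTS tries per
// request. When a retry uses exponential backoff starting from BASE_RETRY_DELAY, the
// wait doubles on each attempt (5s, 10s, 20s, 40s); fixed strategies reuse one delay.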
78pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
79pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
80
81#[derive(Debug, Clone)]
82enum RetryStrategy {
83 ExponentialBackoff {
84 initial_delay: Duration,
85 max_attempts: u8,
86 },
87 Fixed {
88 delay: Duration,
89 max_attempts: u8,
90 },
91}
92
93#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
94pub enum Message {
95 User(UserMessage),
96 Agent(AgentMessage),
97 Resume,
98}
99
100impl Message {
101 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
102 match self {
103 Message::Agent(agent_message) => Some(agent_message),
104 _ => None,
105 }
106 }
107
108 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
109 match self {
110 Message::User(message) => vec![message.to_request()],
111 Message::Agent(message) => message.to_request(),
112 Message::Resume => vec![LanguageModelRequestMessage {
113 role: Role::User,
114 content: vec!["Continue where you left off".into()],
115 cache: false,
116 reasoning_details: None,
117 }],
118 }
119 }
120
121 pub fn to_markdown(&self) -> String {
122 match self {
123 Message::User(message) => message.to_markdown(),
124 Message::Agent(message) => message.to_markdown(),
125 Message::Resume => "[resume]\n".into(),
126 }
127 }
128
129 pub fn role(&self) -> Role {
130 match self {
131 Message::User(_) | Message::Resume => Role::User,
132 Message::Agent(_) => Role::Assistant,
133 }
134 }
135}
136
137#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
138pub struct UserMessage {
139 pub id: UserMessageId,
140 pub content: Vec<UserMessageContent>,
141}
142
143#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
144pub enum UserMessageContent {
145 Text(String),
146 Mention { uri: MentionUri, content: String },
147 Image(LanguageModelImage),
148}
149
150impl UserMessage {
151 pub fn to_markdown(&self) -> String {
152 let mut markdown = String::from("## User\n\n");
153
154 for content in &self.content {
155 match content {
156 UserMessageContent::Text(text) => {
157 markdown.push_str(text);
158 markdown.push('\n');
159 }
160 UserMessageContent::Image(_) => {
161 markdown.push_str("<image />\n");
162 }
163 UserMessageContent::Mention { uri, content } => {
164 if !content.is_empty() {
165 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
166 } else {
167 let _ = writeln!(&mut markdown, "{}", uri.as_link());
168 }
169 }
170 }
171 }
172
173 markdown
174 }
175
176 fn to_request(&self) -> LanguageModelRequestMessage {
177 let mut message = LanguageModelRequestMessage {
178 role: Role::User,
179 content: Vec::with_capacity(self.content.len()),
180 cache: false,
181 reasoning_details: None,
182 };
183
184 const OPEN_CONTEXT: &str = "<context>\n\
185 The following items were attached by the user. \
186 They are up-to-date and don't need to be re-read.\n\n";
187
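        // Mention contents are grouped into the tagged sections below and appended after the
        // user's literal message (which keeps only the mention links inline), e.g.:
        //
        //     <context>
        //     <files>
        //     ```rs /abs/path/to/file.rs
        //     ...
        //     ```
        //     </files>
        //     </context>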
188 const OPEN_FILES_TAG: &str = "<files>";
189 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
190 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
191 const OPEN_SELECTIONS_TAG: &str = "<selections>";
192 const OPEN_THREADS_TAG: &str = "<threads>";
193 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
        const OPEN_RULES_TAG: &str =
            "<user_rules>\nThe user has specified the following rules that should be applied:\n";
196
197 let mut file_context = OPEN_FILES_TAG.to_string();
198 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
199 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
200 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
201 let mut thread_context = OPEN_THREADS_TAG.to_string();
202 let mut fetch_context = OPEN_FETCH_TAG.to_string();
203 let mut rules_context = OPEN_RULES_TAG.to_string();
204
205 for chunk in &self.content {
206 let chunk = match chunk {
207 UserMessageContent::Text(text) => {
208 language_model::MessageContent::Text(text.clone())
209 }
210 UserMessageContent::Image(value) => {
211 language_model::MessageContent::Image(value.clone())
212 }
213 UserMessageContent::Mention { uri, content } => {
214 match uri {
215 MentionUri::File { abs_path } => {
216 write!(
217 &mut file_context,
218 "\n{}",
219 MarkdownCodeBlock {
220 tag: &codeblock_tag(abs_path, None),
221 text: &content.to_string(),
222 }
223 )
224 .ok();
225 }
226 MentionUri::PastedImage => {
227 debug_panic!("pasted image URI should not be used in mention content")
228 }
229 MentionUri::Directory { .. } => {
230 write!(&mut directory_context, "\n{}\n", content).ok();
231 }
232 MentionUri::Symbol {
233 abs_path: path,
234 line_range,
235 ..
236 } => {
237 write!(
238 &mut symbol_context,
239 "\n{}",
240 MarkdownCodeBlock {
241 tag: &codeblock_tag(path, Some(line_range)),
242 text: content
243 }
244 )
245 .ok();
246 }
247 MentionUri::Selection {
248 abs_path: path,
249 line_range,
250 ..
251 } => {
252 write!(
253 &mut selection_context,
254 "\n{}",
255 MarkdownCodeBlock {
256 tag: &codeblock_tag(
257 path.as_deref().unwrap_or("Untitled".as_ref()),
258 Some(line_range)
259 ),
260 text: content
261 }
262 )
263 .ok();
264 }
265 MentionUri::Thread { .. } => {
266 write!(&mut thread_context, "\n{}\n", content).ok();
267 }
268 MentionUri::TextThread { .. } => {
269 write!(&mut thread_context, "\n{}\n", content).ok();
270 }
271 MentionUri::Rule { .. } => {
272 write!(
273 &mut rules_context,
274 "\n{}",
275 MarkdownCodeBlock {
276 tag: "",
277 text: content
278 }
279 )
280 .ok();
281 }
282 MentionUri::Fetch { url } => {
283 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
284 }
285 }
286
287 language_model::MessageContent::Text(uri.as_link().to_string())
288 }
289 };
290
291 message.content.push(chunk);
292 }
293
294 let len_before_context = message.content.len();
295
296 if file_context.len() > OPEN_FILES_TAG.len() {
297 file_context.push_str("</files>\n");
298 message
299 .content
300 .push(language_model::MessageContent::Text(file_context));
301 }
302
303 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
304 directory_context.push_str("</directories>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(directory_context));
308 }
309
310 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
311 symbol_context.push_str("</symbols>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(symbol_context));
315 }
316
317 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
318 selection_context.push_str("</selections>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(selection_context));
322 }
323
324 if thread_context.len() > OPEN_THREADS_TAG.len() {
325 thread_context.push_str("</threads>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(thread_context));
329 }
330
331 if fetch_context.len() > OPEN_FETCH_TAG.len() {
332 fetch_context.push_str("</fetched_urls>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(fetch_context));
336 }
337
338 if rules_context.len() > OPEN_RULES_TAG.len() {
339 rules_context.push_str("</user_rules>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(rules_context));
343 }
344
345 if message.content.len() > len_before_context {
346 message.content.insert(
347 len_before_context,
348 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
349 );
350 message
351 .content
352 .push(language_model::MessageContent::Text("</context>".into()));
353 }
354
355 message
356 }
357}
358
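/// Builds a code-fence tag such as `rs /abs/path/to/file.rs:10-42`: the file extension
/// (when present), the full path, and an optional 1-based line range. For example, a
/// zero-based `line_range` of `9..=41` renders as the `:10-42` suffix shown above.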
359fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
360 let mut result = String::new();
361
362 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
363 let _ = write!(result, "{} ", extension);
364 }
365
366 let _ = write!(result, "{}", full_path.display());
367
368 if let Some(range) = line_range {
369 if range.start() == range.end() {
370 let _ = write!(result, ":{}", range.start() + 1);
371 } else {
372 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
373 }
374 }
375
376 result
377}
378
379impl AgentMessage {
380 pub fn to_markdown(&self) -> String {
381 let mut markdown = String::from("## Assistant\n\n");
382
383 for content in &self.content {
384 match content {
385 AgentMessageContent::Text(text) => {
386 markdown.push_str(text);
387 markdown.push('\n');
388 }
389 AgentMessageContent::Thinking { text, .. } => {
390 markdown.push_str("<think>");
391 markdown.push_str(text);
392 markdown.push_str("</think>\n");
393 }
394 AgentMessageContent::RedactedThinking(_) => {
395 markdown.push_str("<redacted_thinking />\n")
396 }
397 AgentMessageContent::ToolUse(tool_use) => {
398 markdown.push_str(&format!(
399 "**Tool Use**: {} (ID: {})\n",
400 tool_use.name, tool_use.id
401 ));
402 markdown.push_str(&format!(
403 "{}\n",
404 MarkdownCodeBlock {
405 tag: "json",
406 text: &format!("{:#}", tool_use.input)
407 }
408 ));
409 }
410 }
411 }
412
413 for tool_result in self.tool_results.values() {
414 markdown.push_str(&format!(
415 "**Tool Result**: {} (ID: {})\n\n",
416 tool_result.tool_name, tool_result.tool_use_id
417 ));
418 if tool_result.is_error {
419 markdown.push_str("**ERROR:**\n");
420 }
421
422 match &tool_result.content {
423 LanguageModelToolResultContent::Text(text) => {
424 writeln!(markdown, "{text}\n").ok();
425 }
426 LanguageModelToolResultContent::Image(_) => {
427 writeln!(markdown, "<image />\n").ok();
428 }
429 }
430
431 if let Some(output) = tool_result.output.as_ref() {
432 writeln!(
433 markdown,
434 "**Debug Output**:\n\n```json\n{}\n```\n",
435 serde_json::to_string_pretty(output).unwrap()
436 )
437 .unwrap();
438 }
439 }
440
441 markdown
442 }
443
444 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
445 let mut assistant_message = LanguageModelRequestMessage {
446 role: Role::Assistant,
447 content: Vec::with_capacity(self.content.len()),
448 cache: false,
449 reasoning_details: self.reasoning_details.clone(),
450 };
451 for chunk in &self.content {
452 match chunk {
453 AgentMessageContent::Text(text) => {
454 assistant_message
455 .content
456 .push(language_model::MessageContent::Text(text.clone()));
457 }
458 AgentMessageContent::Thinking { text, signature } => {
459 assistant_message
460 .content
461 .push(language_model::MessageContent::Thinking {
462 text: text.clone(),
463 signature: signature.clone(),
464 });
465 }
466 AgentMessageContent::RedactedThinking(value) => {
467 assistant_message.content.push(
468 language_model::MessageContent::RedactedThinking(value.clone()),
469 );
470 }
471 AgentMessageContent::ToolUse(tool_use) => {
472 if self.tool_results.contains_key(&tool_use.id) {
473 assistant_message
474 .content
475 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
476 }
477 }
478 };
479 }
480
481 let mut user_message = LanguageModelRequestMessage {
482 role: Role::User,
483 content: Vec::new(),
484 cache: false,
485 reasoning_details: None,
486 };
487
488 for tool_result in self.tool_results.values() {
489 let mut tool_result = tool_result.clone();
490 // Surprisingly, the API fails if we return an empty string here.
491 // It thinks we are sending a tool use without a tool result.
492 if tool_result.content.is_empty() {
493 tool_result.content = "<Tool returned an empty string>".into();
494 }
495 user_message
496 .content
497 .push(language_model::MessageContent::ToolResult(tool_result));
498 }
499
500 let mut messages = Vec::new();
501 if !assistant_message.content.is_empty() {
502 messages.push(assistant_message);
503 }
504 if !user_message.content.is_empty() {
505 messages.push(user_message);
506 }
507 messages
508 }
509}
510
511#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
512pub struct AgentMessage {
513 pub content: Vec<AgentMessageContent>,
514 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
515 pub reasoning_details: Option<serde_json::Value>,
516}
517
518#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
519pub enum AgentMessageContent {
520 Text(String),
521 Thinking {
522 text: String,
523 signature: Option<String>,
524 },
525 RedactedThinking(String),
526 ToolUse(LanguageModelToolUse),
527}
528
529pub trait TerminalHandle {
530 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
531 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
532 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
533 fn kill(&self, cx: &AsyncApp) -> Result<()>;
534}
535
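/// Implemented by the embedding host so the terminal tool can spawn commands without
/// this crate depending on a concrete terminal type. A rough sketch of the expected
/// call shape (hypothetical caller, not part of this crate):
///
/// ```ignore
/// let handle = env
///     .create_terminal("cargo test".into(), None, Some(64 * 1024), cx)
///     .await?;
/// let exit_status = handle.wait_for_exit(cx)?.await;
/// ```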
536pub trait ThreadEnvironment {
537 fn create_terminal(
538 &self,
539 command: String,
540 cwd: Option<PathBuf>,
541 output_byte_limit: Option<u64>,
542 cx: &mut AsyncApp,
543 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
544}
545
546#[derive(Debug)]
547pub enum ThreadEvent {
548 UserMessage(UserMessage),
549 AgentText(String),
550 AgentThinking(String),
551 ToolCall(acp::ToolCall),
552 ToolCallUpdate(acp_thread::ToolCallUpdate),
553 ToolCallAuthorization(ToolCallAuthorization),
554 Retry(acp_thread::RetryStatus),
555 Stop(acp::StopReason),
556}
557
558#[derive(Debug)]
559pub struct NewTerminal {
560 pub command: String,
561 pub output_byte_limit: Option<u64>,
562 pub cwd: Option<PathBuf>,
563 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
564}
565
566#[derive(Debug)]
567pub struct ToolCallAuthorization {
568 pub tool_call: acp::ToolCallUpdate,
569 pub options: Vec<acp::PermissionOption>,
570 pub response: oneshot::Sender<acp::PermissionOptionId>,
571}
572
573#[derive(Debug, thiserror::Error)]
574enum CompletionError {
575 #[error("max tokens")]
576 MaxTokens,
577 #[error("refusal")]
578 Refusal,
579 #[error(transparent)]
580 Other(#[from] anyhow::Error),
581}
582
583pub struct Thread {
584 id: acp::SessionId,
585 prompt_id: PromptId,
586 updated_at: DateTime<Utc>,
587 title: Option<SharedString>,
588 pending_title_generation: Option<Task<()>>,
589 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
590 summary: Option<SharedString>,
591 messages: Vec<Message>,
592 user_store: Entity<UserStore>,
593 completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// It survives across multiple requests while the model performs tool calls
    /// and we run those tools and report their results back to the model.
597 running_turn: Option<RunningTurn>,
598 pending_message: Option<AgentMessage>,
599 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
600 tool_use_limit_reached: bool,
601 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
602 #[allow(unused)]
603 cumulative_token_usage: TokenUsage,
604 #[allow(unused)]
605 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
606 context_server_registry: Entity<ContextServerRegistry>,
607 profile_id: AgentProfileId,
608 project_context: Entity<ProjectContext>,
609 templates: Arc<Templates>,
610 model: Option<Arc<dyn LanguageModel>>,
611 summarization_model: Option<Arc<dyn LanguageModel>>,
612 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
613 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
614 pub(crate) project: Entity<Project>,
615 pub(crate) action_log: Entity<ActionLog>,
616 /// Tracks the last time files were read by the agent, to detect external modifications
617 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
618}
619
620impl Thread {
621 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
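        // With no model configured yet, optimistically advertise image support; the
        // capabilities are recomputed and re-broadcast once a model is selected.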
622 let image = model.map_or(true, |model| model.supports_images());
623 acp::PromptCapabilities::new()
624 .image(image)
625 .embedded_context(true)
626 }
627
628 pub fn new(
629 project: Entity<Project>,
630 project_context: Entity<ProjectContext>,
631 context_server_registry: Entity<ContextServerRegistry>,
632 templates: Arc<Templates>,
633 model: Option<Arc<dyn LanguageModel>>,
634 cx: &mut Context<Self>,
635 ) -> Self {
636 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
637 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
638 let (prompt_capabilities_tx, prompt_capabilities_rx) =
639 watch::channel(Self::prompt_capabilities(model.as_deref()));
640 Self {
641 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
642 prompt_id: PromptId::new(),
643 updated_at: Utc::now(),
644 title: None,
645 pending_title_generation: None,
646 pending_summary_generation: None,
647 summary: None,
648 messages: Vec::new(),
649 user_store: project.read(cx).user_store(),
650 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
651 running_turn: None,
652 pending_message: None,
653 tools: BTreeMap::default(),
654 tool_use_limit_reached: false,
655 request_token_usage: HashMap::default(),
656 cumulative_token_usage: TokenUsage::default(),
657 initial_project_snapshot: {
658 let project_snapshot = Self::project_snapshot(project.clone(), cx);
659 cx.foreground_executor()
660 .spawn(async move { Some(project_snapshot.await) })
661 .shared()
662 },
663 context_server_registry,
664 profile_id,
665 project_context,
666 templates,
667 model,
668 summarization_model: None,
669 prompt_capabilities_tx,
670 prompt_capabilities_rx,
671 project,
672 action_log,
673 file_read_times: HashMap::default(),
674 }
675 }
676
677 pub fn id(&self) -> &acp::SessionId {
678 &self.id
679 }
680
681 pub fn replay(
682 &mut self,
683 cx: &mut Context<Self>,
684 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
685 let (tx, rx) = mpsc::unbounded();
686 let stream = ThreadEventStream(tx);
687 for message in &self.messages {
688 match message {
689 Message::User(user_message) => stream.send_user_message(user_message),
690 Message::Agent(assistant_message) => {
691 for content in &assistant_message.content {
692 match content {
693 AgentMessageContent::Text(text) => stream.send_text(text),
694 AgentMessageContent::Thinking { text, .. } => {
695 stream.send_thinking(text)
696 }
697 AgentMessageContent::RedactedThinking(_) => {}
698 AgentMessageContent::ToolUse(tool_use) => {
699 self.replay_tool_call(
700 tool_use,
701 assistant_message.tool_results.get(&tool_use.id),
702 &stream,
703 cx,
704 );
705 }
706 }
707 }
708 }
709 Message::Resume => {}
710 }
711 }
712 rx
713 }
714
715 fn replay_tool_call(
716 &self,
717 tool_use: &LanguageModelToolUse,
718 tool_result: Option<&LanguageModelToolResult>,
719 stream: &ThreadEventStream,
720 cx: &mut Context<Self>,
721 ) {
722 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
723 self.context_server_registry
724 .read(cx)
725 .servers()
726 .find_map(|(_, tools)| {
727 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
728 Some(tool.clone())
729 } else {
730 None
731 }
732 })
733 });
734
735 let Some(tool) = tool else {
736 stream
737 .0
738 .unbounded_send(Ok(ThreadEvent::ToolCall(
739 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
740 .status(acp::ToolCallStatus::Failed)
741 .raw_input(tool_use.input.clone()),
742 )))
743 .ok();
744 return;
745 };
746
747 let title = tool.initial_title(tool_use.input.clone(), cx);
748 let kind = tool.kind();
749 stream.send_tool_call(
750 &tool_use.id,
751 &tool_use.name,
752 title,
753 kind,
754 tool_use.input.clone(),
755 );
756
757 let output = tool_result
758 .as_ref()
759 .and_then(|result| result.output.clone());
760 if let Some(output) = output.clone() {
761 let tool_event_stream = ToolCallEventStream::new(
762 tool_use.id.clone(),
763 stream.clone(),
764 Some(self.project.read(cx).fs().clone()),
765 );
766 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
767 .log_err();
768 }
769
770 stream.update_tool_call_fields(
771 &tool_use.id,
772 acp::ToolCallUpdateFields::new()
773 .status(
774 tool_result
775 .as_ref()
776 .map_or(acp::ToolCallStatus::Failed, |result| {
777 if result.is_error {
778 acp::ToolCallStatus::Failed
779 } else {
780 acp::ToolCallStatus::Completed
781 }
782 }),
783 )
784 .raw_output(output),
785 );
786 }
787
788 pub fn from_db(
789 id: acp::SessionId,
790 db_thread: DbThread,
791 project: Entity<Project>,
792 project_context: Entity<ProjectContext>,
793 context_server_registry: Entity<ContextServerRegistry>,
794 templates: Arc<Templates>,
795 cx: &mut Context<Self>,
796 ) -> Self {
797 let profile_id = db_thread
798 .profile
799 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
800
801 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
802 db_thread
803 .model
804 .and_then(|model| {
805 let model = SelectedModel {
806 provider: model.provider.clone().into(),
807 model: model.model.into(),
808 };
809 registry.select_model(&model, cx)
810 })
811 .or_else(|| registry.default_model())
812 .map(|model| model.model)
813 });
814
815 if model.is_none() {
816 model = Self::resolve_profile_model(&profile_id, cx);
817 }
818 if model.is_none() {
819 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
820 registry.default_model().map(|model| model.model)
821 });
822 }
823
824 let (prompt_capabilities_tx, prompt_capabilities_rx) =
825 watch::channel(Self::prompt_capabilities(model.as_deref()));
826
827 let action_log = cx.new(|_| ActionLog::new(project.clone()));
828
829 Self {
830 id,
831 prompt_id: PromptId::new(),
832 title: if db_thread.title.is_empty() {
833 None
834 } else {
835 Some(db_thread.title.clone())
836 },
837 pending_title_generation: None,
838 pending_summary_generation: None,
839 summary: db_thread.detailed_summary,
840 messages: db_thread.messages,
841 user_store: project.read(cx).user_store(),
842 completion_mode: db_thread.completion_mode.unwrap_or_default(),
843 running_turn: None,
844 pending_message: None,
845 tools: BTreeMap::default(),
846 tool_use_limit_reached: false,
847 request_token_usage: db_thread.request_token_usage.clone(),
848 cumulative_token_usage: db_thread.cumulative_token_usage,
849 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
850 context_server_registry,
851 profile_id,
852 project_context,
853 templates,
854 model,
855 summarization_model: None,
856 project,
857 action_log,
858 updated_at: db_thread.updated_at,
859 prompt_capabilities_tx,
860 prompt_capabilities_rx,
861 file_read_times: HashMap::default(),
862 }
863 }
864
865 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
866 let initial_project_snapshot = self.initial_project_snapshot.clone();
867 let mut thread = DbThread {
868 title: self.title(),
869 messages: self.messages.clone(),
870 updated_at: self.updated_at,
871 detailed_summary: self.summary.clone(),
872 initial_project_snapshot: None,
873 cumulative_token_usage: self.cumulative_token_usage,
874 request_token_usage: self.request_token_usage.clone(),
875 model: self.model.as_ref().map(|model| DbLanguageModel {
876 provider: model.provider_id().to_string(),
877 model: model.name().0.to_string(),
878 }),
879 completion_mode: Some(self.completion_mode),
880 profile: Some(self.profile_id.clone()),
881 };
882
883 cx.background_spawn(async move {
884 let initial_project_snapshot = initial_project_snapshot.await;
885 thread.initial_project_snapshot = initial_project_snapshot;
886 thread
887 })
888 }
889
890 /// Create a snapshot of the current project state including git information and unsaved buffers.
891 fn project_snapshot(
892 project: Entity<Project>,
893 cx: &mut Context<Self>,
894 ) -> Task<Arc<ProjectSnapshot>> {
895 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
896 cx.spawn(async move |_, _| {
897 let snapshot = task.await;
898
899 Arc::new(ProjectSnapshot {
900 worktree_snapshots: snapshot.worktree_snapshots,
901 timestamp: Utc::now(),
902 })
903 })
904 }
905
906 pub fn project_context(&self) -> &Entity<ProjectContext> {
907 &self.project_context
908 }
909
910 pub fn project(&self) -> &Entity<Project> {
911 &self.project
912 }
913
914 pub fn action_log(&self) -> &Entity<ActionLog> {
915 &self.action_log
916 }
917
918 pub fn is_empty(&self) -> bool {
919 self.messages.is_empty() && self.title.is_none()
920 }
921
922 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
923 self.model.as_ref()
924 }
925
926 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
927 let old_usage = self.latest_token_usage();
928 self.model = Some(model);
929 let new_caps = Self::prompt_capabilities(self.model.as_deref());
930 let new_usage = self.latest_token_usage();
931 if old_usage != new_usage {
932 cx.emit(TokenUsageUpdated(new_usage));
933 }
934 self.prompt_capabilities_tx.send(new_caps).log_err();
935 cx.notify()
936 }
937
938 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
939 self.summarization_model.as_ref()
940 }
941
942 pub fn set_summarization_model(
943 &mut self,
944 model: Option<Arc<dyn LanguageModel>>,
945 cx: &mut Context<Self>,
946 ) {
947 self.summarization_model = model;
948 cx.notify()
949 }
950
951 pub fn completion_mode(&self) -> CompletionMode {
952 self.completion_mode
953 }
954
955 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
956 let old_usage = self.latest_token_usage();
957 self.completion_mode = mode;
958 let new_usage = self.latest_token_usage();
959 if old_usage != new_usage {
960 cx.emit(TokenUsageUpdated(new_usage));
961 }
962 cx.notify()
963 }
964
965 #[cfg(any(test, feature = "test-support"))]
966 pub fn last_message(&self) -> Option<Message> {
967 if let Some(message) = self.pending_message.clone() {
968 Some(Message::Agent(message))
969 } else {
970 self.messages.last().cloned()
971 }
972 }
973
974 pub fn add_default_tools(
975 &mut self,
976 environment: Rc<dyn ThreadEnvironment>,
977 cx: &mut Context<Self>,
978 ) {
979 let language_registry = self.project.read(cx).languages().clone();
980 self.add_tool(CopyPathTool::new(self.project.clone()));
981 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
982 self.add_tool(DeletePathTool::new(
983 self.project.clone(),
984 self.action_log.clone(),
985 ));
986 self.add_tool(DiagnosticsTool::new(self.project.clone()));
987 self.add_tool(EditFileTool::new(
988 self.project.clone(),
989 cx.weak_entity(),
990 language_registry,
991 Templates::new(),
992 ));
993 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
994 self.add_tool(FindPathTool::new(self.project.clone()));
995 self.add_tool(GrepTool::new(self.project.clone()));
996 self.add_tool(ListDirectoryTool::new(self.project.clone()));
997 self.add_tool(MovePathTool::new(self.project.clone()));
998 self.add_tool(NowTool);
999 self.add_tool(OpenTool::new(self.project.clone()));
1000 self.add_tool(ReadFileTool::new(
1001 cx.weak_entity(),
1002 self.project.clone(),
1003 self.action_log.clone(),
1004 ));
1005 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1006 self.add_tool(ThinkingTool);
1007 self.add_tool(WebSearchTool);
1008 }
1009
1010 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1011 self.tools.insert(T::name().into(), tool.erase());
1012 }
1013
1014 pub fn remove_tool(&mut self, name: &str) -> bool {
1015 self.tools.remove(name).is_some()
1016 }
1017
1018 pub fn profile(&self) -> &AgentProfileId {
1019 &self.profile_id
1020 }
1021
1022 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1023 if self.profile_id == profile_id {
1024 return;
1025 }
1026
1027 self.profile_id = profile_id;
1028
1029 // Swap to the profile's preferred model when available.
1030 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1031 self.set_model(model, cx);
1032 }
1033 }
1034
1035 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1036 if let Some(running_turn) = self.running_turn.take() {
1037 running_turn.cancel();
1038 }
1039 self.flush_pending_message(cx);
1040 }
1041
1042 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1043 let Some(last_user_message) = self.last_user_message() else {
1044 return;
1045 };
1046
1047 self.request_token_usage
1048 .insert(last_user_message.id.clone(), update);
1049 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1050 cx.notify();
1051 }
1052
1053 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1054 self.cancel(cx);
1055 let Some(position) = self.messages.iter().position(
1056 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1057 ) else {
1058 return Err(anyhow!("Message not found"));
1059 };
1060
1061 for message in self.messages.drain(position..) {
1062 match message {
1063 Message::User(message) => {
1064 self.request_token_usage.remove(&message.id);
1065 }
1066 Message::Agent(_) | Message::Resume => {}
1067 }
1068 }
1069 self.clear_summary();
1070 cx.notify();
1071 Ok(())
1072 }
1073
1074 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1075 let last_user_message = self.last_user_message()?;
1076 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1077 Some(*tokens)
1078 }
1079
1080 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1081 let usage = self.latest_request_token_usage()?;
1082 let model = self.model.clone()?;
1083 Some(acp_thread::TokenUsage {
1084 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1085 used_tokens: usage.total_tokens(),
1086 })
1087 }
1088
1089 /// Look up the active profile and resolve its preferred model if one is configured.
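    /// For example, a profile whose settings specify a `default_model` such as
    /// `{ "provider": "anthropic", "model": "claude-sonnet-4" }` (illustrative values)
    /// resolves through the registry to that provider's configured model, if any.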
1090 fn resolve_profile_model(
1091 profile_id: &AgentProfileId,
1092 cx: &mut Context<Self>,
1093 ) -> Option<Arc<dyn LanguageModel>> {
1094 let selection = AgentSettings::get_global(cx)
1095 .profiles
1096 .get(profile_id)?
1097 .default_model
1098 .clone()?;
1099 Self::resolve_model_from_selection(&selection, cx)
1100 }
1101
1102 /// Translate a stored model selection into the configured model from the registry.
1103 fn resolve_model_from_selection(
1104 selection: &LanguageModelSelection,
1105 cx: &mut Context<Self>,
1106 ) -> Option<Arc<dyn LanguageModel>> {
1107 let selected = SelectedModel {
1108 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1109 model: LanguageModelId::from(selection.model.clone()),
1110 };
1111 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1112 registry
1113 .select_model(&selected, cx)
1114 .map(|configured| configured.model)
1115 })
1116 }
1117
1118 pub fn resume(
1119 &mut self,
1120 cx: &mut Context<Self>,
1121 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1122 self.messages.push(Message::Resume);
1123 cx.notify();
1124
1125 log::debug!("Total messages in thread: {}", self.messages.len());
1126 self.run_turn(cx)
1127 }
1128
    /// Sending a message results in the model streaming a response, which may include tool calls.
    /// After requesting tool calls, the model stops and waits for the outstanding calls to complete
    /// and for their results to be sent back before it continues.
    /// The returned channel reports every stop the model makes before erroring or ending its turn.
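    ///
    /// A rough usage sketch (assuming the caller holds an `Entity<Thread>`, a fresh
    /// [`UserMessageId`], and content items convertible into [`UserMessageContent`]):
    ///
    /// ```ignore
    /// let mut events = thread.update(cx, |thread, cx| thread.send(id, content, cx))?;
    /// while let Some(event) = events.next().await {
    ///     // Forward each ThreadEvent (text, tool calls, retries, stop) to the UI.
    /// }
    /// ```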
1132 pub fn send<T>(
1133 &mut self,
1134 id: UserMessageId,
1135 content: impl IntoIterator<Item = T>,
1136 cx: &mut Context<Self>,
1137 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1138 where
1139 T: Into<UserMessageContent>,
1140 {
1141 let model = self.model().context("No language model configured")?;
1142
1143 log::info!("Thread::send called with model: {}", model.name().0);
1144 self.advance_prompt_id();
1145
1146 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1147 log::debug!("Thread::send content: {:?}", content);
1148
1149 self.messages
1150 .push(Message::User(UserMessage { id, content }));
1151 cx.notify();
1152
1153 log::debug!("Total messages in thread: {}", self.messages.len());
1154 self.run_turn(cx)
1155 }
1156
1157 #[cfg(feature = "eval")]
1158 pub fn proceed(
1159 &mut self,
1160 cx: &mut Context<Self>,
1161 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1162 self.run_turn(cx)
1163 }
1164
1165 fn run_turn(
1166 &mut self,
1167 cx: &mut Context<Self>,
1168 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1169 self.cancel(cx);
1170
1171 let model = self.model.clone().context("No language model configured")?;
1172 let profile = AgentSettings::get_global(cx)
1173 .profiles
1174 .get(&self.profile_id)
1175 .context("Profile not found")?;
1176 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1177 let event_stream = ThreadEventStream(events_tx);
1178 let message_ix = self.messages.len().saturating_sub(1);
1179 self.tool_use_limit_reached = false;
1180 self.clear_summary();
1181 self.running_turn = Some(RunningTurn {
1182 event_stream: event_stream.clone(),
1183 tools: self.enabled_tools(profile, &model, cx),
1184 _task: cx.spawn(async move |this, cx| {
1185 log::debug!("Starting agent turn execution");
1186
1187 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1188 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1189
1190 match turn_result {
1191 Ok(()) => {
1192 log::debug!("Turn execution completed");
1193 event_stream.send_stop(acp::StopReason::EndTurn);
1194 }
1195 Err(error) => {
1196 log::error!("Turn execution failed: {:?}", error);
1197 match error.downcast::<CompletionError>() {
1198 Ok(CompletionError::Refusal) => {
1199 event_stream.send_stop(acp::StopReason::Refusal);
1200 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1201 }
1202 Ok(CompletionError::MaxTokens) => {
1203 event_stream.send_stop(acp::StopReason::MaxTokens);
1204 }
1205 Ok(CompletionError::Other(error)) | Err(error) => {
1206 event_stream.send_error(error);
1207 }
1208 }
1209 }
1210 }
1211
1212 _ = this.update(cx, |this, _| this.running_turn.take());
1213 }),
1214 });
1215 Ok(events_rx)
1216 }
1217
1218 async fn run_turn_internal(
1219 this: &WeakEntity<Self>,
1220 model: Arc<dyn LanguageModel>,
1221 event_stream: &ThreadEventStream,
1222 cx: &mut AsyncApp,
1223 ) -> Result<()> {
1224 let mut attempt = 0;
1225 let mut intent = CompletionIntent::UserPrompt;
1226 loop {
1227 let request =
1228 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1229
1230 telemetry::event!(
1231 "Agent Thread Completion",
1232 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1233 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1234 model = model.telemetry_id(),
1235 model_provider = model.provider_id().to_string(),
1236 attempt
1237 );
1238
1239 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1240
1241 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1242 Ok(events) => (events, None),
1243 Err(err) => (stream::empty().boxed(), Some(err)),
1244 };
1245 let mut tool_results = FuturesUnordered::new();
1246 while let Some(event) = events.next().await {
1247 log::trace!("Received completion event: {:?}", event);
1248 match event {
1249 Ok(event) => {
1250 tool_results.extend(this.update(cx, |this, cx| {
1251 this.handle_completion_event(event, event_stream, cx)
1252 })??);
1253 }
1254 Err(err) => {
1255 error = Some(err);
1256 break;
1257 }
1258 }
1259 }
1260
1261 let end_turn = tool_results.is_empty();
1262 while let Some(tool_result) = tool_results.next().await {
1263 log::debug!("Tool finished {:?}", tool_result);
1264
1265 event_stream.update_tool_call_fields(
1266 &tool_result.tool_use_id,
1267 acp::ToolCallUpdateFields::new()
1268 .status(if tool_result.is_error {
1269 acp::ToolCallStatus::Failed
1270 } else {
1271 acp::ToolCallStatus::Completed
1272 })
1273 .raw_output(tool_result.output.clone()),
1274 );
1275 this.update(cx, |this, _cx| {
1276 this.pending_message()
1277 .tool_results
1278 .insert(tool_result.tool_use_id.clone(), tool_result);
1279 })?;
1280 }
1281
1282 this.update(cx, |this, cx| {
1283 this.flush_pending_message(cx);
1284 if this.title.is_none() && this.pending_title_generation.is_none() {
1285 this.generate_title(cx);
1286 }
1287 })?;
1288
1289 if let Some(error) = error {
1290 attempt += 1;
1291 let retry = this.update(cx, |this, cx| {
1292 let user_store = this.user_store.read(cx);
1293 this.handle_completion_error(error, attempt, user_store.plan())
1294 })??;
1295 let timer = cx.background_executor().timer(retry.duration);
1296 event_stream.send_retry(retry);
1297 timer.await;
1298 this.update(cx, |this, _cx| {
1299 if let Some(Message::Agent(message)) = this.messages.last() {
1300 if message.tool_results.is_empty() {
1301 intent = CompletionIntent::UserPrompt;
1302 this.messages.push(Message::Resume);
1303 }
1304 }
1305 })?;
1306 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1307 return Err(language_model::ToolUseLimitReachedError.into());
1308 } else if end_turn {
1309 return Ok(());
1310 } else {
1311 intent = CompletionIntent::ToolResults;
1312 attempt = 0;
1313 }
1314 }
1315 }
1316
1317 fn handle_completion_error(
1318 &mut self,
1319 error: LanguageModelCompletionError,
1320 attempt: u8,
1321 plan: Option<Plan>,
1322 ) -> Result<acp_thread::RetryStatus> {
1323 let Some(model) = self.model.as_ref() else {
1324 return Err(anyhow!(error));
1325 };
1326
1327 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1328 match plan {
1329 Some(Plan::V2(_)) => true,
1330 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1331 None => false,
1332 }
1333 } else {
1334 true
1335 };
1336
1337 if !auto_retry {
1338 return Err(anyhow!(error));
1339 }
1340
1341 let Some(strategy) = Self::retry_strategy_for(&error) else {
1342 return Err(anyhow!(error));
1343 };
1344
1345 let max_attempts = match &strategy {
1346 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1347 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1348 };
1349
1350 if attempt > max_attempts {
1351 return Err(anyhow!(error));
1352 }
1353
1354 let delay = match &strategy {
1355 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1356 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1357 Duration::from_secs(delay_secs)
1358 }
1359 RetryStrategy::Fixed { delay, .. } => *delay,
1360 };
1361 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1362
1363 Ok(acp_thread::RetryStatus {
1364 last_error: error.to_string().into(),
1365 attempt: attempt as usize,
1366 max_attempts: max_attempts as usize,
1367 started_at: Instant::now(),
1368 duration: delay,
1369 })
1370 }
1371
1372 /// A helper method that's called on every streamed completion event.
1373 /// Returns an optional tool result task, which the main agentic loop will
1374 /// send back to the model when it resolves.
1375 fn handle_completion_event(
1376 &mut self,
1377 event: LanguageModelCompletionEvent,
1378 event_stream: &ThreadEventStream,
1379 cx: &mut Context<Self>,
1380 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1381 log::trace!("Handling streamed completion event: {:?}", event);
1382 use LanguageModelCompletionEvent::*;
1383
1384 match event {
1385 StartMessage { .. } => {
1386 self.flush_pending_message(cx);
1387 self.pending_message = Some(AgentMessage::default());
1388 }
1389 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1390 Thinking { text, signature } => {
1391 self.handle_thinking_event(text, signature, event_stream, cx)
1392 }
1393 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1394 ReasoningDetails(details) => {
1395 let last_message = self.pending_message();
                // Store the last non-empty reasoning_details (overwriting earlier ones), so we
                // keep the encrypted reasoning with signatures rather than the early text-only
                // reasoning.
1398 if let serde_json::Value::Array(ref arr) = details {
1399 if !arr.is_empty() {
1400 last_message.reasoning_details = Some(details);
1401 }
1402 } else {
1403 last_message.reasoning_details = Some(details);
1404 }
1405 }
1406 ToolUse(tool_use) => {
1407 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1408 }
1409 ToolUseJsonParseError {
1410 id,
1411 tool_name,
1412 raw_input,
1413 json_parse_error,
1414 } => {
1415 return Ok(Some(Task::ready(
1416 self.handle_tool_use_json_parse_error_event(
1417 id,
1418 tool_name,
1419 raw_input,
1420 json_parse_error,
1421 ),
1422 )));
1423 }
1424 UsageUpdate(usage) => {
1425 telemetry::event!(
1426 "Agent Thread Completion Usage Updated",
1427 thread_id = self.id.to_string(),
1428 prompt_id = self.prompt_id.to_string(),
1429 model = self.model.as_ref().map(|m| m.telemetry_id()),
1430 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1431 input_tokens = usage.input_tokens,
1432 output_tokens = usage.output_tokens,
1433 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1434 cache_read_input_tokens = usage.cache_read_input_tokens,
1435 );
1436 self.update_token_usage(usage, cx);
1437 }
1438 UsageUpdated { amount, limit } => {
1439 self.update_model_request_usage(amount, limit, cx);
1440 }
1441 ToolUseLimitReached => {
1442 self.tool_use_limit_reached = true;
1443 }
1444 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1445 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1446 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1447 Started | Queued { .. } => {}
1448 }
1449
1450 Ok(None)
1451 }
1452
1453 fn handle_text_event(
1454 &mut self,
1455 new_text: String,
1456 event_stream: &ThreadEventStream,
1457 cx: &mut Context<Self>,
1458 ) {
1459 event_stream.send_text(&new_text);
1460
1461 let last_message = self.pending_message();
1462 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1463 text.push_str(&new_text);
1464 } else {
1465 last_message
1466 .content
1467 .push(AgentMessageContent::Text(new_text));
1468 }
1469
1470 cx.notify();
1471 }
1472
1473 fn handle_thinking_event(
1474 &mut self,
1475 new_text: String,
1476 new_signature: Option<String>,
1477 event_stream: &ThreadEventStream,
1478 cx: &mut Context<Self>,
1479 ) {
1480 event_stream.send_thinking(&new_text);
1481
1482 let last_message = self.pending_message();
1483 if let Some(AgentMessageContent::Thinking { text, signature }) =
1484 last_message.content.last_mut()
1485 {
1486 text.push_str(&new_text);
1487 *signature = new_signature.or(signature.take());
1488 } else {
1489 last_message.content.push(AgentMessageContent::Thinking {
1490 text: new_text,
1491 signature: new_signature,
1492 });
1493 }
1494
1495 cx.notify();
1496 }
1497
1498 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1499 let last_message = self.pending_message();
1500 last_message
1501 .content
1502 .push(AgentMessageContent::RedactedThinking(data));
1503 cx.notify();
1504 }
1505
1506 fn handle_tool_use_event(
1507 &mut self,
1508 tool_use: LanguageModelToolUse,
1509 event_stream: &ThreadEventStream,
1510 cx: &mut Context<Self>,
1511 ) -> Option<Task<LanguageModelToolResult>> {
1512 cx.notify();
1513
1514 let tool = self.tool(tool_use.name.as_ref());
1515 let mut title = SharedString::from(&tool_use.name);
1516 let mut kind = acp::ToolKind::Other;
1517 if let Some(tool) = tool.as_ref() {
1518 title = tool.initial_title(tool_use.input.clone(), cx);
1519 kind = tool.kind();
1520 }
1521
        // Ensure the last message ends with the current tool use.
1523 let last_message = self.pending_message();
1524 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1525 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1526 if last_tool_use.id == tool_use.id {
1527 *last_tool_use = tool_use.clone();
1528 false
1529 } else {
1530 true
1531 }
1532 } else {
1533 true
1534 }
1535 });
1536
1537 if push_new_tool_use {
1538 event_stream.send_tool_call(
1539 &tool_use.id,
1540 &tool_use.name,
1541 title,
1542 kind,
1543 tool_use.input.clone(),
1544 );
1545 last_message
1546 .content
1547 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1548 } else {
1549 event_stream.update_tool_call_fields(
1550 &tool_use.id,
1551 acp::ToolCallUpdateFields::new()
1552 .title(title.as_str())
1553 .kind(kind)
1554 .raw_input(tool_use.input.clone()),
1555 );
1556 }
1557
1558 if !tool_use.is_input_complete {
1559 return None;
1560 }
1561
1562 let Some(tool) = tool else {
1563 let content = format!("No tool named {} exists", tool_use.name);
1564 return Some(Task::ready(LanguageModelToolResult {
1565 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1566 tool_use_id: tool_use.id,
1567 tool_name: tool_use.name,
1568 is_error: true,
1569 output: None,
1570 }));
1571 };
1572
1573 let fs = self.project.read(cx).fs().clone();
1574 let tool_event_stream =
1575 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1576 tool_event_stream.update_fields(
1577 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1578 );
1579 let supports_images = self.model().is_some_and(|model| model.supports_images());
1580 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1581 log::debug!("Running tool {}", tool_use.name);
1582 Some(cx.foreground_executor().spawn(async move {
1583 let tool_result = tool_result.await.and_then(|output| {
1584 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1585 && !supports_images
1586 {
1587 return Err(anyhow!(
1588 "Attempted to read an image, but this model doesn't support it.",
1589 ));
1590 }
1591 Ok(output)
1592 });
1593
1594 match tool_result {
1595 Ok(output) => LanguageModelToolResult {
1596 tool_use_id: tool_use.id,
1597 tool_name: tool_use.name,
1598 is_error: false,
1599 content: output.llm_output,
1600 output: Some(output.raw_output),
1601 },
1602 Err(error) => LanguageModelToolResult {
1603 tool_use_id: tool_use.id,
1604 tool_name: tool_use.name,
1605 is_error: true,
1606 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1607 output: Some(error.to_string().into()),
1608 },
1609 }
1610 }))
1611 }
1612
1613 fn handle_tool_use_json_parse_error_event(
1614 &mut self,
1615 tool_use_id: LanguageModelToolUseId,
1616 tool_name: Arc<str>,
1617 raw_input: Arc<str>,
1618 json_parse_error: String,
1619 ) -> LanguageModelToolResult {
1620 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1621 LanguageModelToolResult {
1622 tool_use_id,
1623 tool_name,
1624 is_error: true,
1625 content: LanguageModelToolResultContent::Text(tool_output.into()),
1626 output: Some(serde_json::Value::String(raw_input.to_string())),
1627 }
1628 }
1629
1630 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1631 self.project
1632 .read(cx)
1633 .user_store()
1634 .update(cx, |user_store, cx| {
1635 user_store.update_model_request_usage(
1636 ModelRequestUsage(RequestUsage {
1637 amount: amount as i32,
1638 limit,
1639 }),
1640 cx,
1641 )
1642 });
1643 }
1644
1645 pub fn title(&self) -> SharedString {
1646 self.title.clone().unwrap_or("New Thread".into())
1647 }
1648
1649 pub fn is_generating_summary(&self) -> bool {
1650 self.pending_summary_generation.is_some()
1651 }
1652
1653 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1654 if let Some(summary) = self.summary.as_ref() {
1655 return Task::ready(Some(summary.clone())).shared();
1656 }
1657 if let Some(task) = self.pending_summary_generation.clone() {
1658 return task;
1659 }
1660 let Some(model) = self.summarization_model.clone() else {
1661 log::error!("No summarization model available");
1662 return Task::ready(None).shared();
1663 };
1664 let mut request = LanguageModelRequest {
1665 intent: Some(CompletionIntent::ThreadContextSummarization),
1666 temperature: AgentSettings::temperature_for_model(&model, cx),
1667 ..Default::default()
1668 };
1669
1670 for message in &self.messages {
1671 request.messages.extend(message.to_request());
1672 }
1673
1674 request.messages.push(LanguageModelRequestMessage {
1675 role: Role::User,
1676 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1677 cache: false,
1678 reasoning_details: None,
1679 });
1680
1681 let task = cx
1682 .spawn(async move |this, cx| {
1683 let mut summary = String::new();
1684 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1685 while let Some(event) = messages.next().await {
1686 let event = event.log_err()?;
1687 let text = match event {
1688 LanguageModelCompletionEvent::Text(text) => text,
1689 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1690 this.update(cx, |thread, cx| {
1691 thread.update_model_request_usage(amount, limit, cx);
1692 })
1693 .ok()?;
1694 continue;
1695 }
1696 _ => continue,
1697 };
1698
1699 let mut lines = text.lines();
1700 summary.extend(lines.next());
1701 }
1702
1703 log::debug!("Setting summary: {}", summary);
1704 let summary = SharedString::from(summary);
1705
1706 this.update(cx, |this, cx| {
1707 this.summary = Some(summary.clone());
1708 this.pending_summary_generation = None;
1709 cx.notify()
1710 })
1711 .ok()?;
1712
1713 Some(summary)
1714 })
1715 .shared();
1716 self.pending_summary_generation = Some(task.clone());
1717 task
1718 }
1719
1720 fn generate_title(&mut self, cx: &mut Context<Self>) {
1721 let Some(model) = self.summarization_model.clone() else {
1722 return;
1723 };
1724
1725 log::debug!(
1726 "Generating title with model: {:?}",
1727 self.summarization_model.as_ref().map(|model| model.name())
1728 );
1729 let mut request = LanguageModelRequest {
1730 intent: Some(CompletionIntent::ThreadSummarization),
1731 temperature: AgentSettings::temperature_for_model(&model, cx),
1732 ..Default::default()
1733 };
1734
1735 for message in &self.messages {
1736 request.messages.extend(message.to_request());
1737 }
1738
1739 request.messages.push(LanguageModelRequestMessage {
1740 role: Role::User,
1741 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1742 cache: false,
1743 reasoning_details: None,
1744 });
1745 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1746 let mut title = String::new();
1747
1748 let generate = async {
1749 let mut messages = model.stream_completion(request, cx).await?;
1750 while let Some(event) = messages.next().await {
1751 let event = event?;
1752 let text = match event {
1753 LanguageModelCompletionEvent::Text(text) => text,
1754 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1755 this.update(cx, |thread, cx| {
1756 thread.update_model_request_usage(amount, limit, cx);
1757 })?;
1758 continue;
1759 }
1760 _ => continue,
1761 };
1762
1763 let mut lines = text.lines();
1764 title.extend(lines.next());
1765
1766 // Stop if the LLM generated multiple lines.
1767 if lines.next().is_some() {
1768 break;
1769 }
1770 }
1771 anyhow::Ok(())
1772 };
1773
1774 if generate.await.context("failed to generate title").is_ok() {
1775 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1776 }
1777 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1778 }));
1779 }
1780
1781 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1782 self.pending_title_generation = None;
1783 if Some(&title) != self.title.as_ref() {
1784 self.title = Some(title);
1785 cx.emit(TitleUpdated);
1786 cx.notify();
1787 }
1788 }
1789
1790 fn clear_summary(&mut self) {
1791 self.summary = None;
1792 self.pending_summary_generation = None;
1793 }
1794
1795 fn last_user_message(&self) -> Option<&UserMessage> {
1796 self.messages
1797 .iter()
1798 .rev()
1799 .find_map(|message| match message {
1800 Message::User(user_message) => Some(user_message),
1801 Message::Agent(_) => None,
1802 Message::Resume => None,
1803 })
1804 }
1805
1806 fn pending_message(&mut self) -> &mut AgentMessage {
1807 self.pending_message.get_or_insert_default()
1808 }
1809
1810 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1811 let Some(mut message) = self.pending_message.take() else {
1812 return;
1813 };
1814
1815 if message.content.is_empty() {
1816 return;
1817 }
1818
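        // Tool uses that never produced a result (e.g. because the turn was canceled) get an
        // error stub, so every tool use in the rebuilt request is still paired with a result.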
1819 for content in &message.content {
1820 let AgentMessageContent::ToolUse(tool_use) = content else {
1821 continue;
1822 };
1823
1824 if !message.tool_results.contains_key(&tool_use.id) {
1825 message.tool_results.insert(
1826 tool_use.id.clone(),
1827 LanguageModelToolResult {
1828 tool_use_id: tool_use.id.clone(),
1829 tool_name: tool_use.name.clone(),
1830 is_error: true,
1831 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1832 output: None,
1833 },
1834 );
1835 }
1836 }
1837
1838 self.messages.push(Message::Agent(message));
1839 self.updated_at = Utc::now();
1840 self.clear_summary();
1841 cx.notify()
1842 }
1843
1844 pub(crate) fn build_completion_request(
1845 &self,
1846 completion_intent: CompletionIntent,
1847 cx: &App,
1848 ) -> Result<LanguageModelRequest> {
1849 let model = self.model().context("No language model configured")?;
1850 let tools = if let Some(turn) = self.running_turn.as_ref() {
1851 turn.tools
1852 .iter()
1853 .filter_map(|(tool_name, tool)| {
1854 log::trace!("Including tool: {}", tool_name);
1855 Some(LanguageModelRequestTool {
1856 name: tool_name.to_string(),
1857 description: tool.description().to_string(),
1858 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1859 })
1860 })
1861 .collect::<Vec<_>>()
1862 } else {
1863 Vec::new()
1864 };
1865
1866 log::debug!("Building completion request");
1867 log::debug!("Completion intent: {:?}", completion_intent);
1868 log::debug!("Completion mode: {:?}", self.completion_mode);
1869
1870 let available_tools: Vec<_> = self
1871 .running_turn
1872 .as_ref()
1873 .map(|turn| turn.tools.keys().cloned().collect())
1874 .unwrap_or_default();
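        // `available_tools` carries just the names for the system prompt template, while
        // `tools` above holds the JSON schemas actually advertised to the model.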
1875
1876 log::debug!("Request includes {} tools", available_tools.len());
1877 let messages = self.build_request_messages(available_tools, cx);
1878 log::debug!("Request will include {} messages", messages.len());
1879
1880 let request = LanguageModelRequest {
1881 thread_id: Some(self.id.to_string()),
1882 prompt_id: Some(self.prompt_id.to_string()),
1883 intent: Some(completion_intent),
1884 mode: Some(self.completion_mode.into()),
1885 messages,
1886 tools,
1887 tool_choice: None,
1888 stop: Vec::new(),
1889 temperature: AgentSettings::temperature_for_model(model, cx),
1890 thinking_allowed: true,
1891 };
1892
1893 log::debug!("Completion request built successfully");
1894 Ok(request)
1895 }
1896
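    /// Collects the tools to expose for a turn: built-in tools that are enabled by
    /// the profile and supported by the model's provider, plus tools from context
    /// servers, with names truncated to `MAX_TOOL_NAME_LENGTH` and duplicates
    /// disambiguated by server ID.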
1897 fn enabled_tools(
1898 &self,
1899 profile: &AgentProfileSettings,
1900 model: &Arc<dyn LanguageModel>,
1901 cx: &App,
1902 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1903 fn truncate(tool_name: &SharedString) -> SharedString {
1904 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1905 let mut truncated = tool_name.to_string();
1906 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1907 truncated.into()
1908 } else {
1909 tool_name.clone()
1910 }
1911 }
1912
1913 let mut tools = self
1914 .tools
1915 .iter()
1916 .filter_map(|(tool_name, tool)| {
1917 if tool.supports_provider(&model.provider_id())
1918 && profile.is_tool_enabled(tool_name)
1919 {
1920 Some((truncate(tool_name), tool.clone()))
1921 } else {
1922 None
1923 }
1924 })
1925 .collect::<BTreeMap<_, _>>();
1926
1927 let mut context_server_tools = Vec::new();
1928 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1929 let mut duplicate_tool_names = HashSet::default();
1930 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1931 for (tool_name, tool) in server_tools {
1932 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1933 let tool_name = truncate(tool_name);
1934 if !seen_tools.insert(tool_name.clone()) {
1935 duplicate_tool_names.insert(tool_name.clone());
1936 }
1937 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1938 }
1939 }
1940 }
1941
1942 // When there are duplicate tool names, disambiguate by prefixing them
1943 // with the server ID. In the rare case there isn't enough space for the
1944 // disambiguated tool name, keep only the last tool with this name.
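        // For example (illustrative server and tool names): if servers `alpha` and
        // `beta` both expose a `search` tool, they are registered as `alpha_search`
        // and `beta_search`, with the server prefix truncated so the full name stays
        // within MAX_TOOL_NAME_LENGTH.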
1945 for (server_id, tool_name, tool) in context_server_tools {
1946 if duplicate_tool_names.contains(&tool_name) {
1947 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1948 if available >= 2 {
1949 let mut disambiguated = server_id.0.to_string();
1950 disambiguated.truncate(available - 1);
1951 disambiguated.push('_');
1952 disambiguated.push_str(&tool_name);
1953 tools.insert(disambiguated.into(), tool.clone());
1954 } else {
1955 tools.insert(tool_name, tool.clone());
1956 }
1957 } else {
1958 tools.insert(tool_name, tool.clone());
1959 }
1960 }
1961
1962 tools
1963 }
1964
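    /// Looks up a tool by name among those enabled for the current turn.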
1965 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1966 self.running_turn.as_ref()?.tools.get(name).cloned()
1967 }
1968
1969 fn build_request_messages(
1970 &self,
1971 available_tools: Vec<SharedString>,
1972 cx: &App,
1973 ) -> Vec<LanguageModelRequestMessage> {
1974 log::trace!(
1975 "Building request messages from {} thread messages",
1976 self.messages.len()
1977 );
1978
1979 let system_prompt = SystemPromptTemplate {
1980 project: self.project_context.read(cx),
1981 available_tools,
1982 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
1983 }
1984 .render(&self.templates)
1985 .context("failed to build system prompt")
1986 .expect("Invalid template");
1987 let mut messages = vec![LanguageModelRequestMessage {
1988 role: Role::System,
1989 content: vec![system_prompt.into()],
1990 cache: false,
1991 reasoning_details: None,
1992 }];
1993 for message in &self.messages {
1994 messages.extend(message.to_request());
1995 }
1996
1997 if let Some(last_message) = messages.last_mut() {
1998 last_message.cache = true;
1999 }
2000
2001 if let Some(message) = self.pending_message.as_ref() {
2002 messages.extend(message.to_request());
2003 }
2004
2005 messages
2006 }
2007
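    /// Renders every message, including any pending agent message, as markdown.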
2008 pub fn to_markdown(&self) -> String {
2009 let mut markdown = String::new();
2010 for (ix, message) in self.messages.iter().enumerate() {
2011 if ix > 0 {
2012 markdown.push('\n');
2013 }
2014 markdown.push_str(&message.to_markdown());
2015 }
2016
2017 if let Some(message) = self.pending_message.as_ref() {
2018 markdown.push('\n');
2019 markdown.push_str(&message.to_markdown());
2020 }
2021
2022 markdown
2023 }
2024
2025 fn advance_prompt_id(&mut self) {
2026 self.prompt_id = PromptId::new();
2027 }
2028
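    /// Maps a completion error to the retry strategy to use, or `None` when
    /// retrying cannot help.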
2029 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2030 use LanguageModelCompletionError::*;
2031 use http_client::StatusCode;
2032
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to MAX_RETRY_ATTEMPTS
        //   times, using exponential backoff or honoring the provider's `retry_after` hint where available.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
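        // For example: an HTTP 429 retries with exponential backoff starting at
        // BASE_RETRY_DELAY, a provider overload waits for the `retry_after` hint
        // (or BASE_RETRY_DELAY) between fixed retries, and an invalid API key is
        // not retried at all.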
2037 match error {
2038 HttpResponseError {
2039 status_code: StatusCode::TOO_MANY_REQUESTS,
2040 ..
2041 } => Some(RetryStrategy::ExponentialBackoff {
2042 initial_delay: BASE_RETRY_DELAY,
2043 max_attempts: MAX_RETRY_ATTEMPTS,
2044 }),
2045 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2046 Some(RetryStrategy::Fixed {
2047 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2048 max_attempts: MAX_RETRY_ATTEMPTS,
2049 })
2050 }
2051 UpstreamProviderError {
2052 status,
2053 retry_after,
2054 ..
2055 } => match *status {
2056 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2057 Some(RetryStrategy::Fixed {
2058 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2059 max_attempts: MAX_RETRY_ATTEMPTS,
2060 })
2061 }
2062 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2063 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2064 // Internal Server Error could be anything, retry up to 3 times.
2065 max_attempts: 3,
2066 }),
2067 status => {
2068 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2069 // but we frequently get them in practice. See https://http.dev/529
2070 if status.as_u16() == 529 {
2071 Some(RetryStrategy::Fixed {
2072 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2073 max_attempts: MAX_RETRY_ATTEMPTS,
2074 })
2075 } else {
2076 Some(RetryStrategy::Fixed {
2077 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2078 max_attempts: 2,
2079 })
2080 }
2081 }
2082 },
2083 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2084 delay: BASE_RETRY_DELAY,
2085 max_attempts: 3,
2086 }),
2087 ApiReadResponseError { .. }
2088 | HttpSend { .. }
2089 | DeserializeResponse { .. }
2090 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2091 delay: BASE_RETRY_DELAY,
2092 max_attempts: 3,
2093 }),
2094 // Retrying these errors definitely shouldn't help.
2095 HttpResponseError {
2096 status_code:
2097 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2098 ..
2099 }
2100 | AuthenticationError { .. }
2101 | PermissionError { .. }
2102 | NoApiKey { .. }
2103 | ApiEndpointNotFound { .. }
2104 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2106 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2107 delay: BASE_RETRY_DELAY,
2108 max_attempts: 1,
2109 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2111 HttpResponseError { status_code, .. }
2112 if status_code.is_client_error() || status_code.is_server_error() =>
2113 {
2114 Some(RetryStrategy::Fixed {
2115 delay: BASE_RETRY_DELAY,
2116 max_attempts: 3,
2117 })
2118 }
2119 Other(err)
2120 if err.is::<language_model::PaymentRequiredError>()
2121 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2122 {
2123 // Retrying won't help for Payment Required or Model Request Limit errors (where
2124 // the user must upgrade to usage-based billing to get more requests, or else wait
2125 // for a significant amount of time for the request limit to reset).
2126 None
2127 }
            // Any other errors are probably not retryable, but retry a couple of times just in case.
2129 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2130 delay: BASE_RETRY_DELAY,
2131 max_attempts: 2,
2132 }),
2133 }
2134 }
2135}
2136
2137struct RunningTurn {
2138 /// Holds the task that handles agent interaction until the end of the turn.
2139 /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results back to it.
2141 _task: Task<()>,
2142 /// The current event stream for the running turn. Used to report a final
2143 /// cancellation event if we cancel the turn.
2144 event_stream: ThreadEventStream,
2145 /// The tools that were enabled for this turn.
2146 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2147}
2148
2149impl RunningTurn {
2150 fn cancel(self) {
2151 log::debug!("Cancelling in progress turn");
2152 self.event_stream.send_canceled();
2153 }
2154}
2155
2156pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2157
2158impl EventEmitter<TokenUsageUpdated> for Thread {}
2159
2160pub struct TitleUpdated;
2161
2162impl EventEmitter<TitleUpdated> for Thread {}
2163
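/// A strongly typed tool that the agent can invoke during a turn.
///
/// Implementors declare their input/output types and how to run; [`AgentTool::erase`]
/// type-erases the tool into an [`AnyAgentTool`] so tools with different input types
/// can live in a single registry.
///
/// A minimal sketch of an implementation (illustrative only: `EchoTool` and `EchoInput`
/// are hypothetical, and it assumes `String` satisfies the `Output` bound and that
/// `acp::ToolKind::Other` is an appropriate kind):
///
/// ```ignore
/// #[derive(Deserialize, Serialize, JsonSchema)]
/// struct EchoInput {
///     /// Text to echo back to the model.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoInput;
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
/// ```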
2164pub trait AgentTool
2165where
2166 Self: 'static + Sized,
2167{
2168 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2169 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2170
2171 fn name() -> &'static str;
2172
2173 fn description() -> SharedString {
2174 let schema = schemars::schema_for!(Self::Input);
2175 SharedString::new(
2176 schema
2177 .get("description")
2178 .and_then(|description| description.as_str())
2179 .unwrap_or_default(),
2180 )
2181 }
2182
2183 fn kind() -> acp::ToolKind;
2184
2185 /// The initial tool title to display. Can be updated during the tool run.
2186 fn initial_title(
2187 &self,
2188 input: Result<Self::Input, serde_json::Value>,
2189 cx: &mut App,
2190 ) -> SharedString;
2191
2192 /// Returns the JSON schema that describes the tool's input.
2193 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2194 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2195 }
2196
    /// Some tools only work with certain providers (for billing or other reasons).
    /// Allows a tool to report whether it is compatible with the given provider or
    /// should be filtered out.
2199 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2200 true
2201 }
2202
2203 /// Runs the tool with the provided input.
2204 fn run(
2205 self: Arc<Self>,
2206 input: Self::Input,
2207 event_stream: ToolCallEventStream,
2208 cx: &mut App,
2209 ) -> Task<Result<Self::Output>>;
2210
2211 /// Emits events for a previous execution of the tool.
2212 fn replay(
2213 &self,
2214 _input: Self::Input,
2215 _output: Self::Output,
2216 _event_stream: ToolCallEventStream,
2217 _cx: &mut App,
2218 ) -> Result<()> {
2219 Ok(())
2220 }
2221
2222 fn erase(self) -> Arc<dyn AnyAgentTool> {
2223 Arc::new(Erased(Arc::new(self)))
2224 }
2225}
2226
2227pub struct Erased<T>(T);
2228
2229pub struct AgentToolOutput {
2230 pub llm_output: LanguageModelToolResultContent,
2231 pub raw_output: serde_json::Value,
2232}
2233
2234pub trait AnyAgentTool {
2235 fn name(&self) -> SharedString;
2236 fn description(&self) -> SharedString;
2237 fn kind(&self) -> acp::ToolKind;
2238 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2239 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2240 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2241 true
2242 }
2243 fn run(
2244 self: Arc<Self>,
2245 input: serde_json::Value,
2246 event_stream: ToolCallEventStream,
2247 cx: &mut App,
2248 ) -> Task<Result<AgentToolOutput>>;
2249 fn replay(
2250 &self,
2251 input: serde_json::Value,
2252 output: serde_json::Value,
2253 event_stream: ToolCallEventStream,
2254 cx: &mut App,
2255 ) -> Result<()>;
2256}
2257
2258impl<T> AnyAgentTool for Erased<Arc<T>>
2259where
2260 T: AgentTool,
2261{
2262 fn name(&self) -> SharedString {
2263 T::name().into()
2264 }
2265
2266 fn description(&self) -> SharedString {
2267 T::description()
2268 }
2269
2270 fn kind(&self) -> agent_client_protocol::ToolKind {
2271 T::kind()
2272 }
2273
    fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
        let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
        self.0.initial_title(parsed_input, cx)
    }
2277 }
2278
2279 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2280 let mut json = serde_json::to_value(T::input_schema(format))?;
2281 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2282 Ok(json)
2283 }
2284
2285 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2286 T::supports_provider(provider)
2287 }
2288
2289 fn run(
2290 self: Arc<Self>,
2291 input: serde_json::Value,
2292 event_stream: ToolCallEventStream,
2293 cx: &mut App,
2294 ) -> Task<Result<AgentToolOutput>> {
2295 cx.spawn(async move |cx| {
2296 let input = serde_json::from_value(input)?;
2297 let output = cx
2298 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2299 .await?;
2300 let raw_output = serde_json::to_value(&output)?;
2301 Ok(AgentToolOutput {
2302 llm_output: output.into(),
2303 raw_output,
2304 })
2305 })
2306 }
2307
2308 fn replay(
2309 &self,
2310 input: serde_json::Value,
2311 output: serde_json::Value,
2312 event_stream: ToolCallEventStream,
2313 cx: &mut App,
2314 ) -> Result<()> {
2315 let input = serde_json::from_value(input)?;
2316 let output = serde_json::from_value(output)?;
2317 self.0.replay(input, output, event_stream, cx)
2318 }
2319}
2320
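/// Sender half of a turn's event stream. Wraps an unbounded channel and silently
/// drops events once the receiver has gone away.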
2321#[derive(Clone)]
2322struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2323
2324impl ThreadEventStream {
2325 fn send_user_message(&self, message: &UserMessage) {
2326 self.0
2327 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2328 .ok();
2329 }
2330
2331 fn send_text(&self, text: &str) {
2332 self.0
2333 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2334 .ok();
2335 }
2336
2337 fn send_thinking(&self, text: &str) {
2338 self.0
2339 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2340 .ok();
2341 }
2342
2343 fn send_tool_call(
2344 &self,
2345 id: &LanguageModelToolUseId,
2346 tool_name: &str,
2347 title: SharedString,
2348 kind: acp::ToolKind,
2349 input: serde_json::Value,
2350 ) {
2351 self.0
2352 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2353 id,
2354 tool_name,
2355 title.to_string(),
2356 kind,
2357 input,
2358 ))))
2359 .ok();
2360 }
2361
2362 fn initial_tool_call(
2363 id: &LanguageModelToolUseId,
2364 tool_name: &str,
2365 title: String,
2366 kind: acp::ToolKind,
2367 input: serde_json::Value,
2368 ) -> acp::ToolCall {
2369 acp::ToolCall::new(id.to_string(), title)
2370 .kind(kind)
2371 .raw_input(input)
2372 .meta(acp::Meta::from_iter([(
2373 "tool_name".into(),
2374 tool_name.into(),
2375 )]))
2376 }
2377
2378 fn update_tool_call_fields(
2379 &self,
2380 tool_use_id: &LanguageModelToolUseId,
2381 fields: acp::ToolCallUpdateFields,
2382 ) {
2383 self.0
2384 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2385 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2386 )))
2387 .ok();
2388 }
2389
2390 fn send_retry(&self, status: acp_thread::RetryStatus) {
2391 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2392 }
2393
2394 fn send_stop(&self, reason: acp::StopReason) {
2395 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2396 }
2397
2398 fn send_canceled(&self) {
2399 self.0
2400 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2401 .ok();
2402 }
2403
2404 fn send_error(&self, error: impl Into<anyhow::Error>) {
2405 self.0.unbounded_send(Err(error.into())).ok();
2406 }
2407}
2408
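/// Handle passed to a running tool for reporting progress on its tool call
/// (field updates, diffs) and for requesting user authorization.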
2409#[derive(Clone)]
2410pub struct ToolCallEventStream {
2411 tool_use_id: LanguageModelToolUseId,
2412 stream: ThreadEventStream,
2413 fs: Option<Arc<dyn Fs>>,
2414}
2415
2416impl ToolCallEventStream {
2417 #[cfg(any(test, feature = "test-support"))]
2418 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2419 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2420
2421 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2422
2423 (stream, ToolCallEventStreamReceiver(events_rx))
2424 }
2425
2426 fn new(
2427 tool_use_id: LanguageModelToolUseId,
2428 stream: ThreadEventStream,
2429 fs: Option<Arc<dyn Fs>>,
2430 ) -> Self {
2431 Self {
2432 tool_use_id,
2433 stream,
2434 fs,
2435 }
2436 }
2437
2438 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2439 self.stream
2440 .update_tool_call_fields(&self.tool_use_id, fields);
2441 }
2442
2443 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2444 self.stream
2445 .0
2446 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2447 acp_thread::ToolCallUpdateDiff {
2448 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2449 diff,
2450 }
2451 .into(),
2452 )))
2453 .ok();
2454 }
2455
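    /// Asks the user to authorize this tool call by emitting a
    /// `ToolCallAuthorization` event with Always Allow / Allow / Deny options.
    /// Resolves immediately when `always_allow_tool_actions` is already enabled;
    /// choosing "Always Allow" persists that setting (when a filesystem handle is
    /// available).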
2456 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2457 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2458 return Task::ready(Ok(()));
2459 }
2460
2461 let (response_tx, response_rx) = oneshot::channel();
2462 self.stream
2463 .0
2464 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2465 ToolCallAuthorization {
2466 tool_call: acp::ToolCallUpdate::new(
2467 self.tool_use_id.to_string(),
2468 acp::ToolCallUpdateFields::new().title(title.into()),
2469 ),
2470 options: vec![
2471 acp::PermissionOption::new(
2472 acp::PermissionOptionId::new("always_allow"),
2473 "Always Allow",
2474 acp::PermissionOptionKind::AllowAlways,
2475 ),
2476 acp::PermissionOption::new(
2477 acp::PermissionOptionId::new("allow"),
2478 "Allow",
2479 acp::PermissionOptionKind::AllowOnce,
2480 ),
2481 acp::PermissionOption::new(
2482 acp::PermissionOptionId::new("deny"),
2483 "Deny",
2484 acp::PermissionOptionKind::RejectOnce,
2485 ),
2486 ],
2487 response: response_tx,
2488 },
2489 )))
2490 .ok();
2491 let fs = self.fs.clone();
2492 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2493 "always_allow" => {
2494 if let Some(fs) = fs.clone() {
2495 cx.update(|cx| {
2496 update_settings_file(fs, cx, |settings, _| {
2497 settings
2498 .agent
2499 .get_or_insert_default()
2500 .set_always_allow_tool_actions(true);
2501 });
2502 })?;
2503 }
2504
2505 Ok(())
2506 }
2507 "allow" => Ok(()),
2508 _ => Err(anyhow!("Permission to run tool denied by user")),
2509 })
2510 }
2511}
2512
2513#[cfg(any(test, feature = "test-support"))]
2514pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2515
2516#[cfg(any(test, feature = "test-support"))]
2517impl ToolCallEventStreamReceiver {
2518 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2519 let event = self.0.next().await;
2520 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2521 auth
2522 } else {
2523 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2524 }
2525 }
2526
2527 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2528 let event = self.0.next().await;
2529 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2530 update,
2531 )))) = event
2532 {
2533 update.fields
2534 } else {
2535 panic!("Expected update fields but got: {:?}", event);
2536 }
2537 }
2538
2539 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2540 let event = self.0.next().await;
2541 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2542 update,
2543 )))) = event
2544 {
2545 update.diff
2546 } else {
2547 panic!("Expected diff but got: {:?}", event);
2548 }
2549 }
2550
2551 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2552 let event = self.0.next().await;
2553 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2554 update,
2555 )))) = event
2556 {
2557 update.terminal
2558 } else {
2559 panic!("Expected terminal but got: {:?}", event);
2560 }
2561 }
2562}
2563
2564#[cfg(any(test, feature = "test-support"))]
2565impl std::ops::Deref for ToolCallEventStreamReceiver {
2566 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2567
2568 fn deref(&self) -> &Self::Target {
2569 &self.0
2570 }
2571}
2572
2573#[cfg(any(test, feature = "test-support"))]
2574impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2575 fn deref_mut(&mut self) -> &mut Self::Target {
2576 &mut self.0
2577 }
2578}
2579
2580impl From<&str> for UserMessageContent {
2581 fn from(text: &str) -> Self {
2582 Self::Text(text.into())
2583 }
2584}
2585
2586impl UserMessageContent {
2587 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2588 match value {
2589 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2590 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2591 acp::ContentBlock::Audio(_) => {
2592 // TODO
2593 Self::Text("[audio]".to_string())
2594 }
2595 acp::ContentBlock::ResourceLink(resource_link) => {
2596 match MentionUri::parse(&resource_link.uri, path_style) {
2597 Ok(uri) => Self::Mention {
2598 uri,
2599 content: String::new(),
2600 },
2601 Err(err) => {
2602 log::error!("Failed to parse mention link: {}", err);
2603 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2604 }
2605 }
2606 }
2607 acp::ContentBlock::Resource(resource) => match resource.resource {
2608 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2609 match MentionUri::parse(&resource.uri, path_style) {
2610 Ok(uri) => Self::Mention {
2611 uri,
2612 content: resource.text,
2613 },
2614 Err(err) => {
2615 log::error!("Failed to parse mention link: {}", err);
2616 Self::Text(
2617 MarkdownCodeBlock {
2618 tag: &resource.uri,
2619 text: &resource.text,
2620 }
2621 .to_string(),
2622 )
2623 }
2624 }
2625 }
2626 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2627 // TODO
2628 Self::Text("[blob]".to_string())
2629 }
2630 other => {
2631 log::warn!("Unexpected content type: {:?}", other);
2632 Self::Text("[unknown]".to_string())
2633 }
2634 },
2635 other => {
2636 log::warn!("Unexpected content type: {:?}", other);
2637 Self::Text("[unknown]".to_string())
2638 }
2639 }
2640 }
2641}
2642
2643impl From<UserMessageContent> for acp::ContentBlock {
2644 fn from(content: UserMessageContent) -> Self {
2645 match content {
2646 UserMessageContent::Text(text) => text.into(),
2647 UserMessageContent::Image(image) => {
2648 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
2649 }
2650 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
2651 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
2652 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
2653 )),
2654 ),
2655 }
2656 }
2657}
2658
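/// Converts an ACP image content block into a `LanguageModelImage`. The original
/// dimensions are not available here, so the size is reported as zero.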
2659fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2660 LanguageModelImage {
2661 source: image_content.data.into(),
2662 // TODO: make this optional?
2663 size: gpui::Size::new(0.into(), 0.into()),
2664 }
2665}