1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 SystemPromptTemplate, Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6};
7use acp_thread::{MentionUri, UserMessageId};
8use action_log::ActionLog;
9
10use agent_client_protocol as acp;
11use agent_settings::{
12 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
13 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
14};
15use anyhow::{Context as _, Result, anyhow};
16use chrono::{DateTime, Utc};
17use client::{ModelRequestUsage, RequestUsage, UserStore};
18use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, Plan, UsageLimit};
19use collections::{HashMap, HashSet, IndexMap};
20use fs::Fs;
21use futures::stream;
22use futures::{
23 FutureExt,
24 channel::{mpsc, oneshot},
25 future::Shared,
26 stream::FuturesUnordered,
27};
28use gpui::{
29 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
30};
31use language_model::{
32 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
33 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
34 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
35 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
36 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
37 ZED_CLOUD_PROVIDER_ID,
38};
39use project::Project;
40use prompt_store::ProjectContext;
41use schemars::{JsonSchema, Schema};
42use serde::{Deserialize, Serialize};
43use settings::{LanguageModelSelection, Settings, update_settings_file};
44use smol::stream::StreamExt;
45use std::{
46 collections::BTreeMap,
47 ops::RangeInclusive,
48 path::Path,
49 rc::Rc,
50 sync::Arc,
51 time::{Duration, Instant},
52};
53use std::{fmt::Write, path::PathBuf};
54use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
55use uuid::Uuid;
56
57const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
58pub const MAX_TOOL_NAME_LENGTH: usize = 64;
59
60/// The ID of the user prompt that initiated a request.
61///
62/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
63#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
64pub struct PromptId(Arc<str>);
65
66impl PromptId {
67 pub fn new() -> Self {
68 Self(Uuid::new_v4().to_string().into())
69 }
70}
71
72impl std::fmt::Display for PromptId {
73 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
74 write!(f, "{}", self.0)
75 }
76}
77
78pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
79pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
80
81#[derive(Debug, Clone)]
82enum RetryStrategy {
83 ExponentialBackoff {
84 initial_delay: Duration,
85 max_attempts: u8,
86 },
87 Fixed {
88 delay: Duration,
89 max_attempts: u8,
90 },
91}
92
93#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
94pub enum Message {
95 User(UserMessage),
96 Agent(AgentMessage),
97 Resume,
98}
99
100impl Message {
101 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
102 match self {
103 Message::Agent(agent_message) => Some(agent_message),
104 _ => None,
105 }
106 }
107
108 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
109 match self {
110 Message::User(message) => vec![message.to_request()],
111 Message::Agent(message) => message.to_request(),
112 Message::Resume => vec![LanguageModelRequestMessage {
113 role: Role::User,
114 content: vec!["Continue where you left off".into()],
115 cache: false,
116 reasoning_details: None,
117 }],
118 }
119 }
120
121 pub fn to_markdown(&self) -> String {
122 match self {
123 Message::User(message) => message.to_markdown(),
124 Message::Agent(message) => message.to_markdown(),
125 Message::Resume => "[resume]\n".into(),
126 }
127 }
128
129 pub fn role(&self) -> Role {
130 match self {
131 Message::User(_) | Message::Resume => Role::User,
132 Message::Agent(_) => Role::Assistant,
133 }
134 }
135}
136
137#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
138pub struct UserMessage {
139 pub id: UserMessageId,
140 pub content: Vec<UserMessageContent>,
141}
142
143#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
144pub enum UserMessageContent {
145 Text(String),
146 Mention { uri: MentionUri, content: String },
147 Image(LanguageModelImage),
148}
149
150impl UserMessage {
151 pub fn to_markdown(&self) -> String {
152 let mut markdown = String::from("## User\n\n");
153
154 for content in &self.content {
155 match content {
156 UserMessageContent::Text(text) => {
157 markdown.push_str(text);
158 markdown.push('\n');
159 }
160 UserMessageContent::Image(_) => {
161 markdown.push_str("<image />\n");
162 }
163 UserMessageContent::Mention { uri, content } => {
164 if !content.is_empty() {
165 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
166 } else {
167 let _ = writeln!(&mut markdown, "{}", uri.as_link());
168 }
169 }
170 }
171 }
172
173 markdown
174 }
175
176 fn to_request(&self) -> LanguageModelRequestMessage {
177 let mut message = LanguageModelRequestMessage {
178 role: Role::User,
179 content: Vec::with_capacity(self.content.len()),
180 cache: false,
181 reasoning_details: None,
182 };
183
184 const OPEN_CONTEXT: &str = "<context>\n\
185 The following items were attached by the user. \
186 They are up-to-date and don't need to be re-read.\n\n";
187
188 const OPEN_FILES_TAG: &str = "<files>";
189 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
190 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
191 const OPEN_SELECTIONS_TAG: &str = "<selections>";
192 const OPEN_THREADS_TAG: &str = "<threads>";
193 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
194 const OPEN_RULES_TAG: &str =
195 "<rules>\nThe user has specified the following rules that should be applied:\n";
196
197 let mut file_context = OPEN_FILES_TAG.to_string();
198 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
199 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
200 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
201 let mut thread_context = OPEN_THREADS_TAG.to_string();
202 let mut fetch_context = OPEN_FETCH_TAG.to_string();
203 let mut rules_context = OPEN_RULES_TAG.to_string();
204
205 for chunk in &self.content {
206 let chunk = match chunk {
207 UserMessageContent::Text(text) => {
208 language_model::MessageContent::Text(text.clone())
209 }
210 UserMessageContent::Image(value) => {
211 language_model::MessageContent::Image(value.clone())
212 }
213 UserMessageContent::Mention { uri, content } => {
214 match uri {
215 MentionUri::File { abs_path } => {
216 write!(
217 &mut file_context,
218 "\n{}",
219 MarkdownCodeBlock {
220 tag: &codeblock_tag(abs_path, None),
221 text: &content.to_string(),
222 }
223 )
224 .ok();
225 }
226 MentionUri::PastedImage => {
227 debug_panic!("pasted image URI should not be used in mention content")
228 }
229 MentionUri::Directory { .. } => {
230 write!(&mut directory_context, "\n{}\n", content).ok();
231 }
232 MentionUri::Symbol {
233 abs_path: path,
234 line_range,
235 ..
236 } => {
237 write!(
238 &mut symbol_context,
239 "\n{}",
240 MarkdownCodeBlock {
241 tag: &codeblock_tag(path, Some(line_range)),
242 text: content
243 }
244 )
245 .ok();
246 }
247 MentionUri::Selection {
248 abs_path: path,
249 line_range,
250 ..
251 } => {
252 write!(
253 &mut selection_context,
254 "\n{}",
255 MarkdownCodeBlock {
256 tag: &codeblock_tag(
257 path.as_deref().unwrap_or("Untitled".as_ref()),
258 Some(line_range)
259 ),
260 text: content
261 }
262 )
263 .ok();
264 }
265 MentionUri::Thread { .. } => {
266 write!(&mut thread_context, "\n{}\n", content).ok();
267 }
268 MentionUri::TextThread { .. } => {
269 write!(&mut thread_context, "\n{}\n", content).ok();
270 }
271 MentionUri::Rule { .. } => {
272 write!(
273 &mut rules_context,
274 "\n{}",
275 MarkdownCodeBlock {
276 tag: "",
277 text: content
278 }
279 )
280 .ok();
281 }
282 MentionUri::Fetch { url } => {
283 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
284 }
285 }
286
287 language_model::MessageContent::Text(uri.as_link().to_string())
288 }
289 };
290
291 message.content.push(chunk);
292 }
293
294 let len_before_context = message.content.len();
295
296 if file_context.len() > OPEN_FILES_TAG.len() {
297 file_context.push_str("</files>\n");
298 message
299 .content
300 .push(language_model::MessageContent::Text(file_context));
301 }
302
303 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
304 directory_context.push_str("</directories>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(directory_context));
308 }
309
310 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
311 symbol_context.push_str("</symbols>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(symbol_context));
315 }
316
317 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
318 selection_context.push_str("</selections>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(selection_context));
322 }
323
324 if thread_context.len() > OPEN_THREADS_TAG.len() {
325 thread_context.push_str("</threads>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(thread_context));
329 }
330
331 if fetch_context.len() > OPEN_FETCH_TAG.len() {
332 fetch_context.push_str("</fetched_urls>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(fetch_context));
336 }
337
338 if rules_context.len() > OPEN_RULES_TAG.len() {
339 rules_context.push_str("</user_rules>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(rules_context));
343 }
344
345 if message.content.len() > len_before_context {
346 message.content.insert(
347 len_before_context,
348 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
349 );
350 message
351 .content
352 .push(language_model::MessageContent::Text("</context>".into()));
353 }
354
355 message
356 }
357}
358
359fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
360 let mut result = String::new();
361
362 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
363 let _ = write!(result, "{} ", extension);
364 }
365
366 let _ = write!(result, "{}", full_path.display());
367
368 if let Some(range) = line_range {
369 if range.start() == range.end() {
370 let _ = write!(result, ":{}", range.start() + 1);
371 } else {
372 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
373 }
374 }
375
376 result
377}
378
379impl AgentMessage {
380 pub fn to_markdown(&self) -> String {
381 let mut markdown = String::from("## Assistant\n\n");
382
383 for content in &self.content {
384 match content {
385 AgentMessageContent::Text(text) => {
386 markdown.push_str(text);
387 markdown.push('\n');
388 }
389 AgentMessageContent::Thinking { text, .. } => {
390 markdown.push_str("<think>");
391 markdown.push_str(text);
392 markdown.push_str("</think>\n");
393 }
394 AgentMessageContent::RedactedThinking(_) => {
395 markdown.push_str("<redacted_thinking />\n")
396 }
397 AgentMessageContent::ToolUse(tool_use) => {
398 markdown.push_str(&format!(
399 "**Tool Use**: {} (ID: {})\n",
400 tool_use.name, tool_use.id
401 ));
402 markdown.push_str(&format!(
403 "{}\n",
404 MarkdownCodeBlock {
405 tag: "json",
406 text: &format!("{:#}", tool_use.input)
407 }
408 ));
409 }
410 }
411 }
412
413 for tool_result in self.tool_results.values() {
414 markdown.push_str(&format!(
415 "**Tool Result**: {} (ID: {})\n\n",
416 tool_result.tool_name, tool_result.tool_use_id
417 ));
418 if tool_result.is_error {
419 markdown.push_str("**ERROR:**\n");
420 }
421
422 match &tool_result.content {
423 LanguageModelToolResultContent::Text(text) => {
424 writeln!(markdown, "{text}\n").ok();
425 }
426 LanguageModelToolResultContent::Image(_) => {
427 writeln!(markdown, "<image />\n").ok();
428 }
429 }
430
431 if let Some(output) = tool_result.output.as_ref() {
432 writeln!(
433 markdown,
434 "**Debug Output**:\n\n```json\n{}\n```\n",
435 serde_json::to_string_pretty(output).unwrap()
436 )
437 .unwrap();
438 }
439 }
440
441 markdown
442 }
443
444 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
445 let mut assistant_message = LanguageModelRequestMessage {
446 role: Role::Assistant,
447 content: Vec::with_capacity(self.content.len()),
448 cache: false,
449 reasoning_details: self.reasoning_details.clone(),
450 };
451 for chunk in &self.content {
452 match chunk {
453 AgentMessageContent::Text(text) => {
454 assistant_message
455 .content
456 .push(language_model::MessageContent::Text(text.clone()));
457 }
458 AgentMessageContent::Thinking { text, signature } => {
459 assistant_message
460 .content
461 .push(language_model::MessageContent::Thinking {
462 text: text.clone(),
463 signature: signature.clone(),
464 });
465 }
466 AgentMessageContent::RedactedThinking(value) => {
467 assistant_message.content.push(
468 language_model::MessageContent::RedactedThinking(value.clone()),
469 );
470 }
471 AgentMessageContent::ToolUse(tool_use) => {
472 if self.tool_results.contains_key(&tool_use.id) {
473 assistant_message
474 .content
475 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
476 }
477 }
478 };
479 }
480
481 let mut user_message = LanguageModelRequestMessage {
482 role: Role::User,
483 content: Vec::new(),
484 cache: false,
485 reasoning_details: None,
486 };
487
488 for tool_result in self.tool_results.values() {
489 let mut tool_result = tool_result.clone();
490 // Surprisingly, the API fails if we return an empty string here.
491 // It thinks we are sending a tool use without a tool result.
492 if tool_result.content.is_empty() {
493 tool_result.content = "<Tool returned an empty string>".into();
494 }
495 user_message
496 .content
497 .push(language_model::MessageContent::ToolResult(tool_result));
498 }
499
500 let mut messages = Vec::new();
501 if !assistant_message.content.is_empty() {
502 messages.push(assistant_message);
503 }
504 if !user_message.content.is_empty() {
505 messages.push(user_message);
506 }
507 messages
508 }
509}
510
511#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
512pub struct AgentMessage {
513 pub content: Vec<AgentMessageContent>,
514 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
515 pub reasoning_details: Option<serde_json::Value>,
516}
517
518#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
519pub enum AgentMessageContent {
520 Text(String),
521 Thinking {
522 text: String,
523 signature: Option<String>,
524 },
525 RedactedThinking(String),
526 ToolUse(LanguageModelToolUse),
527}
528
529pub trait TerminalHandle {
530 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
531 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
532 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
533}
534
535pub trait ThreadEnvironment {
536 fn create_terminal(
537 &self,
538 command: String,
539 cwd: Option<PathBuf>,
540 output_byte_limit: Option<u64>,
541 cx: &mut AsyncApp,
542 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
543}
544
545#[derive(Debug)]
546pub enum ThreadEvent {
547 UserMessage(UserMessage),
548 AgentText(String),
549 AgentThinking(String),
550 ToolCall(acp::ToolCall),
551 ToolCallUpdate(acp_thread::ToolCallUpdate),
552 ToolCallAuthorization(ToolCallAuthorization),
553 Retry(acp_thread::RetryStatus),
554 Stop(acp::StopReason),
555}
556
557#[derive(Debug)]
558pub struct NewTerminal {
559 pub command: String,
560 pub output_byte_limit: Option<u64>,
561 pub cwd: Option<PathBuf>,
562 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
563}
564
565#[derive(Debug)]
566pub struct ToolCallAuthorization {
567 pub tool_call: acp::ToolCallUpdate,
568 pub options: Vec<acp::PermissionOption>,
569 pub response: oneshot::Sender<acp::PermissionOptionId>,
570}
571
572#[derive(Debug, thiserror::Error)]
573enum CompletionError {
574 #[error("max tokens")]
575 MaxTokens,
576 #[error("refusal")]
577 Refusal,
578 #[error(transparent)]
579 Other(#[from] anyhow::Error),
580}
581
582pub struct Thread {
583 id: acp::SessionId,
584 prompt_id: PromptId,
585 updated_at: DateTime<Utc>,
586 title: Option<SharedString>,
587 pending_title_generation: Option<Task<()>>,
588 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
589 summary: Option<SharedString>,
590 messages: Vec<Message>,
591 user_store: Entity<UserStore>,
592 completion_mode: CompletionMode,
593 /// Holds the task that handles agent interaction until the end of the turn.
594 /// Survives across multiple requests as the model performs tool calls and
595 /// we run tools, report their results.
596 running_turn: Option<RunningTurn>,
597 pending_message: Option<AgentMessage>,
598 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
599 tool_use_limit_reached: bool,
600 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
601 #[allow(unused)]
602 cumulative_token_usage: TokenUsage,
603 #[allow(unused)]
604 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
605 context_server_registry: Entity<ContextServerRegistry>,
606 profile_id: AgentProfileId,
607 project_context: Entity<ProjectContext>,
608 templates: Arc<Templates>,
609 model: Option<Arc<dyn LanguageModel>>,
610 summarization_model: Option<Arc<dyn LanguageModel>>,
611 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
612 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
613 pub(crate) project: Entity<Project>,
614 pub(crate) action_log: Entity<ActionLog>,
615 /// Tracks the last time files were read by the agent, to detect external modifications
616 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
617}
618
619impl Thread {
620 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
621 let image = model.map_or(true, |model| model.supports_images());
622 acp::PromptCapabilities {
623 meta: None,
624 image,
625 audio: false,
626 embedded_context: true,
627 }
628 }
629
630 pub fn new(
631 project: Entity<Project>,
632 project_context: Entity<ProjectContext>,
633 context_server_registry: Entity<ContextServerRegistry>,
634 templates: Arc<Templates>,
635 model: Option<Arc<dyn LanguageModel>>,
636 cx: &mut Context<Self>,
637 ) -> Self {
638 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
639 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
640 let (prompt_capabilities_tx, prompt_capabilities_rx) =
641 watch::channel(Self::prompt_capabilities(model.as_deref()));
642 Self {
643 id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
644 prompt_id: PromptId::new(),
645 updated_at: Utc::now(),
646 title: None,
647 pending_title_generation: None,
648 pending_summary_generation: None,
649 summary: None,
650 messages: Vec::new(),
651 user_store: project.read(cx).user_store(),
652 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
653 running_turn: None,
654 pending_message: None,
655 tools: BTreeMap::default(),
656 tool_use_limit_reached: false,
657 request_token_usage: HashMap::default(),
658 cumulative_token_usage: TokenUsage::default(),
659 initial_project_snapshot: {
660 let project_snapshot = Self::project_snapshot(project.clone(), cx);
661 cx.foreground_executor()
662 .spawn(async move { Some(project_snapshot.await) })
663 .shared()
664 },
665 context_server_registry,
666 profile_id,
667 project_context,
668 templates,
669 model,
670 summarization_model: None,
671 prompt_capabilities_tx,
672 prompt_capabilities_rx,
673 project,
674 action_log,
675 file_read_times: HashMap::default(),
676 }
677 }
678
679 pub fn id(&self) -> &acp::SessionId {
680 &self.id
681 }
682
683 pub fn replay(
684 &mut self,
685 cx: &mut Context<Self>,
686 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
687 let (tx, rx) = mpsc::unbounded();
688 let stream = ThreadEventStream(tx);
689 for message in &self.messages {
690 match message {
691 Message::User(user_message) => stream.send_user_message(user_message),
692 Message::Agent(assistant_message) => {
693 for content in &assistant_message.content {
694 match content {
695 AgentMessageContent::Text(text) => stream.send_text(text),
696 AgentMessageContent::Thinking { text, .. } => {
697 stream.send_thinking(text)
698 }
699 AgentMessageContent::RedactedThinking(_) => {}
700 AgentMessageContent::ToolUse(tool_use) => {
701 self.replay_tool_call(
702 tool_use,
703 assistant_message.tool_results.get(&tool_use.id),
704 &stream,
705 cx,
706 );
707 }
708 }
709 }
710 }
711 Message::Resume => {}
712 }
713 }
714 rx
715 }
716
717 fn replay_tool_call(
718 &self,
719 tool_use: &LanguageModelToolUse,
720 tool_result: Option<&LanguageModelToolResult>,
721 stream: &ThreadEventStream,
722 cx: &mut Context<Self>,
723 ) {
724 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
725 self.context_server_registry
726 .read(cx)
727 .servers()
728 .find_map(|(_, tools)| {
729 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
730 Some(tool.clone())
731 } else {
732 None
733 }
734 })
735 });
736
737 let Some(tool) = tool else {
738 stream
739 .0
740 .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
741 meta: None,
742 id: acp::ToolCallId(tool_use.id.to_string().into()),
743 title: tool_use.name.to_string(),
744 kind: acp::ToolKind::Other,
745 status: acp::ToolCallStatus::Failed,
746 content: Vec::new(),
747 locations: Vec::new(),
748 raw_input: Some(tool_use.input.clone()),
749 raw_output: None,
750 })))
751 .ok();
752 return;
753 };
754
755 let title = tool.initial_title(tool_use.input.clone(), cx);
756 let kind = tool.kind();
757 stream.send_tool_call(
758 &tool_use.id,
759 &tool_use.name,
760 title,
761 kind,
762 tool_use.input.clone(),
763 );
764
765 let output = tool_result
766 .as_ref()
767 .and_then(|result| result.output.clone());
768 if let Some(output) = output.clone() {
769 let tool_event_stream = ToolCallEventStream::new(
770 tool_use.id.clone(),
771 stream.clone(),
772 Some(self.project.read(cx).fs().clone()),
773 );
774 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
775 .log_err();
776 }
777
778 stream.update_tool_call_fields(
779 &tool_use.id,
780 acp::ToolCallUpdateFields {
781 status: Some(
782 tool_result
783 .as_ref()
784 .map_or(acp::ToolCallStatus::Failed, |result| {
785 if result.is_error {
786 acp::ToolCallStatus::Failed
787 } else {
788 acp::ToolCallStatus::Completed
789 }
790 }),
791 ),
792 raw_output: output,
793 ..Default::default()
794 },
795 );
796 }
797
798 pub fn from_db(
799 id: acp::SessionId,
800 db_thread: DbThread,
801 project: Entity<Project>,
802 project_context: Entity<ProjectContext>,
803 context_server_registry: Entity<ContextServerRegistry>,
804 templates: Arc<Templates>,
805 cx: &mut Context<Self>,
806 ) -> Self {
807 let profile_id = db_thread
808 .profile
809 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
810
811 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
812 db_thread
813 .model
814 .and_then(|model| {
815 let model = SelectedModel {
816 provider: model.provider.clone().into(),
817 model: model.model.into(),
818 };
819 registry.select_model(&model, cx)
820 })
821 .or_else(|| registry.default_model())
822 .map(|model| model.model)
823 });
824
825 if model.is_none() {
826 model = Self::resolve_profile_model(&profile_id, cx);
827 }
828 if model.is_none() {
829 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
830 registry.default_model().map(|model| model.model)
831 });
832 }
833
834 let (prompt_capabilities_tx, prompt_capabilities_rx) =
835 watch::channel(Self::prompt_capabilities(model.as_deref()));
836
837 let action_log = cx.new(|_| ActionLog::new(project.clone()));
838
839 Self {
840 id,
841 prompt_id: PromptId::new(),
842 title: if db_thread.title.is_empty() {
843 None
844 } else {
845 Some(db_thread.title.clone())
846 },
847 pending_title_generation: None,
848 pending_summary_generation: None,
849 summary: db_thread.detailed_summary,
850 messages: db_thread.messages,
851 user_store: project.read(cx).user_store(),
852 completion_mode: db_thread.completion_mode.unwrap_or_default(),
853 running_turn: None,
854 pending_message: None,
855 tools: BTreeMap::default(),
856 tool_use_limit_reached: false,
857 request_token_usage: db_thread.request_token_usage.clone(),
858 cumulative_token_usage: db_thread.cumulative_token_usage,
859 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
860 context_server_registry,
861 profile_id,
862 project_context,
863 templates,
864 model,
865 summarization_model: None,
866 project,
867 action_log,
868 updated_at: db_thread.updated_at,
869 prompt_capabilities_tx,
870 prompt_capabilities_rx,
871 file_read_times: HashMap::default(),
872 }
873 }
874
875 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
876 let initial_project_snapshot = self.initial_project_snapshot.clone();
877 let mut thread = DbThread {
878 title: self.title(),
879 messages: self.messages.clone(),
880 updated_at: self.updated_at,
881 detailed_summary: self.summary.clone(),
882 initial_project_snapshot: None,
883 cumulative_token_usage: self.cumulative_token_usage,
884 request_token_usage: self.request_token_usage.clone(),
885 model: self.model.as_ref().map(|model| DbLanguageModel {
886 provider: model.provider_id().to_string(),
887 model: model.name().0.to_string(),
888 }),
889 completion_mode: Some(self.completion_mode),
890 profile: Some(self.profile_id.clone()),
891 };
892
893 cx.background_spawn(async move {
894 let initial_project_snapshot = initial_project_snapshot.await;
895 thread.initial_project_snapshot = initial_project_snapshot;
896 thread
897 })
898 }
899
900 /// Create a snapshot of the current project state including git information and unsaved buffers.
901 fn project_snapshot(
902 project: Entity<Project>,
903 cx: &mut Context<Self>,
904 ) -> Task<Arc<ProjectSnapshot>> {
905 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
906 cx.spawn(async move |_, _| {
907 let snapshot = task.await;
908
909 Arc::new(ProjectSnapshot {
910 worktree_snapshots: snapshot.worktree_snapshots,
911 timestamp: Utc::now(),
912 })
913 })
914 }
915
916 pub fn project_context(&self) -> &Entity<ProjectContext> {
917 &self.project_context
918 }
919
920 pub fn project(&self) -> &Entity<Project> {
921 &self.project
922 }
923
924 pub fn action_log(&self) -> &Entity<ActionLog> {
925 &self.action_log
926 }
927
928 pub fn is_empty(&self) -> bool {
929 self.messages.is_empty() && self.title.is_none()
930 }
931
932 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
933 self.model.as_ref()
934 }
935
936 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
937 let old_usage = self.latest_token_usage();
938 self.model = Some(model);
939 let new_caps = Self::prompt_capabilities(self.model.as_deref());
940 let new_usage = self.latest_token_usage();
941 if old_usage != new_usage {
942 cx.emit(TokenUsageUpdated(new_usage));
943 }
944 self.prompt_capabilities_tx.send(new_caps).log_err();
945 cx.notify()
946 }
947
948 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
949 self.summarization_model.as_ref()
950 }
951
952 pub fn set_summarization_model(
953 &mut self,
954 model: Option<Arc<dyn LanguageModel>>,
955 cx: &mut Context<Self>,
956 ) {
957 self.summarization_model = model;
958 cx.notify()
959 }
960
961 pub fn completion_mode(&self) -> CompletionMode {
962 self.completion_mode
963 }
964
965 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
966 let old_usage = self.latest_token_usage();
967 self.completion_mode = mode;
968 let new_usage = self.latest_token_usage();
969 if old_usage != new_usage {
970 cx.emit(TokenUsageUpdated(new_usage));
971 }
972 cx.notify()
973 }
974
975 #[cfg(any(test, feature = "test-support"))]
976 pub fn last_message(&self) -> Option<Message> {
977 if let Some(message) = self.pending_message.clone() {
978 Some(Message::Agent(message))
979 } else {
980 self.messages.last().cloned()
981 }
982 }
983
984 pub fn add_default_tools(
985 &mut self,
986 environment: Rc<dyn ThreadEnvironment>,
987 cx: &mut Context<Self>,
988 ) {
989 let language_registry = self.project.read(cx).languages().clone();
990 self.add_tool(CopyPathTool::new(self.project.clone()));
991 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
992 self.add_tool(DeletePathTool::new(
993 self.project.clone(),
994 self.action_log.clone(),
995 ));
996 self.add_tool(DiagnosticsTool::new(self.project.clone()));
997 self.add_tool(EditFileTool::new(
998 self.project.clone(),
999 cx.weak_entity(),
1000 language_registry,
1001 Templates::new(),
1002 ));
1003 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1004 self.add_tool(FindPathTool::new(self.project.clone()));
1005 self.add_tool(GrepTool::new(self.project.clone()));
1006 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1007 self.add_tool(MovePathTool::new(self.project.clone()));
1008 self.add_tool(NowTool);
1009 self.add_tool(OpenTool::new(self.project.clone()));
1010 self.add_tool(ReadFileTool::new(
1011 cx.weak_entity(),
1012 self.project.clone(),
1013 self.action_log.clone(),
1014 ));
1015 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1016 self.add_tool(ThinkingTool);
1017 self.add_tool(WebSearchTool);
1018 }
1019
1020 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1021 self.tools.insert(T::name().into(), tool.erase());
1022 }
1023
1024 pub fn remove_tool(&mut self, name: &str) -> bool {
1025 self.tools.remove(name).is_some()
1026 }
1027
1028 pub fn profile(&self) -> &AgentProfileId {
1029 &self.profile_id
1030 }
1031
1032 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1033 if self.profile_id == profile_id {
1034 return;
1035 }
1036
1037 self.profile_id = profile_id;
1038
1039 // Swap to the profile's preferred model when available.
1040 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1041 self.set_model(model, cx);
1042 }
1043 }
1044
1045 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1046 if let Some(running_turn) = self.running_turn.take() {
1047 running_turn.cancel();
1048 }
1049 self.flush_pending_message(cx);
1050 }
1051
1052 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1053 let Some(last_user_message) = self.last_user_message() else {
1054 return;
1055 };
1056
1057 self.request_token_usage
1058 .insert(last_user_message.id.clone(), update);
1059 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1060 cx.notify();
1061 }
1062
1063 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1064 self.cancel(cx);
1065 let Some(position) = self.messages.iter().position(
1066 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1067 ) else {
1068 return Err(anyhow!("Message not found"));
1069 };
1070
1071 for message in self.messages.drain(position..) {
1072 match message {
1073 Message::User(message) => {
1074 self.request_token_usage.remove(&message.id);
1075 }
1076 Message::Agent(_) | Message::Resume => {}
1077 }
1078 }
1079 self.clear_summary();
1080 cx.notify();
1081 Ok(())
1082 }
1083
1084 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1085 let last_user_message = self.last_user_message()?;
1086 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1087 Some(*tokens)
1088 }
1089
1090 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1091 let usage = self.latest_request_token_usage()?;
1092 let model = self.model.clone()?;
1093 Some(acp_thread::TokenUsage {
1094 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1095 used_tokens: usage.total_tokens(),
1096 })
1097 }
1098
1099 /// Look up the active profile and resolve its preferred model if one is configured.
1100 fn resolve_profile_model(
1101 profile_id: &AgentProfileId,
1102 cx: &mut Context<Self>,
1103 ) -> Option<Arc<dyn LanguageModel>> {
1104 let selection = AgentSettings::get_global(cx)
1105 .profiles
1106 .get(profile_id)?
1107 .default_model
1108 .clone()?;
1109 Self::resolve_model_from_selection(&selection, cx)
1110 }
1111
1112 /// Translate a stored model selection into the configured model from the registry.
1113 fn resolve_model_from_selection(
1114 selection: &LanguageModelSelection,
1115 cx: &mut Context<Self>,
1116 ) -> Option<Arc<dyn LanguageModel>> {
1117 let selected = SelectedModel {
1118 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1119 model: LanguageModelId::from(selection.model.clone()),
1120 };
1121 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1122 registry
1123 .select_model(&selected, cx)
1124 .map(|configured| configured.model)
1125 })
1126 }
1127
1128 pub fn resume(
1129 &mut self,
1130 cx: &mut Context<Self>,
1131 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1132 self.messages.push(Message::Resume);
1133 cx.notify();
1134
1135 log::debug!("Total messages in thread: {}", self.messages.len());
1136 self.run_turn(cx)
1137 }
1138
1139 /// Sending a message results in the model streaming a response, which could include tool calls.
1140 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1141 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1142 pub fn send<T>(
1143 &mut self,
1144 id: UserMessageId,
1145 content: impl IntoIterator<Item = T>,
1146 cx: &mut Context<Self>,
1147 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1148 where
1149 T: Into<UserMessageContent>,
1150 {
1151 let model = self.model().context("No language model configured")?;
1152
1153 log::info!("Thread::send called with model: {}", model.name().0);
1154 self.advance_prompt_id();
1155
1156 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1157 log::debug!("Thread::send content: {:?}", content);
1158
1159 self.messages
1160 .push(Message::User(UserMessage { id, content }));
1161 cx.notify();
1162
1163 log::debug!("Total messages in thread: {}", self.messages.len());
1164 self.run_turn(cx)
1165 }
1166
1167 #[cfg(feature = "eval")]
1168 pub fn proceed(
1169 &mut self,
1170 cx: &mut Context<Self>,
1171 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1172 self.run_turn(cx)
1173 }
1174
1175 fn run_turn(
1176 &mut self,
1177 cx: &mut Context<Self>,
1178 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1179 self.cancel(cx);
1180
1181 let model = self.model.clone().context("No language model configured")?;
1182 let profile = AgentSettings::get_global(cx)
1183 .profiles
1184 .get(&self.profile_id)
1185 .context("Profile not found")?;
1186 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1187 let event_stream = ThreadEventStream(events_tx);
1188 let message_ix = self.messages.len().saturating_sub(1);
1189 self.tool_use_limit_reached = false;
1190 self.clear_summary();
1191 self.running_turn = Some(RunningTurn {
1192 event_stream: event_stream.clone(),
1193 tools: self.enabled_tools(profile, &model, cx),
1194 _task: cx.spawn(async move |this, cx| {
1195 log::debug!("Starting agent turn execution");
1196
1197 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1198 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1199
1200 match turn_result {
1201 Ok(()) => {
1202 log::debug!("Turn execution completed");
1203 event_stream.send_stop(acp::StopReason::EndTurn);
1204 }
1205 Err(error) => {
1206 log::error!("Turn execution failed: {:?}", error);
1207 match error.downcast::<CompletionError>() {
1208 Ok(CompletionError::Refusal) => {
1209 event_stream.send_stop(acp::StopReason::Refusal);
1210 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1211 }
1212 Ok(CompletionError::MaxTokens) => {
1213 event_stream.send_stop(acp::StopReason::MaxTokens);
1214 }
1215 Ok(CompletionError::Other(error)) | Err(error) => {
1216 event_stream.send_error(error);
1217 }
1218 }
1219 }
1220 }
1221
1222 _ = this.update(cx, |this, _| this.running_turn.take());
1223 }),
1224 });
1225 Ok(events_rx)
1226 }
1227
1228 async fn run_turn_internal(
1229 this: &WeakEntity<Self>,
1230 model: Arc<dyn LanguageModel>,
1231 event_stream: &ThreadEventStream,
1232 cx: &mut AsyncApp,
1233 ) -> Result<()> {
1234 let mut attempt = 0;
1235 let mut intent = CompletionIntent::UserPrompt;
1236 loop {
1237 let request =
1238 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1239
1240 telemetry::event!(
1241 "Agent Thread Completion",
1242 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1243 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1244 model = model.telemetry_id(),
1245 model_provider = model.provider_id().to_string(),
1246 attempt
1247 );
1248
1249 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1250
1251 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1252 Ok(events) => (events, None),
1253 Err(err) => (stream::empty().boxed(), Some(err)),
1254 };
1255 let mut tool_results = FuturesUnordered::new();
1256 while let Some(event) = events.next().await {
1257 log::trace!("Received completion event: {:?}", event);
1258 match event {
1259 Ok(event) => {
1260 tool_results.extend(this.update(cx, |this, cx| {
1261 this.handle_completion_event(event, event_stream, cx)
1262 })??);
1263 }
1264 Err(err) => {
1265 error = Some(err);
1266 break;
1267 }
1268 }
1269 }
1270
1271 let end_turn = tool_results.is_empty();
1272 while let Some(tool_result) = tool_results.next().await {
1273 log::debug!("Tool finished {:?}", tool_result);
1274
1275 event_stream.update_tool_call_fields(
1276 &tool_result.tool_use_id,
1277 acp::ToolCallUpdateFields {
1278 status: Some(if tool_result.is_error {
1279 acp::ToolCallStatus::Failed
1280 } else {
1281 acp::ToolCallStatus::Completed
1282 }),
1283 raw_output: tool_result.output.clone(),
1284 ..Default::default()
1285 },
1286 );
1287 this.update(cx, |this, _cx| {
1288 this.pending_message()
1289 .tool_results
1290 .insert(tool_result.tool_use_id.clone(), tool_result);
1291 })?;
1292 }
1293
1294 this.update(cx, |this, cx| {
1295 this.flush_pending_message(cx);
1296 if this.title.is_none() && this.pending_title_generation.is_none() {
1297 this.generate_title(cx);
1298 }
1299 })?;
1300
1301 if let Some(error) = error {
1302 attempt += 1;
1303 let retry = this.update(cx, |this, cx| {
1304 let user_store = this.user_store.read(cx);
1305 this.handle_completion_error(error, attempt, user_store.plan())
1306 })??;
1307 let timer = cx.background_executor().timer(retry.duration);
1308 event_stream.send_retry(retry);
1309 timer.await;
1310 this.update(cx, |this, _cx| {
1311 if let Some(Message::Agent(message)) = this.messages.last() {
1312 if message.tool_results.is_empty() {
1313 intent = CompletionIntent::UserPrompt;
1314 this.messages.push(Message::Resume);
1315 }
1316 }
1317 })?;
1318 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1319 return Err(language_model::ToolUseLimitReachedError.into());
1320 } else if end_turn {
1321 return Ok(());
1322 } else {
1323 intent = CompletionIntent::ToolResults;
1324 attempt = 0;
1325 }
1326 }
1327 }
1328
1329 fn handle_completion_error(
1330 &mut self,
1331 error: LanguageModelCompletionError,
1332 attempt: u8,
1333 plan: Option<Plan>,
1334 ) -> Result<acp_thread::RetryStatus> {
1335 let Some(model) = self.model.as_ref() else {
1336 return Err(anyhow!(error));
1337 };
1338
1339 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1340 match plan {
1341 Some(Plan::V2(_)) => true,
1342 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1343 None => false,
1344 }
1345 } else {
1346 true
1347 };
1348
1349 if !auto_retry {
1350 return Err(anyhow!(error));
1351 }
1352
1353 let Some(strategy) = Self::retry_strategy_for(&error) else {
1354 return Err(anyhow!(error));
1355 };
1356
1357 let max_attempts = match &strategy {
1358 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1359 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1360 };
1361
1362 if attempt > max_attempts {
1363 return Err(anyhow!(error));
1364 }
1365
1366 let delay = match &strategy {
1367 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1368 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1369 Duration::from_secs(delay_secs)
1370 }
1371 RetryStrategy::Fixed { delay, .. } => *delay,
1372 };
1373 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1374
1375 Ok(acp_thread::RetryStatus {
1376 last_error: error.to_string().into(),
1377 attempt: attempt as usize,
1378 max_attempts: max_attempts as usize,
1379 started_at: Instant::now(),
1380 duration: delay,
1381 })
1382 }
1383
1384 /// A helper method that's called on every streamed completion event.
1385 /// Returns an optional tool result task, which the main agentic loop will
1386 /// send back to the model when it resolves.
1387 fn handle_completion_event(
1388 &mut self,
1389 event: LanguageModelCompletionEvent,
1390 event_stream: &ThreadEventStream,
1391 cx: &mut Context<Self>,
1392 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1393 log::trace!("Handling streamed completion event: {:?}", event);
1394 use LanguageModelCompletionEvent::*;
1395
1396 match event {
1397 StartMessage { .. } => {
1398 self.flush_pending_message(cx);
1399 self.pending_message = Some(AgentMessage::default());
1400 }
1401 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1402 Thinking { text, signature } => {
1403 self.handle_thinking_event(text, signature, event_stream, cx)
1404 }
1405 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1406 ReasoningDetails(details) => {
1407 let last_message = self.pending_message();
1408 // Store the last non-empty reasoning_details (overwrites earlier ones)
1409 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1410 if let serde_json::Value::Array(ref arr) = details {
1411 if !arr.is_empty() {
1412 last_message.reasoning_details = Some(details);
1413 }
1414 } else {
1415 last_message.reasoning_details = Some(details);
1416 }
1417 }
1418 ToolUse(tool_use) => {
1419 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1420 }
1421 ToolUseJsonParseError {
1422 id,
1423 tool_name,
1424 raw_input,
1425 json_parse_error,
1426 } => {
1427 return Ok(Some(Task::ready(
1428 self.handle_tool_use_json_parse_error_event(
1429 id,
1430 tool_name,
1431 raw_input,
1432 json_parse_error,
1433 ),
1434 )));
1435 }
1436 UsageUpdate(usage) => {
1437 telemetry::event!(
1438 "Agent Thread Completion Usage Updated",
1439 thread_id = self.id.to_string(),
1440 prompt_id = self.prompt_id.to_string(),
1441 model = self.model.as_ref().map(|m| m.telemetry_id()),
1442 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1443 input_tokens = usage.input_tokens,
1444 output_tokens = usage.output_tokens,
1445 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1446 cache_read_input_tokens = usage.cache_read_input_tokens,
1447 );
1448 self.update_token_usage(usage, cx);
1449 }
1450 StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => {
1451 self.update_model_request_usage(amount, limit, cx);
1452 }
1453 StatusUpdate(
1454 CompletionRequestStatus::Started
1455 | CompletionRequestStatus::Queued { .. }
1456 | CompletionRequestStatus::Failed { .. },
1457 ) => {}
1458 StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => {
1459 self.tool_use_limit_reached = true;
1460 }
1461 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1462 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1463 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1464 }
1465
1466 Ok(None)
1467 }
1468
1469 fn handle_text_event(
1470 &mut self,
1471 new_text: String,
1472 event_stream: &ThreadEventStream,
1473 cx: &mut Context<Self>,
1474 ) {
1475 event_stream.send_text(&new_text);
1476
1477 let last_message = self.pending_message();
1478 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1479 text.push_str(&new_text);
1480 } else {
1481 last_message
1482 .content
1483 .push(AgentMessageContent::Text(new_text));
1484 }
1485
1486 cx.notify();
1487 }
1488
1489 fn handle_thinking_event(
1490 &mut self,
1491 new_text: String,
1492 new_signature: Option<String>,
1493 event_stream: &ThreadEventStream,
1494 cx: &mut Context<Self>,
1495 ) {
1496 event_stream.send_thinking(&new_text);
1497
1498 let last_message = self.pending_message();
1499 if let Some(AgentMessageContent::Thinking { text, signature }) =
1500 last_message.content.last_mut()
1501 {
1502 text.push_str(&new_text);
1503 *signature = new_signature.or(signature.take());
1504 } else {
1505 last_message.content.push(AgentMessageContent::Thinking {
1506 text: new_text,
1507 signature: new_signature,
1508 });
1509 }
1510
1511 cx.notify();
1512 }
1513
1514 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1515 let last_message = self.pending_message();
1516 last_message
1517 .content
1518 .push(AgentMessageContent::RedactedThinking(data));
1519 cx.notify();
1520 }
1521
1522 fn handle_tool_use_event(
1523 &mut self,
1524 tool_use: LanguageModelToolUse,
1525 event_stream: &ThreadEventStream,
1526 cx: &mut Context<Self>,
1527 ) -> Option<Task<LanguageModelToolResult>> {
1528 cx.notify();
1529
1530 let tool = self.tool(tool_use.name.as_ref());
1531 let mut title = SharedString::from(&tool_use.name);
1532 let mut kind = acp::ToolKind::Other;
1533 if let Some(tool) = tool.as_ref() {
1534 title = tool.initial_title(tool_use.input.clone(), cx);
1535 kind = tool.kind();
1536 }
1537
1538 // Ensure the last message ends in the current tool use
1539 let last_message = self.pending_message();
1540 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1541 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1542 if last_tool_use.id == tool_use.id {
1543 *last_tool_use = tool_use.clone();
1544 false
1545 } else {
1546 true
1547 }
1548 } else {
1549 true
1550 }
1551 });
1552
1553 if push_new_tool_use {
1554 event_stream.send_tool_call(
1555 &tool_use.id,
1556 &tool_use.name,
1557 title,
1558 kind,
1559 tool_use.input.clone(),
1560 );
1561 last_message
1562 .content
1563 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1564 } else {
1565 event_stream.update_tool_call_fields(
1566 &tool_use.id,
1567 acp::ToolCallUpdateFields {
1568 title: Some(title.into()),
1569 kind: Some(kind),
1570 raw_input: Some(tool_use.input.clone()),
1571 ..Default::default()
1572 },
1573 );
1574 }
1575
1576 if !tool_use.is_input_complete {
1577 return None;
1578 }
1579
1580 let Some(tool) = tool else {
1581 let content = format!("No tool named {} exists", tool_use.name);
1582 return Some(Task::ready(LanguageModelToolResult {
1583 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1584 tool_use_id: tool_use.id,
1585 tool_name: tool_use.name,
1586 is_error: true,
1587 output: None,
1588 }));
1589 };
1590
1591 let fs = self.project.read(cx).fs().clone();
1592 let tool_event_stream =
1593 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1594 tool_event_stream.update_fields(acp::ToolCallUpdateFields {
1595 status: Some(acp::ToolCallStatus::InProgress),
1596 ..Default::default()
1597 });
1598 let supports_images = self.model().is_some_and(|model| model.supports_images());
1599 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1600 log::debug!("Running tool {}", tool_use.name);
1601 Some(cx.foreground_executor().spawn(async move {
1602 let tool_result = tool_result.await.and_then(|output| {
1603 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1604 && !supports_images
1605 {
1606 return Err(anyhow!(
1607 "Attempted to read an image, but this model doesn't support it.",
1608 ));
1609 }
1610 Ok(output)
1611 });
1612
1613 match tool_result {
1614 Ok(output) => LanguageModelToolResult {
1615 tool_use_id: tool_use.id,
1616 tool_name: tool_use.name,
1617 is_error: false,
1618 content: output.llm_output,
1619 output: Some(output.raw_output),
1620 },
1621 Err(error) => LanguageModelToolResult {
1622 tool_use_id: tool_use.id,
1623 tool_name: tool_use.name,
1624 is_error: true,
1625 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1626 output: Some(error.to_string().into()),
1627 },
1628 }
1629 }))
1630 }
1631
1632 fn handle_tool_use_json_parse_error_event(
1633 &mut self,
1634 tool_use_id: LanguageModelToolUseId,
1635 tool_name: Arc<str>,
1636 raw_input: Arc<str>,
1637 json_parse_error: String,
1638 ) -> LanguageModelToolResult {
1639 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1640 LanguageModelToolResult {
1641 tool_use_id,
1642 tool_name,
1643 is_error: true,
1644 content: LanguageModelToolResultContent::Text(tool_output.into()),
1645 output: Some(serde_json::Value::String(raw_input.to_string())),
1646 }
1647 }
1648
1649 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1650 self.project
1651 .read(cx)
1652 .user_store()
1653 .update(cx, |user_store, cx| {
1654 user_store.update_model_request_usage(
1655 ModelRequestUsage(RequestUsage {
1656 amount: amount as i32,
1657 limit,
1658 }),
1659 cx,
1660 )
1661 });
1662 }
1663
1664 pub fn title(&self) -> SharedString {
1665 self.title.clone().unwrap_or("New Thread".into())
1666 }
1667
1668 pub fn is_generating_summary(&self) -> bool {
1669 self.pending_summary_generation.is_some()
1670 }
1671
1672 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1673 if let Some(summary) = self.summary.as_ref() {
1674 return Task::ready(Some(summary.clone())).shared();
1675 }
1676 if let Some(task) = self.pending_summary_generation.clone() {
1677 return task;
1678 }
1679 let Some(model) = self.summarization_model.clone() else {
1680 log::error!("No summarization model available");
1681 return Task::ready(None).shared();
1682 };
1683 let mut request = LanguageModelRequest {
1684 intent: Some(CompletionIntent::ThreadContextSummarization),
1685 temperature: AgentSettings::temperature_for_model(&model, cx),
1686 ..Default::default()
1687 };
1688
1689 for message in &self.messages {
1690 request.messages.extend(message.to_request());
1691 }
1692
1693 request.messages.push(LanguageModelRequestMessage {
1694 role: Role::User,
1695 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1696 cache: false,
1697 reasoning_details: None,
1698 });
1699
1700 let task = cx
1701 .spawn(async move |this, cx| {
1702 let mut summary = String::new();
1703 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1704 while let Some(event) = messages.next().await {
1705 let event = event.log_err()?;
1706 let text = match event {
1707 LanguageModelCompletionEvent::Text(text) => text,
1708 LanguageModelCompletionEvent::StatusUpdate(
1709 CompletionRequestStatus::UsageUpdated { amount, limit },
1710 ) => {
1711 this.update(cx, |thread, cx| {
1712 thread.update_model_request_usage(amount, limit, cx);
1713 })
1714 .ok()?;
1715 continue;
1716 }
1717 _ => continue,
1718 };
1719
1720 let mut lines = text.lines();
1721 summary.extend(lines.next());
1722 }
1723
1724 log::debug!("Setting summary: {}", summary);
1725 let summary = SharedString::from(summary);
1726
1727 this.update(cx, |this, cx| {
1728 this.summary = Some(summary.clone());
1729 this.pending_summary_generation = None;
1730 cx.notify()
1731 })
1732 .ok()?;
1733
1734 Some(summary)
1735 })
1736 .shared();
1737 self.pending_summary_generation = Some(task.clone());
1738 task
1739 }
1740
1741 fn generate_title(&mut self, cx: &mut Context<Self>) {
1742 let Some(model) = self.summarization_model.clone() else {
1743 return;
1744 };
1745
1746 log::debug!(
1747 "Generating title with model: {:?}",
1748 self.summarization_model.as_ref().map(|model| model.name())
1749 );
1750 let mut request = LanguageModelRequest {
1751 intent: Some(CompletionIntent::ThreadSummarization),
1752 temperature: AgentSettings::temperature_for_model(&model, cx),
1753 ..Default::default()
1754 };
1755
1756 for message in &self.messages {
1757 request.messages.extend(message.to_request());
1758 }
1759
1760 request.messages.push(LanguageModelRequestMessage {
1761 role: Role::User,
1762 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1763 cache: false,
1764 reasoning_details: None,
1765 });
1766 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1767 let mut title = String::new();
1768
1769 let generate = async {
1770 let mut messages = model.stream_completion(request, cx).await?;
1771 while let Some(event) = messages.next().await {
1772 let event = event?;
1773 let text = match event {
1774 LanguageModelCompletionEvent::Text(text) => text,
1775 LanguageModelCompletionEvent::StatusUpdate(
1776 CompletionRequestStatus::UsageUpdated { amount, limit },
1777 ) => {
1778 this.update(cx, |thread, cx| {
1779 thread.update_model_request_usage(amount, limit, cx);
1780 })?;
1781 continue;
1782 }
1783 _ => continue,
1784 };
1785
1786 let mut lines = text.lines();
1787 title.extend(lines.next());
1788
1789 // Stop if the LLM generated multiple lines.
1790 if lines.next().is_some() {
1791 break;
1792 }
1793 }
1794 anyhow::Ok(())
1795 };
1796
1797 if generate.await.context("failed to generate title").is_ok() {
1798 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1799 }
1800 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1801 }));
1802 }
1803
1804 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1805 self.pending_title_generation = None;
1806 if Some(&title) != self.title.as_ref() {
1807 self.title = Some(title);
1808 cx.emit(TitleUpdated);
1809 cx.notify();
1810 }
1811 }
1812
1813 fn clear_summary(&mut self) {
1814 self.summary = None;
1815 self.pending_summary_generation = None;
1816 }
1817
1818 fn last_user_message(&self) -> Option<&UserMessage> {
1819 self.messages
1820 .iter()
1821 .rev()
1822 .find_map(|message| match message {
1823 Message::User(user_message) => Some(user_message),
1824 Message::Agent(_) => None,
1825 Message::Resume => None,
1826 })
1827 }
1828
1829 fn pending_message(&mut self) -> &mut AgentMessage {
1830 self.pending_message.get_or_insert_default()
1831 }
1832
1833 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1834 let Some(mut message) = self.pending_message.take() else {
1835 return;
1836 };
1837
1838 if message.content.is_empty() {
1839 return;
1840 }
1841
1842 for content in &message.content {
1843 let AgentMessageContent::ToolUse(tool_use) = content else {
1844 continue;
1845 };
1846
1847 if !message.tool_results.contains_key(&tool_use.id) {
1848 message.tool_results.insert(
1849 tool_use.id.clone(),
1850 LanguageModelToolResult {
1851 tool_use_id: tool_use.id.clone(),
1852 tool_name: tool_use.name.clone(),
1853 is_error: true,
1854 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1855 output: None,
1856 },
1857 );
1858 }
1859 }
1860
1861 self.messages.push(Message::Agent(message));
1862 self.updated_at = Utc::now();
1863 self.clear_summary();
1864 cx.notify()
1865 }
1866
1867 pub(crate) fn build_completion_request(
1868 &self,
1869 completion_intent: CompletionIntent,
1870 cx: &App,
1871 ) -> Result<LanguageModelRequest> {
1872 let model = self.model().context("No language model configured")?;
1873 let tools = if let Some(turn) = self.running_turn.as_ref() {
1874 turn.tools
1875 .iter()
1876 .filter_map(|(tool_name, tool)| {
1877 log::trace!("Including tool: {}", tool_name);
1878 Some(LanguageModelRequestTool {
1879 name: tool_name.to_string(),
1880 description: tool.description().to_string(),
1881 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1882 })
1883 })
1884 .collect::<Vec<_>>()
1885 } else {
1886 Vec::new()
1887 };
1888
1889 log::debug!("Building completion request");
1890 log::debug!("Completion intent: {:?}", completion_intent);
1891 log::debug!("Completion mode: {:?}", self.completion_mode);
1892
1893 let available_tools: Vec<_> = self
1894 .running_turn
1895 .as_ref()
1896 .map(|turn| turn.tools.keys().cloned().collect())
1897 .unwrap_or_default();
1898
1899 log::debug!("Request includes {} tools", available_tools.len());
1900 let messages = self.build_request_messages(available_tools, cx);
1901 log::debug!("Request will include {} messages", messages.len());
1902
1903 let request = LanguageModelRequest {
1904 thread_id: Some(self.id.to_string()),
1905 prompt_id: Some(self.prompt_id.to_string()),
1906 intent: Some(completion_intent),
1907 mode: Some(self.completion_mode.into()),
1908 messages,
1909 tools,
1910 tool_choice: None,
1911 stop: Vec::new(),
1912 temperature: AgentSettings::temperature_for_model(model, cx),
1913 thinking_allowed: true,
1914 };
1915
1916 log::debug!("Completion request built successfully");
1917 Ok(request)
1918 }
1919
1920 fn enabled_tools(
1921 &self,
1922 profile: &AgentProfileSettings,
1923 model: &Arc<dyn LanguageModel>,
1924 cx: &App,
1925 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1926 fn truncate(tool_name: &SharedString) -> SharedString {
1927 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1928 let mut truncated = tool_name.to_string();
1929 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1930 truncated.into()
1931 } else {
1932 tool_name.clone()
1933 }
1934 }
1935
1936 let mut tools = self
1937 .tools
1938 .iter()
1939 .filter_map(|(tool_name, tool)| {
1940 if tool.supports_provider(&model.provider_id())
1941 && profile.is_tool_enabled(tool_name)
1942 {
1943 Some((truncate(tool_name), tool.clone()))
1944 } else {
1945 None
1946 }
1947 })
1948 .collect::<BTreeMap<_, _>>();
1949
1950 let mut context_server_tools = Vec::new();
1951 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1952 let mut duplicate_tool_names = HashSet::default();
1953 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1954 for (tool_name, tool) in server_tools {
1955 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1956 let tool_name = truncate(tool_name);
1957 if !seen_tools.insert(tool_name.clone()) {
1958 duplicate_tool_names.insert(tool_name.clone());
1959 }
1960 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1961 }
1962 }
1963 }
1964
1965 // When there are duplicate tool names, disambiguate by prefixing them
1966 // with the server ID. In the rare case there isn't enough space for the
1967 // disambiguated tool name, keep only the last tool with this name.
1968 for (server_id, tool_name, tool) in context_server_tools {
1969 if duplicate_tool_names.contains(&tool_name) {
1970 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1971 if available >= 2 {
1972 let mut disambiguated = server_id.0.to_string();
1973 disambiguated.truncate(available - 1);
1974 disambiguated.push('_');
1975 disambiguated.push_str(&tool_name);
1976 tools.insert(disambiguated.into(), tool.clone());
1977 } else {
1978 tools.insert(tool_name, tool.clone());
1979 }
1980 } else {
1981 tools.insert(tool_name, tool.clone());
1982 }
1983 }
1984
1985 tools
1986 }
1987
1988 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1989 self.running_turn.as_ref()?.tools.get(name).cloned()
1990 }
1991
1992 fn build_request_messages(
1993 &self,
1994 available_tools: Vec<SharedString>,
1995 cx: &App,
1996 ) -> Vec<LanguageModelRequestMessage> {
1997 log::trace!(
1998 "Building request messages from {} thread messages",
1999 self.messages.len()
2000 );
2001
2002 let system_prompt = SystemPromptTemplate {
2003 project: self.project_context.read(cx),
2004 available_tools,
2005 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2006 }
2007 .render(&self.templates)
2008 .context("failed to build system prompt")
2009 .expect("Invalid template");
2010 let mut messages = vec![LanguageModelRequestMessage {
2011 role: Role::System,
2012 content: vec![system_prompt.into()],
2013 cache: false,
2014 reasoning_details: None,
2015 }];
2016 for message in &self.messages {
2017 messages.extend(message.to_request());
2018 }
2019
2020 if let Some(last_message) = messages.last_mut() {
2021 last_message.cache = true;
2022 }
2023
2024 if let Some(message) = self.pending_message.as_ref() {
2025 messages.extend(message.to_request());
2026 }
2027
2028 messages
2029 }
2030
2031 pub fn to_markdown(&self) -> String {
2032 let mut markdown = String::new();
2033 for (ix, message) in self.messages.iter().enumerate() {
2034 if ix > 0 {
2035 markdown.push('\n');
2036 }
2037 markdown.push_str(&message.to_markdown());
2038 }
2039
2040 if let Some(message) = self.pending_message.as_ref() {
2041 markdown.push('\n');
2042 markdown.push_str(&message.to_markdown());
2043 }
2044
2045 markdown
2046 }
2047
2048 fn advance_prompt_id(&mut self) {
2049 self.prompt_id = PromptId::new();
2050 }
2051
2052 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2053 use LanguageModelCompletionError::*;
2054 use http_client::StatusCode;
2055
2056 // General strategy here:
2057 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2058 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times with exponential backoff.
2059 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2060 match error {
2061 HttpResponseError {
2062 status_code: StatusCode::TOO_MANY_REQUESTS,
2063 ..
2064 } => Some(RetryStrategy::ExponentialBackoff {
2065 initial_delay: BASE_RETRY_DELAY,
2066 max_attempts: MAX_RETRY_ATTEMPTS,
2067 }),
2068 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2069 Some(RetryStrategy::Fixed {
2070 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2071 max_attempts: MAX_RETRY_ATTEMPTS,
2072 })
2073 }
2074 UpstreamProviderError {
2075 status,
2076 retry_after,
2077 ..
2078 } => match *status {
2079 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2080 Some(RetryStrategy::Fixed {
2081 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2082 max_attempts: MAX_RETRY_ATTEMPTS,
2083 })
2084 }
2085 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2086 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2087 // Internal Server Error could be anything, retry up to 3 times.
2088 max_attempts: 3,
2089 }),
2090 status => {
2091 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2092 // but we frequently get them in practice. See https://http.dev/529
2093 if status.as_u16() == 529 {
2094 Some(RetryStrategy::Fixed {
2095 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2096 max_attempts: MAX_RETRY_ATTEMPTS,
2097 })
2098 } else {
2099 Some(RetryStrategy::Fixed {
2100 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2101 max_attempts: 2,
2102 })
2103 }
2104 }
2105 },
2106 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2107 delay: BASE_RETRY_DELAY,
2108 max_attempts: 3,
2109 }),
2110 ApiReadResponseError { .. }
2111 | HttpSend { .. }
2112 | DeserializeResponse { .. }
2113 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2114 delay: BASE_RETRY_DELAY,
2115 max_attempts: 3,
2116 }),
2117 // Retrying these errors definitely shouldn't help.
2118 HttpResponseError {
2119 status_code:
2120 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2121 ..
2122 }
2123 | AuthenticationError { .. }
2124 | PermissionError { .. }
2125 | NoApiKey { .. }
2126 | ApiEndpointNotFound { .. }
2127 | PromptTooLarge { .. } => None,
2128 // These errors might be transient, so retry them
2129 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2130 delay: BASE_RETRY_DELAY,
2131 max_attempts: 1,
2132 }),
2133 // Retry all other 4xx and 5xx errors once.
2134 HttpResponseError { status_code, .. }
2135 if status_code.is_client_error() || status_code.is_server_error() =>
2136 {
2137 Some(RetryStrategy::Fixed {
2138 delay: BASE_RETRY_DELAY,
2139 max_attempts: 3,
2140 })
2141 }
2142 Other(err)
2143 if err.is::<language_model::PaymentRequiredError>()
2144 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2145 {
2146 // Retrying won't help for Payment Required or Model Request Limit errors (where
2147 // the user must upgrade to usage-based billing to get more requests, or else wait
2148 // for a significant amount of time for the request limit to reset).
2149 None
2150 }
2151 // Conservatively assume that any other errors are non-retryable
2152 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2153 delay: BASE_RETRY_DELAY,
2154 max_attempts: 2,
2155 }),
2156 }
2157 }
2158}
2159
2160struct RunningTurn {
2161 /// Holds the task that handles agent interaction until the end of the turn.
2162 /// Survives across multiple requests as the model performs tool calls and
2163 /// we run tools, report their results.
2164 _task: Task<()>,
2165 /// The current event stream for the running turn. Used to report a final
2166 /// cancellation event if we cancel the turn.
2167 event_stream: ThreadEventStream,
2168 /// The tools that were enabled for this turn.
2169 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2170}
2171
2172impl RunningTurn {
2173 fn cancel(self) {
2174 log::debug!("Cancelling in progress turn");
2175 self.event_stream.send_canceled();
2176 }
2177}
2178
2179pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2180
2181impl EventEmitter<TokenUsageUpdated> for Thread {}
2182
2183pub struct TitleUpdated;
2184
2185impl EventEmitter<TitleUpdated> for Thread {}
2186
2187pub trait AgentTool
2188where
2189 Self: 'static + Sized,
2190{
2191 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2192 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2193
2194 fn name() -> &'static str;
2195
2196 fn description() -> SharedString {
2197 let schema = schemars::schema_for!(Self::Input);
2198 SharedString::new(
2199 schema
2200 .get("description")
2201 .and_then(|description| description.as_str())
2202 .unwrap_or_default(),
2203 )
2204 }
2205
2206 fn kind() -> acp::ToolKind;
2207
2208 /// The initial tool title to display. Can be updated during the tool run.
2209 fn initial_title(
2210 &self,
2211 input: Result<Self::Input, serde_json::Value>,
2212 cx: &mut App,
2213 ) -> SharedString;
2214
2215 /// Returns the JSON schema that describes the tool's input.
2216 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2217 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2218 }
2219
2220 /// Some tools rely on a provider for the underlying billing or other reasons.
2221 /// Allow the tool to check if they are compatible, or should be filtered out.
2222 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2223 true
2224 }
2225
2226 /// Runs the tool with the provided input.
2227 fn run(
2228 self: Arc<Self>,
2229 input: Self::Input,
2230 event_stream: ToolCallEventStream,
2231 cx: &mut App,
2232 ) -> Task<Result<Self::Output>>;
2233
2234 /// Emits events for a previous execution of the tool.
2235 fn replay(
2236 &self,
2237 _input: Self::Input,
2238 _output: Self::Output,
2239 _event_stream: ToolCallEventStream,
2240 _cx: &mut App,
2241 ) -> Result<()> {
2242 Ok(())
2243 }
2244
2245 fn erase(self) -> Arc<dyn AnyAgentTool> {
2246 Arc::new(Erased(Arc::new(self)))
2247 }
2248}
2249
2250pub struct Erased<T>(T);
2251
2252pub struct AgentToolOutput {
2253 pub llm_output: LanguageModelToolResultContent,
2254 pub raw_output: serde_json::Value,
2255}
2256
2257pub trait AnyAgentTool {
2258 fn name(&self) -> SharedString;
2259 fn description(&self) -> SharedString;
2260 fn kind(&self) -> acp::ToolKind;
2261 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2262 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2263 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2264 true
2265 }
2266 fn run(
2267 self: Arc<Self>,
2268 input: serde_json::Value,
2269 event_stream: ToolCallEventStream,
2270 cx: &mut App,
2271 ) -> Task<Result<AgentToolOutput>>;
2272 fn replay(
2273 &self,
2274 input: serde_json::Value,
2275 output: serde_json::Value,
2276 event_stream: ToolCallEventStream,
2277 cx: &mut App,
2278 ) -> Result<()>;
2279}
2280
2281impl<T> AnyAgentTool for Erased<Arc<T>>
2282where
2283 T: AgentTool,
2284{
2285 fn name(&self) -> SharedString {
2286 T::name().into()
2287 }
2288
2289 fn description(&self) -> SharedString {
2290 T::description()
2291 }
2292
2293 fn kind(&self) -> agent_client_protocol::ToolKind {
2294 T::kind()
2295 }
2296
2297 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString {
2298 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2299 self.0.initial_title(parsed_input, _cx)
2300 }
2301
2302 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2303 let mut json = serde_json::to_value(T::input_schema(format))?;
2304 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2305 Ok(json)
2306 }
2307
2308 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2309 T::supports_provider(provider)
2310 }
2311
2312 fn run(
2313 self: Arc<Self>,
2314 input: serde_json::Value,
2315 event_stream: ToolCallEventStream,
2316 cx: &mut App,
2317 ) -> Task<Result<AgentToolOutput>> {
2318 cx.spawn(async move |cx| {
2319 let input = serde_json::from_value(input)?;
2320 let output = cx
2321 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2322 .await?;
2323 let raw_output = serde_json::to_value(&output)?;
2324 Ok(AgentToolOutput {
2325 llm_output: output.into(),
2326 raw_output,
2327 })
2328 })
2329 }
2330
2331 fn replay(
2332 &self,
2333 input: serde_json::Value,
2334 output: serde_json::Value,
2335 event_stream: ToolCallEventStream,
2336 cx: &mut App,
2337 ) -> Result<()> {
2338 let input = serde_json::from_value(input)?;
2339 let output = serde_json::from_value(output)?;
2340 self.0.replay(input, output, event_stream, cx)
2341 }
2342}
2343
2344#[derive(Clone)]
2345struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2346
2347impl ThreadEventStream {
2348 fn send_user_message(&self, message: &UserMessage) {
2349 self.0
2350 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2351 .ok();
2352 }
2353
2354 fn send_text(&self, text: &str) {
2355 self.0
2356 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2357 .ok();
2358 }
2359
2360 fn send_thinking(&self, text: &str) {
2361 self.0
2362 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2363 .ok();
2364 }
2365
2366 fn send_tool_call(
2367 &self,
2368 id: &LanguageModelToolUseId,
2369 tool_name: &str,
2370 title: SharedString,
2371 kind: acp::ToolKind,
2372 input: serde_json::Value,
2373 ) {
2374 self.0
2375 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2376 id,
2377 tool_name,
2378 title.to_string(),
2379 kind,
2380 input,
2381 ))))
2382 .ok();
2383 }
2384
2385 fn initial_tool_call(
2386 id: &LanguageModelToolUseId,
2387 tool_name: &str,
2388 title: String,
2389 kind: acp::ToolKind,
2390 input: serde_json::Value,
2391 ) -> acp::ToolCall {
2392 acp::ToolCall {
2393 meta: Some(serde_json::json!({
2394 "tool_name": tool_name
2395 })),
2396 id: acp::ToolCallId(id.to_string().into()),
2397 title,
2398 kind,
2399 status: acp::ToolCallStatus::Pending,
2400 content: vec![],
2401 locations: vec![],
2402 raw_input: Some(input),
2403 raw_output: None,
2404 }
2405 }
2406
2407 fn update_tool_call_fields(
2408 &self,
2409 tool_use_id: &LanguageModelToolUseId,
2410 fields: acp::ToolCallUpdateFields,
2411 ) {
2412 self.0
2413 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2414 acp::ToolCallUpdate {
2415 meta: None,
2416 id: acp::ToolCallId(tool_use_id.to_string().into()),
2417 fields,
2418 }
2419 .into(),
2420 )))
2421 .ok();
2422 }
2423
2424 fn send_retry(&self, status: acp_thread::RetryStatus) {
2425 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2426 }
2427
2428 fn send_stop(&self, reason: acp::StopReason) {
2429 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2430 }
2431
2432 fn send_canceled(&self) {
2433 self.0
2434 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2435 .ok();
2436 }
2437
2438 fn send_error(&self, error: impl Into<anyhow::Error>) {
2439 self.0.unbounded_send(Err(error.into())).ok();
2440 }
2441}
2442
2443#[derive(Clone)]
2444pub struct ToolCallEventStream {
2445 tool_use_id: LanguageModelToolUseId,
2446 stream: ThreadEventStream,
2447 fs: Option<Arc<dyn Fs>>,
2448}
2449
2450impl ToolCallEventStream {
2451 #[cfg(any(test, feature = "test-support"))]
2452 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2453 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2454
2455 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2456
2457 (stream, ToolCallEventStreamReceiver(events_rx))
2458 }
2459
2460 fn new(
2461 tool_use_id: LanguageModelToolUseId,
2462 stream: ThreadEventStream,
2463 fs: Option<Arc<dyn Fs>>,
2464 ) -> Self {
2465 Self {
2466 tool_use_id,
2467 stream,
2468 fs,
2469 }
2470 }
2471
2472 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2473 self.stream
2474 .update_tool_call_fields(&self.tool_use_id, fields);
2475 }
2476
2477 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2478 self.stream
2479 .0
2480 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2481 acp_thread::ToolCallUpdateDiff {
2482 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2483 diff,
2484 }
2485 .into(),
2486 )))
2487 .ok();
2488 }
2489
2490 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2491 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2492 return Task::ready(Ok(()));
2493 }
2494
2495 let (response_tx, response_rx) = oneshot::channel();
2496 self.stream
2497 .0
2498 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2499 ToolCallAuthorization {
2500 tool_call: acp::ToolCallUpdate {
2501 meta: None,
2502 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2503 fields: acp::ToolCallUpdateFields {
2504 title: Some(title.into()),
2505 ..Default::default()
2506 },
2507 },
2508 options: vec![
2509 acp::PermissionOption {
2510 id: acp::PermissionOptionId("always_allow".into()),
2511 name: "Always Allow".into(),
2512 kind: acp::PermissionOptionKind::AllowAlways,
2513 meta: None,
2514 },
2515 acp::PermissionOption {
2516 id: acp::PermissionOptionId("allow".into()),
2517 name: "Allow".into(),
2518 kind: acp::PermissionOptionKind::AllowOnce,
2519 meta: None,
2520 },
2521 acp::PermissionOption {
2522 id: acp::PermissionOptionId("deny".into()),
2523 name: "Deny".into(),
2524 kind: acp::PermissionOptionKind::RejectOnce,
2525 meta: None,
2526 },
2527 ],
2528 response: response_tx,
2529 },
2530 )))
2531 .ok();
2532 let fs = self.fs.clone();
2533 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2534 "always_allow" => {
2535 if let Some(fs) = fs.clone() {
2536 cx.update(|cx| {
2537 update_settings_file(fs, cx, |settings, _| {
2538 settings
2539 .agent
2540 .get_or_insert_default()
2541 .set_always_allow_tool_actions(true);
2542 });
2543 })?;
2544 }
2545
2546 Ok(())
2547 }
2548 "allow" => Ok(()),
2549 _ => Err(anyhow!("Permission to run tool denied by user")),
2550 })
2551 }
2552}
2553
2554#[cfg(any(test, feature = "test-support"))]
2555pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2556
2557#[cfg(any(test, feature = "test-support"))]
2558impl ToolCallEventStreamReceiver {
2559 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2560 let event = self.0.next().await;
2561 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2562 auth
2563 } else {
2564 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2565 }
2566 }
2567
2568 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2569 let event = self.0.next().await;
2570 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2571 update,
2572 )))) = event
2573 {
2574 update.fields
2575 } else {
2576 panic!("Expected update fields but got: {:?}", event);
2577 }
2578 }
2579
2580 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2581 let event = self.0.next().await;
2582 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2583 update,
2584 )))) = event
2585 {
2586 update.diff
2587 } else {
2588 panic!("Expected diff but got: {:?}", event);
2589 }
2590 }
2591
2592 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2593 let event = self.0.next().await;
2594 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2595 update,
2596 )))) = event
2597 {
2598 update.terminal
2599 } else {
2600 panic!("Expected terminal but got: {:?}", event);
2601 }
2602 }
2603}
2604
2605#[cfg(any(test, feature = "test-support"))]
2606impl std::ops::Deref for ToolCallEventStreamReceiver {
2607 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2608
2609 fn deref(&self) -> &Self::Target {
2610 &self.0
2611 }
2612}
2613
2614#[cfg(any(test, feature = "test-support"))]
2615impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2616 fn deref_mut(&mut self) -> &mut Self::Target {
2617 &mut self.0
2618 }
2619}
2620
2621impl From<&str> for UserMessageContent {
2622 fn from(text: &str) -> Self {
2623 Self::Text(text.into())
2624 }
2625}
2626
2627impl UserMessageContent {
2628 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2629 match value {
2630 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2631 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2632 acp::ContentBlock::Audio(_) => {
2633 // TODO
2634 Self::Text("[audio]".to_string())
2635 }
2636 acp::ContentBlock::ResourceLink(resource_link) => {
2637 match MentionUri::parse(&resource_link.uri, path_style) {
2638 Ok(uri) => Self::Mention {
2639 uri,
2640 content: String::new(),
2641 },
2642 Err(err) => {
2643 log::error!("Failed to parse mention link: {}", err);
2644 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2645 }
2646 }
2647 }
2648 acp::ContentBlock::Resource(resource) => match resource.resource {
2649 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2650 match MentionUri::parse(&resource.uri, path_style) {
2651 Ok(uri) => Self::Mention {
2652 uri,
2653 content: resource.text,
2654 },
2655 Err(err) => {
2656 log::error!("Failed to parse mention link: {}", err);
2657 Self::Text(
2658 MarkdownCodeBlock {
2659 tag: &resource.uri,
2660 text: &resource.text,
2661 }
2662 .to_string(),
2663 )
2664 }
2665 }
2666 }
2667 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2668 // TODO
2669 Self::Text("[blob]".to_string())
2670 }
2671 },
2672 }
2673 }
2674}
2675
2676impl From<UserMessageContent> for acp::ContentBlock {
2677 fn from(content: UserMessageContent) -> Self {
2678 match content {
2679 UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
2680 text,
2681 annotations: None,
2682 meta: None,
2683 }),
2684 UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
2685 data: image.source.to_string(),
2686 mime_type: "image/png".to_string(),
2687 meta: None,
2688 annotations: None,
2689 uri: None,
2690 }),
2691 UserMessageContent::Mention { uri, content } => {
2692 acp::ContentBlock::Resource(acp::EmbeddedResource {
2693 meta: None,
2694 resource: acp::EmbeddedResourceResource::TextResourceContents(
2695 acp::TextResourceContents {
2696 meta: None,
2697 mime_type: None,
2698 text: content,
2699 uri: uri.to_uri().to_string(),
2700 },
2701 ),
2702 annotations: None,
2703 })
2704 }
2705 }
2706 }
2707}
2708
2709fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2710 LanguageModelImage {
2711 source: image_content.data.into(),
2712 // TODO: make this optional?
2713 size: gpui::Size::new(0.into(), 0.into()),
2714 }
2715}