1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 SystemPromptTemplate, Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6};
7use acp_thread::{MentionUri, UserMessageId};
8use action_log::ActionLog;
9
10use agent_client_protocol as acp;
11use agent_settings::{
12 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
13 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
14};
15use anyhow::{Context as _, Result, anyhow};
16use chrono::{DateTime, Utc};
17use client::{ModelRequestUsage, RequestUsage, UserStore};
18use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
19use collections::{HashMap, HashSet, IndexMap};
20use fs::Fs;
21use futures::stream;
22use futures::{
23 FutureExt,
24 channel::{mpsc, oneshot},
25 future::Shared,
26 stream::FuturesUnordered,
27};
28use gpui::{
29 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
30};
31use language_model::{
32 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
33 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
34 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
35 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
36 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
37 ZED_CLOUD_PROVIDER_ID,
38};
39use project::Project;
40use prompt_store::ProjectContext;
41use schemars::{JsonSchema, Schema};
42use serde::{Deserialize, Serialize};
43use settings::{LanguageModelSelection, Settings, update_settings_file};
44use smol::stream::StreamExt;
45use std::{
46 collections::BTreeMap,
47 ops::RangeInclusive,
48 path::Path,
49 rc::Rc,
50 sync::Arc,
51 time::{Duration, Instant},
52};
53use std::{fmt::Write, path::PathBuf};
54use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
55use uuid::Uuid;
56
57const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
58pub const MAX_TOOL_NAME_LENGTH: usize = 64;
59
60/// The ID of the user prompt that initiated a request.
61///
62/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
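///
/// A fresh UUID-backed ID is minted each time (see `PromptId::new`), and every
/// completion request made while handling that prompt reports the same ID
/// (see `build_completion_request`), so retries and tool-call follow-ups can be
/// correlated back to the prompt that triggered them.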
63#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
64pub struct PromptId(Arc<str>);
65
66impl PromptId {
67 pub fn new() -> Self {
68 Self(Uuid::new_v4().to_string().into())
69 }
70}
71
72impl std::fmt::Display for PromptId {
73 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
74 write!(f, "{}", self.0)
75 }
76}
77
78pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
79pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
80
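/// How a failed completion request is retried (see `handle_completion_error`).
///
/// With `ExponentialBackoff` the delay doubles on each attempt, so an initial
/// delay of `BASE_RETRY_DELAY` (5s) with 4 max attempts would yield roughly
/// 5s, 10s, 20s, and 40s waits before giving up; `Fixed` waits the same
/// `delay` every time.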
81#[derive(Debug, Clone)]
82enum RetryStrategy {
83 ExponentialBackoff {
84 initial_delay: Duration,
85 max_attempts: u8,
86 },
87 Fixed {
88 delay: Duration,
89 max_attempts: u8,
90 },
91}
92
93#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
94pub enum Message {
95 User(UserMessage),
96 Agent(AgentMessage),
97 Resume,
98}
99
100impl Message {
101 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
102 match self {
103 Message::Agent(agent_message) => Some(agent_message),
104 _ => None,
105 }
106 }
107
108 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
109 match self {
110 Message::User(message) => vec![message.to_request()],
111 Message::Agent(message) => message.to_request(),
112 Message::Resume => vec![LanguageModelRequestMessage {
113 role: Role::User,
114 content: vec!["Continue where you left off".into()],
115 cache: false,
116 reasoning_details: None,
117 }],
118 }
119 }
120
121 pub fn to_markdown(&self) -> String {
122 match self {
123 Message::User(message) => message.to_markdown(),
124 Message::Agent(message) => message.to_markdown(),
125 Message::Resume => "[resume]\n".into(),
126 }
127 }
128
129 pub fn role(&self) -> Role {
130 match self {
131 Message::User(_) | Message::Resume => Role::User,
132 Message::Agent(_) => Role::Assistant,
133 }
134 }
135}
136
137#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
138pub struct UserMessage {
139 pub id: UserMessageId,
140 pub content: Vec<UserMessageContent>,
141}
142
143#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
144pub enum UserMessageContent {
145 Text(String),
146 Mention { uri: MentionUri, content: String },
147 Image(LanguageModelImage),
148}
149
150impl UserMessage {
151 pub fn to_markdown(&self) -> String {
152 let mut markdown = String::from("## User\n\n");
153
154 for content in &self.content {
155 match content {
156 UserMessageContent::Text(text) => {
157 markdown.push_str(text);
158 markdown.push('\n');
159 }
160 UserMessageContent::Image(_) => {
161 markdown.push_str("<image />\n");
162 }
163 UserMessageContent::Mention { uri, content } => {
164 if !content.is_empty() {
165 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
166 } else {
167 let _ = writeln!(&mut markdown, "{}", uri.as_link());
168 }
169 }
170 }
171 }
172
173 markdown
174 }
175
176 fn to_request(&self) -> LanguageModelRequestMessage {
177 let mut message = LanguageModelRequestMessage {
178 role: Role::User,
179 content: Vec::with_capacity(self.content.len()),
180 cache: false,
181 reasoning_details: None,
182 };
183
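        // Mentions are grouped by kind and appended after the user's literal
        // text in roughly this shape (illustrative; only non-empty sections are
        // emitted):
        //
        //   <context>
        //   The following items were attached by the user. ...
        //   <files>...</files>
        //   <directories>...</directories>
        //   <symbols>...</symbols>
        //   <selections>...</selections>
        //   <threads>...</threads>
        //   <fetched_urls>...</fetched_urls>
        //   <user_rules>...</user_rules>
        //   </context>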
184 const OPEN_CONTEXT: &str = "<context>\n\
185 The following items were attached by the user. \
186 They are up-to-date and don't need to be re-read.\n\n";
187
188 const OPEN_FILES_TAG: &str = "<files>";
189 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
190 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
191 const OPEN_SELECTIONS_TAG: &str = "<selections>";
192 const OPEN_THREADS_TAG: &str = "<threads>";
193 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
        const OPEN_RULES_TAG: &str =
            "<user_rules>\nThe user has specified the following rules that should be applied:\n";
196
197 let mut file_context = OPEN_FILES_TAG.to_string();
198 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
199 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
200 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
201 let mut thread_context = OPEN_THREADS_TAG.to_string();
202 let mut fetch_context = OPEN_FETCH_TAG.to_string();
203 let mut rules_context = OPEN_RULES_TAG.to_string();
204
205 for chunk in &self.content {
206 let chunk = match chunk {
207 UserMessageContent::Text(text) => {
208 language_model::MessageContent::Text(text.clone())
209 }
210 UserMessageContent::Image(value) => {
211 language_model::MessageContent::Image(value.clone())
212 }
213 UserMessageContent::Mention { uri, content } => {
214 match uri {
215 MentionUri::File { abs_path } => {
216 write!(
217 &mut file_context,
218 "\n{}",
219 MarkdownCodeBlock {
220 tag: &codeblock_tag(abs_path, None),
221 text: &content.to_string(),
222 }
223 )
224 .ok();
225 }
226 MentionUri::PastedImage => {
227 debug_panic!("pasted image URI should not be used in mention content")
228 }
229 MentionUri::Directory { .. } => {
230 write!(&mut directory_context, "\n{}\n", content).ok();
231 }
232 MentionUri::Symbol {
233 abs_path: path,
234 line_range,
235 ..
236 } => {
237 write!(
238 &mut symbol_context,
239 "\n{}",
240 MarkdownCodeBlock {
241 tag: &codeblock_tag(path, Some(line_range)),
242 text: content
243 }
244 )
245 .ok();
246 }
247 MentionUri::Selection {
248 abs_path: path,
249 line_range,
250 ..
251 } => {
252 write!(
253 &mut selection_context,
254 "\n{}",
255 MarkdownCodeBlock {
256 tag: &codeblock_tag(
257 path.as_deref().unwrap_or("Untitled".as_ref()),
258 Some(line_range)
259 ),
260 text: content
261 }
262 )
263 .ok();
264 }
265 MentionUri::Thread { .. } => {
266 write!(&mut thread_context, "\n{}\n", content).ok();
267 }
268 MentionUri::TextThread { .. } => {
269 write!(&mut thread_context, "\n{}\n", content).ok();
270 }
271 MentionUri::Rule { .. } => {
272 write!(
273 &mut rules_context,
274 "\n{}",
275 MarkdownCodeBlock {
276 tag: "",
277 text: content
278 }
279 )
280 .ok();
281 }
282 MentionUri::Fetch { url } => {
283 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
284 }
285 }
286
287 language_model::MessageContent::Text(uri.as_link().to_string())
288 }
289 };
290
291 message.content.push(chunk);
292 }
293
294 let len_before_context = message.content.len();
295
296 if file_context.len() > OPEN_FILES_TAG.len() {
297 file_context.push_str("</files>\n");
298 message
299 .content
300 .push(language_model::MessageContent::Text(file_context));
301 }
302
303 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
304 directory_context.push_str("</directories>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(directory_context));
308 }
309
310 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
311 symbol_context.push_str("</symbols>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(symbol_context));
315 }
316
317 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
318 selection_context.push_str("</selections>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(selection_context));
322 }
323
324 if thread_context.len() > OPEN_THREADS_TAG.len() {
325 thread_context.push_str("</threads>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(thread_context));
329 }
330
331 if fetch_context.len() > OPEN_FETCH_TAG.len() {
332 fetch_context.push_str("</fetched_urls>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(fetch_context));
336 }
337
338 if rules_context.len() > OPEN_RULES_TAG.len() {
339 rules_context.push_str("</user_rules>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(rules_context));
343 }
344
345 if message.content.len() > len_before_context {
346 message.content.insert(
347 len_before_context,
348 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
349 );
350 message
351 .content
352 .push(language_model::MessageContent::Text("</context>".into()));
353 }
354
355 message
356 }
357}
358
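/// Builds the tag for a markdown code block from a path and an optional
/// 0-based line range, e.g. `codeblock_tag(Path::new("src/main.rs"), Some(&(4..=9)))`
/// yields `"rs src/main.rs:5-10"` (line numbers are rendered 1-based).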
359fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
360 let mut result = String::new();
361
362 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
363 let _ = write!(result, "{} ", extension);
364 }
365
366 let _ = write!(result, "{}", full_path.display());
367
368 if let Some(range) = line_range {
369 if range.start() == range.end() {
370 let _ = write!(result, ":{}", range.start() + 1);
371 } else {
372 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
373 }
374 }
375
376 result
377}
378
379impl AgentMessage {
380 pub fn to_markdown(&self) -> String {
381 let mut markdown = String::from("## Assistant\n\n");
382
383 for content in &self.content {
384 match content {
385 AgentMessageContent::Text(text) => {
386 markdown.push_str(text);
387 markdown.push('\n');
388 }
389 AgentMessageContent::Thinking { text, .. } => {
390 markdown.push_str("<think>");
391 markdown.push_str(text);
392 markdown.push_str("</think>\n");
393 }
394 AgentMessageContent::RedactedThinking(_) => {
395 markdown.push_str("<redacted_thinking />\n")
396 }
397 AgentMessageContent::ToolUse(tool_use) => {
398 markdown.push_str(&format!(
399 "**Tool Use**: {} (ID: {})\n",
400 tool_use.name, tool_use.id
401 ));
402 markdown.push_str(&format!(
403 "{}\n",
404 MarkdownCodeBlock {
405 tag: "json",
406 text: &format!("{:#}", tool_use.input)
407 }
408 ));
409 }
410 }
411 }
412
413 for tool_result in self.tool_results.values() {
414 markdown.push_str(&format!(
415 "**Tool Result**: {} (ID: {})\n\n",
416 tool_result.tool_name, tool_result.tool_use_id
417 ));
418 if tool_result.is_error {
419 markdown.push_str("**ERROR:**\n");
420 }
421
422 match &tool_result.content {
423 LanguageModelToolResultContent::Text(text) => {
424 writeln!(markdown, "{text}\n").ok();
425 }
426 LanguageModelToolResultContent::Image(_) => {
427 writeln!(markdown, "<image />\n").ok();
428 }
429 }
430
431 if let Some(output) = tool_result.output.as_ref() {
432 writeln!(
433 markdown,
434 "**Debug Output**:\n\n```json\n{}\n```\n",
435 serde_json::to_string_pretty(output).unwrap()
436 )
437 .unwrap();
438 }
439 }
440
441 markdown
442 }
443
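    /// Converts this agent message into request messages: an assistant message
    /// with the text/thinking/tool-use content (tool uses that never produced a
    /// result are dropped), followed by a user message carrying the tool
    /// results.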
444 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
445 let mut assistant_message = LanguageModelRequestMessage {
446 role: Role::Assistant,
447 content: Vec::with_capacity(self.content.len()),
448 cache: false,
449 reasoning_details: self.reasoning_details.clone(),
450 };
451 for chunk in &self.content {
452 match chunk {
453 AgentMessageContent::Text(text) => {
454 assistant_message
455 .content
456 .push(language_model::MessageContent::Text(text.clone()));
457 }
458 AgentMessageContent::Thinking { text, signature } => {
459 assistant_message
460 .content
461 .push(language_model::MessageContent::Thinking {
462 text: text.clone(),
463 signature: signature.clone(),
464 });
465 }
466 AgentMessageContent::RedactedThinking(value) => {
467 assistant_message.content.push(
468 language_model::MessageContent::RedactedThinking(value.clone()),
469 );
470 }
471 AgentMessageContent::ToolUse(tool_use) => {
472 if self.tool_results.contains_key(&tool_use.id) {
473 assistant_message
474 .content
475 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
476 }
477 }
478 };
479 }
480
481 let mut user_message = LanguageModelRequestMessage {
482 role: Role::User,
483 content: Vec::new(),
484 cache: false,
485 reasoning_details: None,
486 };
487
488 for tool_result in self.tool_results.values() {
489 let mut tool_result = tool_result.clone();
490 // Surprisingly, the API fails if we return an empty string here.
491 // It thinks we are sending a tool use without a tool result.
492 if tool_result.content.is_empty() {
493 tool_result.content = "<Tool returned an empty string>".into();
494 }
495 user_message
496 .content
497 .push(language_model::MessageContent::ToolResult(tool_result));
498 }
499
500 let mut messages = Vec::new();
501 if !assistant_message.content.is_empty() {
502 messages.push(assistant_message);
503 }
504 if !user_message.content.is_empty() {
505 messages.push(user_message);
506 }
507 messages
508 }
509}
510
511#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
512pub struct AgentMessage {
513 pub content: Vec<AgentMessageContent>,
514 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
515 pub reasoning_details: Option<serde_json::Value>,
516}
517
518#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
519pub enum AgentMessageContent {
520 Text(String),
521 Thinking {
522 text: String,
523 signature: Option<String>,
524 },
525 RedactedThinking(String),
526 ToolUse(LanguageModelToolUse),
527}
528
529pub trait TerminalHandle {
530 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
531 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
532 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
533}
534
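/// Supplies terminals to the thread's tools (see `TerminalTool` in
/// `add_default_tools`), keeping command execution decoupled from any concrete
/// terminal implementation.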
535pub trait ThreadEnvironment {
536 fn create_terminal(
537 &self,
538 command: String,
539 cwd: Option<PathBuf>,
540 output_byte_limit: Option<u64>,
541 cx: &mut AsyncApp,
542 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
543}
544
545#[derive(Debug)]
546pub enum ThreadEvent {
547 UserMessage(UserMessage),
548 AgentText(String),
549 AgentThinking(String),
550 ToolCall(acp::ToolCall),
551 ToolCallUpdate(acp_thread::ToolCallUpdate),
552 ToolCallAuthorization(ToolCallAuthorization),
553 Retry(acp_thread::RetryStatus),
554 Stop(acp::StopReason),
555}
556
557#[derive(Debug)]
558pub struct NewTerminal {
559 pub command: String,
560 pub output_byte_limit: Option<u64>,
561 pub cwd: Option<PathBuf>,
562 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
563}
564
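/// Emitted when a tool call needs explicit user approval: the receiver should
/// pick one of `options` and send its ID back through `response`.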
565#[derive(Debug)]
566pub struct ToolCallAuthorization {
567 pub tool_call: acp::ToolCallUpdate,
568 pub options: Vec<acp::PermissionOption>,
569 pub response: oneshot::Sender<acp::PermissionOptionId>,
570}
571
572#[derive(Debug, thiserror::Error)]
573enum CompletionError {
574 #[error("max tokens")]
575 MaxTokens,
576 #[error("refusal")]
577 Refusal,
578 #[error(transparent)]
579 Other(#[from] anyhow::Error),
580}
581
582pub struct Thread {
583 id: acp::SessionId,
584 prompt_id: PromptId,
585 updated_at: DateTime<Utc>,
586 title: Option<SharedString>,
587 pending_title_generation: Option<Task<()>>,
588 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
589 summary: Option<SharedString>,
590 messages: Vec<Message>,
591 user_store: Entity<UserStore>,
592 completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// It survives across multiple requests while the model calls tools and we
    /// run them and report their results back.
596 running_turn: Option<RunningTurn>,
597 pending_message: Option<AgentMessage>,
598 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
599 tool_use_limit_reached: bool,
600 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
601 #[allow(unused)]
602 cumulative_token_usage: TokenUsage,
603 #[allow(unused)]
604 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
605 context_server_registry: Entity<ContextServerRegistry>,
606 profile_id: AgentProfileId,
607 project_context: Entity<ProjectContext>,
608 templates: Arc<Templates>,
609 model: Option<Arc<dyn LanguageModel>>,
610 summarization_model: Option<Arc<dyn LanguageModel>>,
611 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
612 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
613 pub(crate) project: Entity<Project>,
614 pub(crate) action_log: Entity<ActionLog>,
    /// Tracks the last time files were read by the agent, to detect external modifications.
616 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
617}
618
619impl Thread {
620 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
621 let image = model.map_or(true, |model| model.supports_images());
622 acp::PromptCapabilities::new()
623 .image(image)
624 .embedded_context(true)
625 }
626
627 pub fn new(
628 project: Entity<Project>,
629 project_context: Entity<ProjectContext>,
630 context_server_registry: Entity<ContextServerRegistry>,
631 templates: Arc<Templates>,
632 model: Option<Arc<dyn LanguageModel>>,
633 cx: &mut Context<Self>,
634 ) -> Self {
635 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
636 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
637 let (prompt_capabilities_tx, prompt_capabilities_rx) =
638 watch::channel(Self::prompt_capabilities(model.as_deref()));
639 Self {
640 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
641 prompt_id: PromptId::new(),
642 updated_at: Utc::now(),
643 title: None,
644 pending_title_generation: None,
645 pending_summary_generation: None,
646 summary: None,
647 messages: Vec::new(),
648 user_store: project.read(cx).user_store(),
649 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
650 running_turn: None,
651 pending_message: None,
652 tools: BTreeMap::default(),
653 tool_use_limit_reached: false,
654 request_token_usage: HashMap::default(),
655 cumulative_token_usage: TokenUsage::default(),
656 initial_project_snapshot: {
657 let project_snapshot = Self::project_snapshot(project.clone(), cx);
658 cx.foreground_executor()
659 .spawn(async move { Some(project_snapshot.await) })
660 .shared()
661 },
662 context_server_registry,
663 profile_id,
664 project_context,
665 templates,
666 model,
667 summarization_model: None,
668 prompt_capabilities_tx,
669 prompt_capabilities_rx,
670 project,
671 action_log,
672 file_read_times: HashMap::default(),
673 }
674 }
675
676 pub fn id(&self) -> &acp::SessionId {
677 &self.id
678 }
679
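    /// Replays the stored conversation as a stream of `ThreadEvent`s,
    /// re-emitting user messages, agent text/thinking, and tool calls together
    /// with their recorded results, so a client can rebuild the transcript
    /// without re-running anything.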
680 pub fn replay(
681 &mut self,
682 cx: &mut Context<Self>,
683 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
684 let (tx, rx) = mpsc::unbounded();
685 let stream = ThreadEventStream(tx);
686 for message in &self.messages {
687 match message {
688 Message::User(user_message) => stream.send_user_message(user_message),
689 Message::Agent(assistant_message) => {
690 for content in &assistant_message.content {
691 match content {
692 AgentMessageContent::Text(text) => stream.send_text(text),
693 AgentMessageContent::Thinking { text, .. } => {
694 stream.send_thinking(text)
695 }
696 AgentMessageContent::RedactedThinking(_) => {}
697 AgentMessageContent::ToolUse(tool_use) => {
698 self.replay_tool_call(
699 tool_use,
700 assistant_message.tool_results.get(&tool_use.id),
701 &stream,
702 cx,
703 );
704 }
705 }
706 }
707 }
708 Message::Resume => {}
709 }
710 }
711 rx
712 }
713
714 fn replay_tool_call(
715 &self,
716 tool_use: &LanguageModelToolUse,
717 tool_result: Option<&LanguageModelToolResult>,
718 stream: &ThreadEventStream,
719 cx: &mut Context<Self>,
720 ) {
721 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
722 self.context_server_registry
723 .read(cx)
724 .servers()
725 .find_map(|(_, tools)| {
726 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
727 Some(tool.clone())
728 } else {
729 None
730 }
731 })
732 });
733
734 let Some(tool) = tool else {
735 stream
736 .0
737 .unbounded_send(Ok(ThreadEvent::ToolCall(
738 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
739 .status(acp::ToolCallStatus::Failed)
740 .raw_input(tool_use.input.clone()),
741 )))
742 .ok();
743 return;
744 };
745
746 let title = tool.initial_title(tool_use.input.clone(), cx);
747 let kind = tool.kind();
748 stream.send_tool_call(
749 &tool_use.id,
750 &tool_use.name,
751 title,
752 kind,
753 tool_use.input.clone(),
754 );
755
756 let output = tool_result
757 .as_ref()
758 .and_then(|result| result.output.clone());
759 if let Some(output) = output.clone() {
760 let tool_event_stream = ToolCallEventStream::new(
761 tool_use.id.clone(),
762 stream.clone(),
763 Some(self.project.read(cx).fs().clone()),
764 );
765 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
766 .log_err();
767 }
768
769 let mut fields = acp::ToolCallUpdateFields::new().status(tool_result.as_ref().map_or(
770 acp::ToolCallStatus::Failed,
771 |result| {
772 if result.is_error {
773 acp::ToolCallStatus::Failed
774 } else {
775 acp::ToolCallStatus::Completed
776 }
777 },
778 ));
779 if let Some(output) = output {
780 fields = fields.raw_output(output);
781 }
782 stream.update_tool_call_fields(&tool_use.id, fields);
783 }
784
785 pub fn from_db(
786 id: acp::SessionId,
787 db_thread: DbThread,
788 project: Entity<Project>,
789 project_context: Entity<ProjectContext>,
790 context_server_registry: Entity<ContextServerRegistry>,
791 templates: Arc<Templates>,
792 cx: &mut Context<Self>,
793 ) -> Self {
794 let profile_id = db_thread
795 .profile
796 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
797
798 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
799 db_thread
800 .model
801 .and_then(|model| {
802 let model = SelectedModel {
803 provider: model.provider.clone().into(),
804 model: model.model.into(),
805 };
806 registry.select_model(&model, cx)
807 })
808 .or_else(|| registry.default_model())
809 .map(|model| model.model)
810 });
811
812 if model.is_none() {
813 model = Self::resolve_profile_model(&profile_id, cx);
814 }
815 if model.is_none() {
816 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
817 registry.default_model().map(|model| model.model)
818 });
819 }
820
821 let (prompt_capabilities_tx, prompt_capabilities_rx) =
822 watch::channel(Self::prompt_capabilities(model.as_deref()));
823
824 let action_log = cx.new(|_| ActionLog::new(project.clone()));
825
826 Self {
827 id,
828 prompt_id: PromptId::new(),
829 title: if db_thread.title.is_empty() {
830 None
831 } else {
832 Some(db_thread.title.clone())
833 },
834 pending_title_generation: None,
835 pending_summary_generation: None,
836 summary: db_thread.detailed_summary,
837 messages: db_thread.messages,
838 user_store: project.read(cx).user_store(),
839 completion_mode: db_thread.completion_mode.unwrap_or_default(),
840 running_turn: None,
841 pending_message: None,
842 tools: BTreeMap::default(),
843 tool_use_limit_reached: false,
844 request_token_usage: db_thread.request_token_usage.clone(),
845 cumulative_token_usage: db_thread.cumulative_token_usage,
846 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
847 context_server_registry,
848 profile_id,
849 project_context,
850 templates,
851 model,
852 summarization_model: None,
853 project,
854 action_log,
855 updated_at: db_thread.updated_at,
856 prompt_capabilities_tx,
857 prompt_capabilities_rx,
858 file_read_times: HashMap::default(),
859 }
860 }
861
862 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
863 let initial_project_snapshot = self.initial_project_snapshot.clone();
864 let mut thread = DbThread {
865 title: self.title(),
866 messages: self.messages.clone(),
867 updated_at: self.updated_at,
868 detailed_summary: self.summary.clone(),
869 initial_project_snapshot: None,
870 cumulative_token_usage: self.cumulative_token_usage,
871 request_token_usage: self.request_token_usage.clone(),
872 model: self.model.as_ref().map(|model| DbLanguageModel {
873 provider: model.provider_id().to_string(),
874 model: model.name().0.to_string(),
875 }),
876 completion_mode: Some(self.completion_mode),
877 profile: Some(self.profile_id.clone()),
878 };
879
880 cx.background_spawn(async move {
881 let initial_project_snapshot = initial_project_snapshot.await;
882 thread.initial_project_snapshot = initial_project_snapshot;
883 thread
884 })
885 }
886
887 /// Create a snapshot of the current project state including git information and unsaved buffers.
888 fn project_snapshot(
889 project: Entity<Project>,
890 cx: &mut Context<Self>,
891 ) -> Task<Arc<ProjectSnapshot>> {
892 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
893 cx.spawn(async move |_, _| {
894 let snapshot = task.await;
895
896 Arc::new(ProjectSnapshot {
897 worktree_snapshots: snapshot.worktree_snapshots,
898 timestamp: Utc::now(),
899 })
900 })
901 }
902
903 pub fn project_context(&self) -> &Entity<ProjectContext> {
904 &self.project_context
905 }
906
907 pub fn project(&self) -> &Entity<Project> {
908 &self.project
909 }
910
911 pub fn action_log(&self) -> &Entity<ActionLog> {
912 &self.action_log
913 }
914
915 pub fn is_empty(&self) -> bool {
916 self.messages.is_empty() && self.title.is_none()
917 }
918
919 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
920 self.model.as_ref()
921 }
922
923 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
924 let old_usage = self.latest_token_usage();
925 self.model = Some(model);
926 let new_caps = Self::prompt_capabilities(self.model.as_deref());
927 let new_usage = self.latest_token_usage();
928 if old_usage != new_usage {
929 cx.emit(TokenUsageUpdated(new_usage));
930 }
931 self.prompt_capabilities_tx.send(new_caps).log_err();
932 cx.notify()
933 }
934
935 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
936 self.summarization_model.as_ref()
937 }
938
939 pub fn set_summarization_model(
940 &mut self,
941 model: Option<Arc<dyn LanguageModel>>,
942 cx: &mut Context<Self>,
943 ) {
944 self.summarization_model = model;
945 cx.notify()
946 }
947
948 pub fn completion_mode(&self) -> CompletionMode {
949 self.completion_mode
950 }
951
952 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
953 let old_usage = self.latest_token_usage();
954 self.completion_mode = mode;
955 let new_usage = self.latest_token_usage();
956 if old_usage != new_usage {
957 cx.emit(TokenUsageUpdated(new_usage));
958 }
959 cx.notify()
960 }
961
962 #[cfg(any(test, feature = "test-support"))]
963 pub fn last_message(&self) -> Option<Message> {
964 if let Some(message) = self.pending_message.clone() {
965 Some(Message::Agent(message))
966 } else {
967 self.messages.last().cloned()
968 }
969 }
970
971 pub fn add_default_tools(
972 &mut self,
973 environment: Rc<dyn ThreadEnvironment>,
974 cx: &mut Context<Self>,
975 ) {
976 let language_registry = self.project.read(cx).languages().clone();
977 self.add_tool(CopyPathTool::new(self.project.clone()));
978 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
979 self.add_tool(DeletePathTool::new(
980 self.project.clone(),
981 self.action_log.clone(),
982 ));
983 self.add_tool(DiagnosticsTool::new(self.project.clone()));
984 self.add_tool(EditFileTool::new(
985 self.project.clone(),
986 cx.weak_entity(),
987 language_registry,
988 Templates::new(),
989 ));
990 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
991 self.add_tool(FindPathTool::new(self.project.clone()));
992 self.add_tool(GrepTool::new(self.project.clone()));
993 self.add_tool(ListDirectoryTool::new(self.project.clone()));
994 self.add_tool(MovePathTool::new(self.project.clone()));
995 self.add_tool(NowTool);
996 self.add_tool(OpenTool::new(self.project.clone()));
997 self.add_tool(ReadFileTool::new(
998 cx.weak_entity(),
999 self.project.clone(),
1000 self.action_log.clone(),
1001 ));
1002 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1003 self.add_tool(ThinkingTool);
1004 self.add_tool(WebSearchTool);
1005 }
1006
1007 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1008 self.tools.insert(T::name().into(), tool.erase());
1009 }
1010
1011 pub fn remove_tool(&mut self, name: &str) -> bool {
1012 self.tools.remove(name).is_some()
1013 }
1014
1015 pub fn profile(&self) -> &AgentProfileId {
1016 &self.profile_id
1017 }
1018
1019 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1020 if self.profile_id == profile_id {
1021 return;
1022 }
1023
1024 self.profile_id = profile_id;
1025
1026 // Swap to the profile's preferred model when available.
1027 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1028 self.set_model(model, cx);
1029 }
1030 }
1031
1032 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1033 if let Some(running_turn) = self.running_turn.take() {
1034 running_turn.cancel();
1035 }
1036 self.flush_pending_message(cx);
1037 }
1038
1039 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1040 let Some(last_user_message) = self.last_user_message() else {
1041 return;
1042 };
1043
1044 self.request_token_usage
1045 .insert(last_user_message.id.clone(), update);
1046 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1047 cx.notify();
1048 }
1049
1050 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1051 self.cancel(cx);
1052 let Some(position) = self.messages.iter().position(
1053 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1054 ) else {
1055 return Err(anyhow!("Message not found"));
1056 };
1057
1058 for message in self.messages.drain(position..) {
1059 match message {
1060 Message::User(message) => {
1061 self.request_token_usage.remove(&message.id);
1062 }
1063 Message::Agent(_) | Message::Resume => {}
1064 }
1065 }
1066 self.clear_summary();
1067 cx.notify();
1068 Ok(())
1069 }
1070
1071 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1072 let last_user_message = self.last_user_message()?;
1073 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1074 Some(*tokens)
1075 }
1076
1077 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1078 let usage = self.latest_request_token_usage()?;
1079 let model = self.model.clone()?;
1080 Some(acp_thread::TokenUsage {
1081 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1082 used_tokens: usage.total_tokens(),
1083 })
1084 }
1085
1086 /// Look up the active profile and resolve its preferred model if one is configured.
1087 fn resolve_profile_model(
1088 profile_id: &AgentProfileId,
1089 cx: &mut Context<Self>,
1090 ) -> Option<Arc<dyn LanguageModel>> {
1091 let selection = AgentSettings::get_global(cx)
1092 .profiles
1093 .get(profile_id)?
1094 .default_model
1095 .clone()?;
1096 Self::resolve_model_from_selection(&selection, cx)
1097 }
1098
1099 /// Translate a stored model selection into the configured model from the registry.
1100 fn resolve_model_from_selection(
1101 selection: &LanguageModelSelection,
1102 cx: &mut Context<Self>,
1103 ) -> Option<Arc<dyn LanguageModel>> {
1104 let selected = SelectedModel {
1105 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1106 model: LanguageModelId::from(selection.model.clone()),
1107 };
1108 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1109 registry
1110 .select_model(&selected, cx)
1111 .map(|configured| configured.model)
1112 })
1113 }
1114
1115 pub fn resume(
1116 &mut self,
1117 cx: &mut Context<Self>,
1118 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1119 self.messages.push(Message::Resume);
1120 cx.notify();
1121
1122 log::debug!("Total messages in thread: {}", self.messages.len());
1123 self.run_turn(cx)
1124 }
1125
    /// Sending a message results in the model streaming a response, which may include tool calls.
    /// After calling tools, the model stops and waits for any outstanding tool calls to be completed and their results sent back.
    /// The returned channel reports every stop that occurs before the model errors or ends its turn.
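    ///
    /// A minimal usage sketch (illustrative, not a doctest; assumes a model is
    /// already configured and `message_id` is a fresh `UserMessageId`):
    /// ```ignore
    /// // Inside an `update` on the Thread entity:
    /// let mut events = thread.send(
    ///     message_id,
    ///     [UserMessageContent::Text("Hello!".into())],
    ///     cx,
    /// )?;
    /// while let Some(event) = events.next().await {
    ///     // Handle ThreadEvent::AgentText, ToolCall, Stop, ...
    /// }
    /// ```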
1129 pub fn send<T>(
1130 &mut self,
1131 id: UserMessageId,
1132 content: impl IntoIterator<Item = T>,
1133 cx: &mut Context<Self>,
1134 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1135 where
1136 T: Into<UserMessageContent>,
1137 {
1138 let model = self.model().context("No language model configured")?;
1139
1140 log::info!("Thread::send called with model: {}", model.name().0);
1141 self.advance_prompt_id();
1142
1143 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1144 log::debug!("Thread::send content: {:?}", content);
1145
1146 self.messages
1147 .push(Message::User(UserMessage { id, content }));
1148 cx.notify();
1149
1150 log::debug!("Total messages in thread: {}", self.messages.len());
1151 self.run_turn(cx)
1152 }
1153
1154 #[cfg(feature = "eval")]
1155 pub fn proceed(
1156 &mut self,
1157 cx: &mut Context<Self>,
1158 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1159 self.run_turn(cx)
1160 }
1161
1162 fn run_turn(
1163 &mut self,
1164 cx: &mut Context<Self>,
1165 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1166 self.cancel(cx);
1167
1168 let model = self.model.clone().context("No language model configured")?;
1169 let profile = AgentSettings::get_global(cx)
1170 .profiles
1171 .get(&self.profile_id)
1172 .context("Profile not found")?;
1173 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1174 let event_stream = ThreadEventStream(events_tx);
1175 let message_ix = self.messages.len().saturating_sub(1);
1176 self.tool_use_limit_reached = false;
1177 self.clear_summary();
1178 self.running_turn = Some(RunningTurn {
1179 event_stream: event_stream.clone(),
1180 tools: self.enabled_tools(profile, &model, cx),
1181 _task: cx.spawn(async move |this, cx| {
1182 log::debug!("Starting agent turn execution");
1183
1184 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1185 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1186
1187 match turn_result {
1188 Ok(()) => {
1189 log::debug!("Turn execution completed");
1190 event_stream.send_stop(acp::StopReason::EndTurn);
1191 }
1192 Err(error) => {
1193 log::error!("Turn execution failed: {:?}", error);
1194 match error.downcast::<CompletionError>() {
1195 Ok(CompletionError::Refusal) => {
1196 event_stream.send_stop(acp::StopReason::Refusal);
1197 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1198 }
1199 Ok(CompletionError::MaxTokens) => {
1200 event_stream.send_stop(acp::StopReason::MaxTokens);
1201 }
1202 Ok(CompletionError::Other(error)) | Err(error) => {
1203 event_stream.send_error(error);
1204 }
1205 }
1206 }
1207 }
1208
1209 _ = this.update(cx, |this, _| this.running_turn.take());
1210 }),
1211 });
1212 Ok(events_rx)
1213 }
1214
1215 async fn run_turn_internal(
1216 this: &WeakEntity<Self>,
1217 model: Arc<dyn LanguageModel>,
1218 event_stream: &ThreadEventStream,
1219 cx: &mut AsyncApp,
1220 ) -> Result<()> {
1221 let mut attempt = 0;
1222 let mut intent = CompletionIntent::UserPrompt;
1223 loop {
1224 let request =
1225 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1226
1227 telemetry::event!(
1228 "Agent Thread Completion",
1229 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1230 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1231 model = model.telemetry_id(),
1232 model_provider = model.provider_id().to_string(),
1233 attempt
1234 );
1235
1236 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1237
1238 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1239 Ok(events) => (events, None),
1240 Err(err) => (stream::empty().boxed(), Some(err)),
1241 };
1242 let mut tool_results = FuturesUnordered::new();
1243 while let Some(event) = events.next().await {
1244 log::trace!("Received completion event: {:?}", event);
1245 match event {
1246 Ok(event) => {
1247 tool_results.extend(this.update(cx, |this, cx| {
1248 this.handle_completion_event(event, event_stream, cx)
1249 })??);
1250 }
1251 Err(err) => {
1252 error = Some(err);
1253 break;
1254 }
1255 }
1256 }
1257
1258 let end_turn = tool_results.is_empty();
1259 while let Some(tool_result) = tool_results.next().await {
1260 log::debug!("Tool finished {:?}", tool_result);
1261
1262 let mut fields = acp::ToolCallUpdateFields::new().status(if tool_result.is_error {
1263 acp::ToolCallStatus::Failed
1264 } else {
1265 acp::ToolCallStatus::Completed
1266 });
1267 if let Some(output) = &tool_result.output {
1268 fields = fields.raw_output(output.clone());
1269 }
1270 event_stream.update_tool_call_fields(&tool_result.tool_use_id, fields);
1271 this.update(cx, |this, _cx| {
1272 this.pending_message()
1273 .tool_results
1274 .insert(tool_result.tool_use_id.clone(), tool_result);
1275 })?;
1276 }
1277
1278 this.update(cx, |this, cx| {
1279 this.flush_pending_message(cx);
1280 if this.title.is_none() && this.pending_title_generation.is_none() {
1281 this.generate_title(cx);
1282 }
1283 })?;
1284
1285 if let Some(error) = error {
1286 attempt += 1;
1287 let retry = this.update(cx, |this, cx| {
1288 let user_store = this.user_store.read(cx);
1289 this.handle_completion_error(error, attempt, user_store.plan())
1290 })??;
1291 let timer = cx.background_executor().timer(retry.duration);
1292 event_stream.send_retry(retry);
1293 timer.await;
1294 this.update(cx, |this, _cx| {
1295 if let Some(Message::Agent(message)) = this.messages.last() {
1296 if message.tool_results.is_empty() {
1297 intent = CompletionIntent::UserPrompt;
1298 this.messages.push(Message::Resume);
1299 }
1300 }
1301 })?;
1302 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1303 return Err(language_model::ToolUseLimitReachedError.into());
1304 } else if end_turn {
1305 return Ok(());
1306 } else {
1307 intent = CompletionIntent::ToolResults;
1308 attempt = 0;
1309 }
1310 }
1311 }
1312
1313 fn handle_completion_error(
1314 &mut self,
1315 error: LanguageModelCompletionError,
1316 attempt: u8,
1317 plan: Option<Plan>,
1318 ) -> Result<acp_thread::RetryStatus> {
1319 let Some(model) = self.model.as_ref() else {
1320 return Err(anyhow!(error));
1321 };
1322
1323 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1324 match plan {
1325 Some(Plan::V2(_)) => true,
1326 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1327 None => false,
1328 }
1329 } else {
1330 true
1331 };
1332
1333 if !auto_retry {
1334 return Err(anyhow!(error));
1335 }
1336
1337 let Some(strategy) = Self::retry_strategy_for(&error) else {
1338 return Err(anyhow!(error));
1339 };
1340
1341 let max_attempts = match &strategy {
1342 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1343 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1344 };
1345
1346 if attempt > max_attempts {
1347 return Err(anyhow!(error));
1348 }
1349
1350 let delay = match &strategy {
1351 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1352 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1353 Duration::from_secs(delay_secs)
1354 }
1355 RetryStrategy::Fixed { delay, .. } => *delay,
1356 };
1357 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1358
1359 Ok(acp_thread::RetryStatus {
1360 last_error: error.to_string().into(),
1361 attempt: attempt as usize,
1362 max_attempts: max_attempts as usize,
1363 started_at: Instant::now(),
1364 duration: delay,
1365 })
1366 }
1367
1368 /// A helper method that's called on every streamed completion event.
1369 /// Returns an optional tool result task, which the main agentic loop will
1370 /// send back to the model when it resolves.
1371 fn handle_completion_event(
1372 &mut self,
1373 event: LanguageModelCompletionEvent,
1374 event_stream: &ThreadEventStream,
1375 cx: &mut Context<Self>,
1376 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1377 log::trace!("Handling streamed completion event: {:?}", event);
1378 use LanguageModelCompletionEvent::*;
1379
1380 match event {
1381 StartMessage { .. } => {
1382 self.flush_pending_message(cx);
1383 self.pending_message = Some(AgentMessage::default());
1384 }
1385 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1386 Thinking { text, signature } => {
1387 self.handle_thinking_event(text, signature, event_stream, cx)
1388 }
1389 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1390 ReasoningDetails(details) => {
1391 let last_message = self.pending_message();
                // Store the last non-empty reasoning_details, overwriting earlier ones.
                // This keeps the encrypted reasoning with signatures rather than the earlier plain-text reasoning.
1394 if let serde_json::Value::Array(ref arr) = details {
1395 if !arr.is_empty() {
1396 last_message.reasoning_details = Some(details);
1397 }
1398 } else {
1399 last_message.reasoning_details = Some(details);
1400 }
1401 }
1402 ToolUse(tool_use) => {
1403 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1404 }
1405 ToolUseJsonParseError {
1406 id,
1407 tool_name,
1408 raw_input,
1409 json_parse_error,
1410 } => {
1411 return Ok(Some(Task::ready(
1412 self.handle_tool_use_json_parse_error_event(
1413 id,
1414 tool_name,
1415 raw_input,
1416 json_parse_error,
1417 ),
1418 )));
1419 }
1420 UsageUpdate(usage) => {
1421 telemetry::event!(
1422 "Agent Thread Completion Usage Updated",
1423 thread_id = self.id.to_string(),
1424 prompt_id = self.prompt_id.to_string(),
1425 model = self.model.as_ref().map(|m| m.telemetry_id()),
1426 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1427 input_tokens = usage.input_tokens,
1428 output_tokens = usage.output_tokens,
1429 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1430 cache_read_input_tokens = usage.cache_read_input_tokens,
1431 );
1432 self.update_token_usage(usage, cx);
1433 }
1434 UsageUpdated { amount, limit } => {
1435 self.update_model_request_usage(amount, limit, cx);
1436 }
1437 ToolUseLimitReached => {
1438 self.tool_use_limit_reached = true;
1439 }
1440 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1441 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1442 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1443 Started | Queued { .. } => {}
1444 }
1445
1446 Ok(None)
1447 }
1448
1449 fn handle_text_event(
1450 &mut self,
1451 new_text: String,
1452 event_stream: &ThreadEventStream,
1453 cx: &mut Context<Self>,
1454 ) {
1455 event_stream.send_text(&new_text);
1456
1457 let last_message = self.pending_message();
1458 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1459 text.push_str(&new_text);
1460 } else {
1461 last_message
1462 .content
1463 .push(AgentMessageContent::Text(new_text));
1464 }
1465
1466 cx.notify();
1467 }
1468
1469 fn handle_thinking_event(
1470 &mut self,
1471 new_text: String,
1472 new_signature: Option<String>,
1473 event_stream: &ThreadEventStream,
1474 cx: &mut Context<Self>,
1475 ) {
1476 event_stream.send_thinking(&new_text);
1477
1478 let last_message = self.pending_message();
1479 if let Some(AgentMessageContent::Thinking { text, signature }) =
1480 last_message.content.last_mut()
1481 {
1482 text.push_str(&new_text);
1483 *signature = new_signature.or(signature.take());
1484 } else {
1485 last_message.content.push(AgentMessageContent::Thinking {
1486 text: new_text,
1487 signature: new_signature,
1488 });
1489 }
1490
1491 cx.notify();
1492 }
1493
1494 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1495 let last_message = self.pending_message();
1496 last_message
1497 .content
1498 .push(AgentMessageContent::RedactedThinking(data));
1499 cx.notify();
1500 }
1501
1502 fn handle_tool_use_event(
1503 &mut self,
1504 tool_use: LanguageModelToolUse,
1505 event_stream: &ThreadEventStream,
1506 cx: &mut Context<Self>,
1507 ) -> Option<Task<LanguageModelToolResult>> {
1508 cx.notify();
1509
1510 let tool = self.tool(tool_use.name.as_ref());
1511 let mut title = SharedString::from(&tool_use.name);
1512 let mut kind = acp::ToolKind::Other;
1513 if let Some(tool) = tool.as_ref() {
1514 title = tool.initial_title(tool_use.input.clone(), cx);
1515 kind = tool.kind();
1516 }
1517
        // Ensure the last message ends with the current tool use.
1519 let last_message = self.pending_message();
1520 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1521 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1522 if last_tool_use.id == tool_use.id {
1523 *last_tool_use = tool_use.clone();
1524 false
1525 } else {
1526 true
1527 }
1528 } else {
1529 true
1530 }
1531 });
1532
1533 if push_new_tool_use {
1534 event_stream.send_tool_call(
1535 &tool_use.id,
1536 &tool_use.name,
1537 title,
1538 kind,
1539 tool_use.input.clone(),
1540 );
1541 last_message
1542 .content
1543 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1544 } else {
1545 event_stream.update_tool_call_fields(
1546 &tool_use.id,
1547 acp::ToolCallUpdateFields::new()
1548 .title(title)
1549 .kind(kind)
1550 .raw_input(tool_use.input.clone()),
1551 );
1552 }
1553
1554 if !tool_use.is_input_complete {
1555 return None;
1556 }
1557
1558 let Some(tool) = tool else {
1559 let content = format!("No tool named {} exists", tool_use.name);
1560 return Some(Task::ready(LanguageModelToolResult {
1561 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1562 tool_use_id: tool_use.id,
1563 tool_name: tool_use.name,
1564 is_error: true,
1565 output: None,
1566 }));
1567 };
1568
1569 let fs = self.project.read(cx).fs().clone();
1570 let tool_event_stream =
1571 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1572 tool_event_stream.update_fields(
1573 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1574 );
1575 let supports_images = self.model().is_some_and(|model| model.supports_images());
1576 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1577 log::debug!("Running tool {}", tool_use.name);
1578 Some(cx.foreground_executor().spawn(async move {
1579 let tool_result = tool_result.await.and_then(|output| {
1580 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1581 && !supports_images
1582 {
1583 return Err(anyhow!(
1584 "Attempted to read an image, but this model doesn't support it.",
1585 ));
1586 }
1587 Ok(output)
1588 });
1589
1590 match tool_result {
1591 Ok(output) => LanguageModelToolResult {
1592 tool_use_id: tool_use.id,
1593 tool_name: tool_use.name,
1594 is_error: false,
1595 content: output.llm_output,
1596 output: Some(output.raw_output),
1597 },
1598 Err(error) => LanguageModelToolResult {
1599 tool_use_id: tool_use.id,
1600 tool_name: tool_use.name,
1601 is_error: true,
1602 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1603 output: Some(error.to_string().into()),
1604 },
1605 }
1606 }))
1607 }
1608
1609 fn handle_tool_use_json_parse_error_event(
1610 &mut self,
1611 tool_use_id: LanguageModelToolUseId,
1612 tool_name: Arc<str>,
1613 raw_input: Arc<str>,
1614 json_parse_error: String,
1615 ) -> LanguageModelToolResult {
1616 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1617 LanguageModelToolResult {
1618 tool_use_id,
1619 tool_name,
1620 is_error: true,
1621 content: LanguageModelToolResultContent::Text(tool_output.into()),
1622 output: Some(serde_json::Value::String(raw_input.to_string())),
1623 }
1624 }
1625
1626 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1627 self.project
1628 .read(cx)
1629 .user_store()
1630 .update(cx, |user_store, cx| {
1631 user_store.update_model_request_usage(
1632 ModelRequestUsage(RequestUsage {
1633 amount: amount as i32,
1634 limit,
1635 }),
1636 cx,
1637 )
1638 });
1639 }
1640
1641 pub fn title(&self) -> SharedString {
1642 self.title.clone().unwrap_or("New Thread".into())
1643 }
1644
1645 pub fn is_generating_summary(&self) -> bool {
1646 self.pending_summary_generation.is_some()
1647 }
1648
1649 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1650 if let Some(summary) = self.summary.as_ref() {
1651 return Task::ready(Some(summary.clone())).shared();
1652 }
1653 if let Some(task) = self.pending_summary_generation.clone() {
1654 return task;
1655 }
1656 let Some(model) = self.summarization_model.clone() else {
1657 log::error!("No summarization model available");
1658 return Task::ready(None).shared();
1659 };
1660 let mut request = LanguageModelRequest {
1661 intent: Some(CompletionIntent::ThreadContextSummarization),
1662 temperature: AgentSettings::temperature_for_model(&model, cx),
1663 ..Default::default()
1664 };
1665
1666 for message in &self.messages {
1667 request.messages.extend(message.to_request());
1668 }
1669
1670 request.messages.push(LanguageModelRequestMessage {
1671 role: Role::User,
1672 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1673 cache: false,
1674 reasoning_details: None,
1675 });
1676
1677 let task = cx
1678 .spawn(async move |this, cx| {
1679 let mut summary = String::new();
1680 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1681 while let Some(event) = messages.next().await {
1682 let event = event.log_err()?;
1683 let text = match event {
1684 LanguageModelCompletionEvent::Text(text) => text,
1685 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1686 this.update(cx, |thread, cx| {
1687 thread.update_model_request_usage(amount, limit, cx);
1688 })
1689 .ok()?;
1690 continue;
1691 }
1692 _ => continue,
1693 };
1694
1695 let mut lines = text.lines();
1696 summary.extend(lines.next());
1697 }
1698
1699 log::debug!("Setting summary: {}", summary);
1700 let summary = SharedString::from(summary);
1701
1702 this.update(cx, |this, cx| {
1703 this.summary = Some(summary.clone());
1704 this.pending_summary_generation = None;
1705 cx.notify()
1706 })
1707 .ok()?;
1708
1709 Some(summary)
1710 })
1711 .shared();
1712 self.pending_summary_generation = Some(task.clone());
1713 task
1714 }
1715
1716 fn generate_title(&mut self, cx: &mut Context<Self>) {
1717 let Some(model) = self.summarization_model.clone() else {
1718 return;
1719 };
1720
1721 log::debug!(
1722 "Generating title with model: {:?}",
1723 self.summarization_model.as_ref().map(|model| model.name())
1724 );
1725 let mut request = LanguageModelRequest {
1726 intent: Some(CompletionIntent::ThreadSummarization),
1727 temperature: AgentSettings::temperature_for_model(&model, cx),
1728 ..Default::default()
1729 };
1730
1731 for message in &self.messages {
1732 request.messages.extend(message.to_request());
1733 }
1734
1735 request.messages.push(LanguageModelRequestMessage {
1736 role: Role::User,
1737 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1738 cache: false,
1739 reasoning_details: None,
1740 });
1741 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1742 let mut title = String::new();
1743
1744 let generate = async {
1745 let mut messages = model.stream_completion(request, cx).await?;
1746 while let Some(event) = messages.next().await {
1747 let event = event?;
1748 let text = match event {
1749 LanguageModelCompletionEvent::Text(text) => text,
1750 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1751 this.update(cx, |thread, cx| {
1752 thread.update_model_request_usage(amount, limit, cx);
1753 })?;
1754 continue;
1755 }
1756 _ => continue,
1757 };
1758
1759 let mut lines = text.lines();
1760 title.extend(lines.next());
1761
1762 // Stop if the LLM generated multiple lines.
1763 if lines.next().is_some() {
1764 break;
1765 }
1766 }
1767 anyhow::Ok(())
1768 };
1769
1770 if generate.await.context("failed to generate title").is_ok() {
1771 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1772 }
1773 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1774 }));
1775 }
1776
1777 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1778 self.pending_title_generation = None;
1779 if Some(&title) != self.title.as_ref() {
1780 self.title = Some(title);
1781 cx.emit(TitleUpdated);
1782 cx.notify();
1783 }
1784 }
1785
1786 fn clear_summary(&mut self) {
1787 self.summary = None;
1788 self.pending_summary_generation = None;
1789 }
1790
1791 fn last_user_message(&self) -> Option<&UserMessage> {
1792 self.messages
1793 .iter()
1794 .rev()
1795 .find_map(|message| match message {
1796 Message::User(user_message) => Some(user_message),
1797 Message::Agent(_) => None,
1798 Message::Resume => None,
1799 })
1800 }
1801
1802 fn pending_message(&mut self) -> &mut AgentMessage {
1803 self.pending_message.get_or_insert_default()
1804 }
1805
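    /// Moves the in-progress agent message into `messages`, first recording an
    /// error result (`TOOL_CANCELED_MESSAGE`) for any tool use that never got a
    /// real result, so the transcript stays well-formed for the next request.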
1806 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1807 let Some(mut message) = self.pending_message.take() else {
1808 return;
1809 };
1810
1811 if message.content.is_empty() {
1812 return;
1813 }
1814
1815 for content in &message.content {
1816 let AgentMessageContent::ToolUse(tool_use) = content else {
1817 continue;
1818 };
1819
1820 if !message.tool_results.contains_key(&tool_use.id) {
1821 message.tool_results.insert(
1822 tool_use.id.clone(),
1823 LanguageModelToolResult {
1824 tool_use_id: tool_use.id.clone(),
1825 tool_name: tool_use.name.clone(),
1826 is_error: true,
1827 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1828 output: None,
1829 },
1830 );
1831 }
1832 }
1833
1834 self.messages.push(Message::Agent(message));
1835 self.updated_at = Utc::now();
1836 self.clear_summary();
1837 cx.notify()
1838 }
1839
1840 pub(crate) fn build_completion_request(
1841 &self,
1842 completion_intent: CompletionIntent,
1843 cx: &App,
1844 ) -> Result<LanguageModelRequest> {
1845 let model = self.model().context("No language model configured")?;
1846 let tools = if let Some(turn) = self.running_turn.as_ref() {
1847 turn.tools
1848 .iter()
1849 .filter_map(|(tool_name, tool)| {
1850 log::trace!("Including tool: {}", tool_name);
1851 Some(LanguageModelRequestTool {
1852 name: tool_name.to_string(),
1853 description: tool.description().to_string(),
1854 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1855 })
1856 })
1857 .collect::<Vec<_>>()
1858 } else {
1859 Vec::new()
1860 };
1861
1862 log::debug!("Building completion request");
1863 log::debug!("Completion intent: {:?}", completion_intent);
1864 log::debug!("Completion mode: {:?}", self.completion_mode);
1865
1866 let available_tools: Vec<_> = self
1867 .running_turn
1868 .as_ref()
1869 .map(|turn| turn.tools.keys().cloned().collect())
1870 .unwrap_or_default();
1871
1872 log::debug!("Request includes {} tools", available_tools.len());
1873 let messages = self.build_request_messages(available_tools, cx);
1874 log::debug!("Request will include {} messages", messages.len());
1875
1876 let request = LanguageModelRequest {
1877 thread_id: Some(self.id.to_string()),
1878 prompt_id: Some(self.prompt_id.to_string()),
1879 intent: Some(completion_intent),
1880 mode: Some(self.completion_mode.into()),
1881 messages,
1882 tools,
1883 tool_choice: None,
1884 stop: Vec::new(),
1885 temperature: AgentSettings::temperature_for_model(model, cx),
1886 thinking_allowed: true,
1887 };
1888
1889 log::debug!("Completion request built successfully");
1890 Ok(request)
1891 }
1892
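    /// Collects the tools that will be offered to the model for this turn:
    /// built-in tools that the profile enables and the provider supports, plus
    /// enabled context-server tools, with names truncated (and disambiguated on
    /// collision) to fit within `MAX_TOOL_NAME_LENGTH`.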
1893 fn enabled_tools(
1894 &self,
1895 profile: &AgentProfileSettings,
1896 model: &Arc<dyn LanguageModel>,
1897 cx: &App,
1898 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1899 fn truncate(tool_name: &SharedString) -> SharedString {
1900 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1901 let mut truncated = tool_name.to_string();
1902 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1903 truncated.into()
1904 } else {
1905 tool_name.clone()
1906 }
1907 }
1908
1909 let mut tools = self
1910 .tools
1911 .iter()
1912 .filter_map(|(tool_name, tool)| {
1913 if tool.supports_provider(&model.provider_id())
1914 && profile.is_tool_enabled(tool_name)
1915 {
1916 Some((truncate(tool_name), tool.clone()))
1917 } else {
1918 None
1919 }
1920 })
1921 .collect::<BTreeMap<_, _>>();
1922
1923 let mut context_server_tools = Vec::new();
1924 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1925 let mut duplicate_tool_names = HashSet::default();
1926 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1927 for (tool_name, tool) in server_tools {
1928 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1929 let tool_name = truncate(tool_name);
1930 if !seen_tools.insert(tool_name.clone()) {
1931 duplicate_tool_names.insert(tool_name.clone());
1932 }
1933 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1934 }
1935 }
1936 }
1937
1938 // When there are duplicate tool names, disambiguate by prefixing them
1939 // with the server ID. In the rare case there isn't enough space for the
1940 // disambiguated tool name, keep only the last tool with this name.
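        // For example, if two context servers both expose a `search` tool, they are
        // registered as `<server_a>_search` and `<server_b>_search` instead of colliding.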
1941 for (server_id, tool_name, tool) in context_server_tools {
1942 if duplicate_tool_names.contains(&tool_name) {
1943 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1944 if available >= 2 {
1945 let mut disambiguated = server_id.0.to_string();
1946 disambiguated.truncate(available - 1);
1947 disambiguated.push('_');
1948 disambiguated.push_str(&tool_name);
1949 tools.insert(disambiguated.into(), tool.clone());
1950 } else {
1951 tools.insert(tool_name, tool.clone());
1952 }
1953 } else {
1954 tools.insert(tool_name, tool.clone());
1955 }
1956 }
1957
1958 tools
1959 }
1960
1961 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1962 self.running_turn.as_ref()?.tools.get(name).cloned()
1963 }
1964
1965 fn build_request_messages(
1966 &self,
1967 available_tools: Vec<SharedString>,
1968 cx: &App,
1969 ) -> Vec<LanguageModelRequestMessage> {
1970 log::trace!(
1971 "Building request messages from {} thread messages",
1972 self.messages.len()
1973 );
1974
1975 let system_prompt = SystemPromptTemplate {
1976 project: self.project_context.read(cx),
1977 available_tools,
1978 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
1979 }
1980 .render(&self.templates)
1981 .context("failed to build system prompt")
1982 .expect("Invalid template");
1983 let mut messages = vec![LanguageModelRequestMessage {
1984 role: Role::System,
1985 content: vec![system_prompt.into()],
1986 cache: false,
1987 reasoning_details: None,
1988 }];
1989 for message in &self.messages {
1990 messages.extend(message.to_request());
1991 }
1992
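        // Mark the final message of the committed history as a cache point so
        // providers that support prompt caching can reuse the conversation prefix
        // on the next request.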
1993 if let Some(last_message) = messages.last_mut() {
1994 last_message.cache = true;
1995 }
1996
1997 if let Some(message) = self.pending_message.as_ref() {
1998 messages.extend(message.to_request());
1999 }
2000
2001 messages
2002 }
2003
2004 pub fn to_markdown(&self) -> String {
2005 let mut markdown = String::new();
2006 for (ix, message) in self.messages.iter().enumerate() {
2007 if ix > 0 {
2008 markdown.push('\n');
2009 }
2010 markdown.push_str(&message.to_markdown());
2011 }
2012
2013 if let Some(message) = self.pending_message.as_ref() {
2014 markdown.push('\n');
2015 markdown.push_str(&message.to_markdown());
2016 }
2017
2018 markdown
2019 }
2020
2021 fn advance_prompt_id(&mut self) {
2022 self.prompt_id = PromptId::new();
2023 }
2024
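    /// Maps a completion error to the retry strategy we should use, or `None`
    /// when retrying can't help (e.g. bad credentials or an oversized prompt).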
2025 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2026 use LanguageModelCompletionError::*;
2027 use http_client::StatusCode;
2028
2029 // General strategy here:
2030 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   backing off exponentially or waiting for any server-provided `retry_after` delay.
2032 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
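        // For example, with BASE_RETRY_DELAY = 5s and MAX_RETRY_ATTEMPTS = 4, a direct
        // HTTP 429 response takes the exponential path below: assuming the retry loop
        // doubles the delay on each attempt, the waits are roughly 5s, 10s, 20s, 40s.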
2033 match error {
2034 HttpResponseError {
2035 status_code: StatusCode::TOO_MANY_REQUESTS,
2036 ..
2037 } => Some(RetryStrategy::ExponentialBackoff {
2038 initial_delay: BASE_RETRY_DELAY,
2039 max_attempts: MAX_RETRY_ATTEMPTS,
2040 }),
2041 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2042 Some(RetryStrategy::Fixed {
2043 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2044 max_attempts: MAX_RETRY_ATTEMPTS,
2045 })
2046 }
2047 UpstreamProviderError {
2048 status,
2049 retry_after,
2050 ..
2051 } => match *status {
2052 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2053 Some(RetryStrategy::Fixed {
2054 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2055 max_attempts: MAX_RETRY_ATTEMPTS,
2056 })
2057 }
2058 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2059 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2060 // Internal Server Error could be anything, retry up to 3 times.
2061 max_attempts: 3,
2062 }),
2063 status => {
2064 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2065 // but we frequently get them in practice. See https://http.dev/529
2066 if status.as_u16() == 529 {
2067 Some(RetryStrategy::Fixed {
2068 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2069 max_attempts: MAX_RETRY_ATTEMPTS,
2070 })
2071 } else {
2072 Some(RetryStrategy::Fixed {
2073 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2074 max_attempts: 2,
2075 })
2076 }
2077 }
2078 },
2079 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2080 delay: BASE_RETRY_DELAY,
2081 max_attempts: 3,
2082 }),
2083 ApiReadResponseError { .. }
2084 | HttpSend { .. }
2085 | DeserializeResponse { .. }
2086 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2087 delay: BASE_RETRY_DELAY,
2088 max_attempts: 3,
2089 }),
2090 // Retrying these errors definitely shouldn't help.
2091 HttpResponseError {
2092 status_code:
2093 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2094 ..
2095 }
2096 | AuthenticationError { .. }
2097 | PermissionError { .. }
2098 | NoApiKey { .. }
2099 | ApiEndpointNotFound { .. }
2100 | PromptTooLarge { .. } => None,
            // These failures occur locally while building or serializing the request,
            // so they're unlikely to be transient; still, retry once as a safeguard.
2102 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2103 delay: BASE_RETRY_DELAY,
2104 max_attempts: 1,
2105 }),
            // Retry all other 4xx and 5xx errors a few times.
2107 HttpResponseError { status_code, .. }
2108 if status_code.is_client_error() || status_code.is_server_error() =>
2109 {
2110 Some(RetryStrategy::Fixed {
2111 delay: BASE_RETRY_DELAY,
2112 max_attempts: 3,
2113 })
2114 }
2115 Other(err)
2116 if err.is::<language_model::PaymentRequiredError>()
2117 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2118 {
2119 // Retrying won't help for Payment Required or Model Request Limit errors (where
2120 // the user must upgrade to usage-based billing to get more requests, or else wait
2121 // for a significant amount of time for the request limit to reset).
2122 None
2123 }
            // For any remaining, unclassified errors, retry conservatively a couple of times in case they're transient.
2125 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2126 delay: BASE_RETRY_DELAY,
2127 max_attempts: 2,
2128 }),
2129 }
2130 }
2131}
2132
2133struct RunningTurn {
2134 /// Holds the task that handles agent interaction until the end of the turn.
2135 /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results.
2137 _task: Task<()>,
2138 /// The current event stream for the running turn. Used to report a final
2139 /// cancellation event if we cancel the turn.
2140 event_stream: ThreadEventStream,
2141 /// The tools that were enabled for this turn.
2142 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2143}
2144
2145impl RunningTurn {
2146 fn cancel(self) {
        log::debug!("Cancelling in-progress turn");
2148 self.event_stream.send_canceled();
2149 }
2150}
2151
2152pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2153
2154impl EventEmitter<TokenUsageUpdated> for Thread {}
2155
2156pub struct TitleUpdated;
2157
2158impl EventEmitter<TitleUpdated> for Thread {}
2159
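/// A strongly typed tool that the agent can invoke during a turn.
///
/// Implementors describe their input and output types via serde + JSON schema
/// and provide a `run` method; calling [`AgentTool::erase`] converts the tool
/// into the object-safe [`AnyAgentTool`] that the thread stores. A minimal
/// sketch (the `EchoTool`/`EchoToolInput` names and the chosen `ToolKind` are
/// illustrative, not part of this crate):
///
/// ```ignore
/// // Hypothetical tool used only to illustrate the shape of the trait.
/// /// Echoes text back to the model.
/// #[derive(serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
/// struct EchoToolInput {
///     /// Text to echo back.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     // Assumes `String` converts into `LanguageModelToolResultContent`.
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
/// ```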
2160pub trait AgentTool
2161where
2162 Self: 'static + Sized,
2163{
2164 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2165 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2166
2167 fn name() -> &'static str;
2168
2169 fn description() -> SharedString {
2170 let schema = schemars::schema_for!(Self::Input);
2171 SharedString::new(
2172 schema
2173 .get("description")
2174 .and_then(|description| description.as_str())
2175 .unwrap_or_default(),
2176 )
2177 }
2178
2179 fn kind() -> acp::ToolKind;
2180
2181 /// The initial tool title to display. Can be updated during the tool run.
2182 fn initial_title(
2183 &self,
2184 input: Result<Self::Input, serde_json::Value>,
2185 cx: &mut App,
2186 ) -> SharedString;
2187
2188 /// Returns the JSON schema that describes the tool's input.
2189 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2190 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2191 }
2192
    /// Some tools rely on a specific provider for billing or other reasons.
    /// This lets a tool check whether it's compatible with the given provider,
    /// or whether it should be filtered out.
2195 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2196 true
2197 }
2198
2199 /// Runs the tool with the provided input.
2200 fn run(
2201 self: Arc<Self>,
2202 input: Self::Input,
2203 event_stream: ToolCallEventStream,
2204 cx: &mut App,
2205 ) -> Task<Result<Self::Output>>;
2206
2207 /// Emits events for a previous execution of the tool.
2208 fn replay(
2209 &self,
2210 _input: Self::Input,
2211 _output: Self::Output,
2212 _event_stream: ToolCallEventStream,
2213 _cx: &mut App,
2214 ) -> Result<()> {
2215 Ok(())
2216 }
2217
2218 fn erase(self) -> Arc<dyn AnyAgentTool> {
2219 Arc::new(Erased(Arc::new(self)))
2220 }
2221}
2222
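/// Adapter that wraps a typed [`AgentTool`] so it can be used as an [`AnyAgentTool`].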
2223pub struct Erased<T>(T);
2224
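/// The result of running a tool: `llm_output` is the content reported back to
/// the model, while `raw_output` keeps the tool's full output as JSON so it can
/// later be passed back to `replay`.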
2225pub struct AgentToolOutput {
2226 pub llm_output: LanguageModelToolResultContent,
2227 pub raw_output: serde_json::Value,
2228}
2229
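/// Object-safe, type-erased counterpart of [`AgentTool`].
///
/// The thread stores tools as `Arc<dyn AnyAgentTool>`; inputs and outputs cross
/// this boundary as raw `serde_json::Value`s and are (de)serialized by the
/// [`Erased`] adapter below.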
2230pub trait AnyAgentTool {
2231 fn name(&self) -> SharedString;
2232 fn description(&self) -> SharedString;
2233 fn kind(&self) -> acp::ToolKind;
2234 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2235 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2236 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2237 true
2238 }
2239 fn run(
2240 self: Arc<Self>,
2241 input: serde_json::Value,
2242 event_stream: ToolCallEventStream,
2243 cx: &mut App,
2244 ) -> Task<Result<AgentToolOutput>>;
2245 fn replay(
2246 &self,
2247 input: serde_json::Value,
2248 output: serde_json::Value,
2249 event_stream: ToolCallEventStream,
2250 cx: &mut App,
2251 ) -> Result<()>;
2252}
2253
2254impl<T> AnyAgentTool for Erased<Arc<T>>
2255where
2256 T: AgentTool,
2257{
2258 fn name(&self) -> SharedString {
2259 T::name().into()
2260 }
2261
2262 fn description(&self) -> SharedString {
2263 T::description()
2264 }
2265
2266 fn kind(&self) -> agent_client_protocol::ToolKind {
2267 T::kind()
2268 }
2269
    fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
        let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
        self.0.initial_title(parsed_input, cx)
2273 }
2274
2275 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2276 let mut json = serde_json::to_value(T::input_schema(format))?;
2277 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2278 Ok(json)
2279 }
2280
2281 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2282 T::supports_provider(provider)
2283 }
2284
2285 fn run(
2286 self: Arc<Self>,
2287 input: serde_json::Value,
2288 event_stream: ToolCallEventStream,
2289 cx: &mut App,
2290 ) -> Task<Result<AgentToolOutput>> {
2291 cx.spawn(async move |cx| {
2292 let input = serde_json::from_value(input)?;
2293 let output = cx
2294 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2295 .await?;
2296 let raw_output = serde_json::to_value(&output)?;
2297 Ok(AgentToolOutput {
2298 llm_output: output.into(),
2299 raw_output,
2300 })
2301 })
2302 }
2303
2304 fn replay(
2305 &self,
2306 input: serde_json::Value,
2307 output: serde_json::Value,
2308 event_stream: ToolCallEventStream,
2309 cx: &mut App,
2310 ) -> Result<()> {
2311 let input = serde_json::from_value(input)?;
2312 let output = serde_json::from_value(output)?;
2313 self.0.replay(input, output, event_stream, cx)
2314 }
2315}
2316
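/// Sender half of a running turn's event channel. The thread uses it to stream
/// user/agent text, thinking, tool-call updates, retries, and stop or error
/// events to whoever is driving the turn.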
2317#[derive(Clone)]
2318struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2319
2320impl ThreadEventStream {
2321 fn send_user_message(&self, message: &UserMessage) {
2322 self.0
2323 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2324 .ok();
2325 }
2326
2327 fn send_text(&self, text: &str) {
2328 self.0
2329 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2330 .ok();
2331 }
2332
2333 fn send_thinking(&self, text: &str) {
2334 self.0
2335 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2336 .ok();
2337 }
2338
2339 fn send_tool_call(
2340 &self,
2341 id: &LanguageModelToolUseId,
2342 tool_name: &str,
2343 title: SharedString,
2344 kind: acp::ToolKind,
2345 input: serde_json::Value,
2346 ) {
2347 self.0
2348 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2349 id,
2350 tool_name,
2351 title.to_string(),
2352 kind,
2353 input,
2354 ))))
2355 .ok();
2356 }
2357
2358 fn initial_tool_call(
2359 id: &LanguageModelToolUseId,
2360 tool_name: &str,
2361 title: String,
2362 kind: acp::ToolKind,
2363 input: serde_json::Value,
2364 ) -> acp::ToolCall {
2365 acp::ToolCall::new(id.to_string(), title)
2366 .kind(kind)
2367 .raw_input(input)
2368 .meta(acp::Meta::from_iter([(
2369 "tool_name".into(),
2370 tool_name.into(),
2371 )]))
2372 }
2373
2374 fn update_tool_call_fields(
2375 &self,
2376 tool_use_id: &LanguageModelToolUseId,
2377 fields: acp::ToolCallUpdateFields,
2378 ) {
2379 self.0
2380 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2381 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2382 )))
2383 .ok();
2384 }
2385
2386 fn send_retry(&self, status: acp_thread::RetryStatus) {
2387 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2388 }
2389
2390 fn send_stop(&self, reason: acp::StopReason) {
2391 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2392 }
2393
2394 fn send_canceled(&self) {
2395 self.0
2396 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2397 .ok();
2398 }
2399
2400 fn send_error(&self, error: impl Into<anyhow::Error>) {
2401 self.0.unbounded_send(Err(error.into())).ok();
2402 }
2403}
2404
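/// Event stream handed to a tool while it runs, scoped to a single tool call.
/// Wraps the turn's [`ThreadEventStream`] together with the originating tool-use
/// ID so the tool can publish field updates, diffs, and authorization requests
/// for itself.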
2405#[derive(Clone)]
2406pub struct ToolCallEventStream {
2407 tool_use_id: LanguageModelToolUseId,
2408 stream: ThreadEventStream,
2409 fs: Option<Arc<dyn Fs>>,
2410}
2411
2412impl ToolCallEventStream {
2413 #[cfg(any(test, feature = "test-support"))]
2414 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2415 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2416
2417 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2418
2419 (stream, ToolCallEventStreamReceiver(events_rx))
2420 }
2421
2422 fn new(
2423 tool_use_id: LanguageModelToolUseId,
2424 stream: ThreadEventStream,
2425 fs: Option<Arc<dyn Fs>>,
2426 ) -> Self {
2427 Self {
2428 tool_use_id,
2429 stream,
2430 fs,
2431 }
2432 }
2433
2434 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2435 self.stream
2436 .update_tool_call_fields(&self.tool_use_id, fields);
2437 }
2438
2439 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2440 self.stream
2441 .0
2442 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2443 acp_thread::ToolCallUpdateDiff {
2444 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2445 diff,
2446 }
2447 .into(),
2448 )))
2449 .ok();
2450 }
2451
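    /// Requests the user's permission before performing a sensitive action,
    /// unless `always_allow_tool_actions` is already enabled. The returned task
    /// resolves to `Ok(())` when the user picks "Allow" or "Always Allow" (the
    /// latter also persists the setting), and to an error when they deny.
    ///
    /// A minimal usage sketch from inside a tool's `run` method (the
    /// surrounding tool is hypothetical):
    ///
    /// ```ignore
    /// fn run(
    ///     self: Arc<Self>,
    ///     input: Self::Input,
    ///     event_stream: ToolCallEventStream,
    ///     cx: &mut App,
    /// ) -> Task<Result<Self::Output>> {
    ///     // Ask for confirmation first; the destructive work only runs if the
    ///     // user allows it.
    ///     let authorized = event_stream.authorize("Delete scratch file", cx);
    ///     cx.spawn(async move |_cx| {
    ///         authorized.await?;
    ///         // ...perform the action here...
    ///         // (assumes `Self::Output: Default` just to keep the sketch short)
    ///         Ok(Default::default())
    ///     })
    /// }
    /// ```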
2452 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2453 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2454 return Task::ready(Ok(()));
2455 }
2456
2457 let (response_tx, response_rx) = oneshot::channel();
2458 self.stream
2459 .0
2460 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2461 ToolCallAuthorization {
2462 tool_call: acp::ToolCallUpdate::new(
2463 self.tool_use_id.to_string(),
2464 acp::ToolCallUpdateFields::new().title(title),
2465 ),
2466 options: vec![
2467 acp::PermissionOption::new(
2468 acp::PermissionOptionId::new("always_allow"),
2469 "Always Allow",
2470 acp::PermissionOptionKind::AllowAlways,
2471 ),
2472 acp::PermissionOption::new(
2473 acp::PermissionOptionId::new("allow"),
2474 "Allow",
2475 acp::PermissionOptionKind::AllowOnce,
2476 ),
2477 acp::PermissionOption::new(
2478 acp::PermissionOptionId::new("deny"),
2479 "Deny",
2480 acp::PermissionOptionKind::RejectOnce,
2481 ),
2482 ],
2483 response: response_tx,
2484 },
2485 )))
2486 .ok();
2487 let fs = self.fs.clone();
2488 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2489 "always_allow" => {
2490 if let Some(fs) = fs.clone() {
2491 cx.update(|cx| {
2492 update_settings_file(fs, cx, |settings, _| {
2493 settings
2494 .agent
2495 .get_or_insert_default()
2496 .set_always_allow_tool_actions(true);
2497 });
2498 })?;
2499 }
2500
2501 Ok(())
2502 }
2503 "allow" => Ok(()),
2504 _ => Err(anyhow!("Permission to run tool denied by user")),
2505 })
2506 }
2507}
2508
2509#[cfg(any(test, feature = "test-support"))]
2510pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2511
2512#[cfg(any(test, feature = "test-support"))]
2513impl ToolCallEventStreamReceiver {
2514 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2515 let event = self.0.next().await;
2516 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2517 auth
2518 } else {
2519 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2520 }
2521 }
2522
2523 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2524 let event = self.0.next().await;
2525 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2526 update,
2527 )))) = event
2528 {
2529 update.fields
2530 } else {
2531 panic!("Expected update fields but got: {:?}", event);
2532 }
2533 }
2534
2535 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2536 let event = self.0.next().await;
2537 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2538 update,
2539 )))) = event
2540 {
2541 update.diff
2542 } else {
2543 panic!("Expected diff but got: {:?}", event);
2544 }
2545 }
2546
2547 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2548 let event = self.0.next().await;
2549 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2550 update,
2551 )))) = event
2552 {
2553 update.terminal
2554 } else {
2555 panic!("Expected terminal but got: {:?}", event);
2556 }
2557 }
2558}
2559
2560#[cfg(any(test, feature = "test-support"))]
2561impl std::ops::Deref for ToolCallEventStreamReceiver {
2562 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2563
2564 fn deref(&self) -> &Self::Target {
2565 &self.0
2566 }
2567}
2568
2569#[cfg(any(test, feature = "test-support"))]
2570impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2571 fn deref_mut(&mut self) -> &mut Self::Target {
2572 &mut self.0
2573 }
2574}
2575
2576impl From<&str> for UserMessageContent {
2577 fn from(text: &str) -> Self {
2578 Self::Text(text.into())
2579 }
2580}
2581
2582impl UserMessageContent {
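    /// Converts an ACP content block into user message content. Resource and
    /// resource-link blocks become mentions when their URI parses; audio, blobs,
    /// and unrecognized block types fall back to placeholder text.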
2583 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2584 match value {
2585 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2586 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2587 acp::ContentBlock::Audio(_) => {
2588 // TODO
2589 Self::Text("[audio]".to_string())
2590 }
2591 acp::ContentBlock::ResourceLink(resource_link) => {
2592 match MentionUri::parse(&resource_link.uri, path_style) {
2593 Ok(uri) => Self::Mention {
2594 uri,
2595 content: String::new(),
2596 },
2597 Err(err) => {
2598 log::error!("Failed to parse mention link: {}", err);
2599 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2600 }
2601 }
2602 }
2603 acp::ContentBlock::Resource(resource) => match resource.resource {
2604 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2605 match MentionUri::parse(&resource.uri, path_style) {
2606 Ok(uri) => Self::Mention {
2607 uri,
2608 content: resource.text,
2609 },
2610 Err(err) => {
2611 log::error!("Failed to parse mention link: {}", err);
2612 Self::Text(
2613 MarkdownCodeBlock {
2614 tag: &resource.uri,
2615 text: &resource.text,
2616 }
2617 .to_string(),
2618 )
2619 }
2620 }
2621 }
2622 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2623 // TODO
2624 Self::Text("[blob]".to_string())
2625 }
2626 other => {
2627 log::warn!("Unexpected content type: {:?}", other);
2628 Self::Text("[unknown]".to_string())
2629 }
2630 },
2631 other => {
2632 log::warn!("Unexpected content type: {:?}", other);
2633 Self::Text("[unknown]".to_string())
2634 }
2635 }
2636 }
2637}
2638
2639impl From<UserMessageContent> for acp::ContentBlock {
2640 fn from(content: UserMessageContent) -> Self {
2641 match content {
2642 UserMessageContent::Text(text) => text.into(),
2643 UserMessageContent::Image(image) => {
2644 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
2645 }
2646 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
2647 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
2648 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
2649 )),
2650 ),
2651 }
2652 }
2653}
2654
2655fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2656 LanguageModelImage {
2657 source: image_content.data.into(),
2658 // TODO: make this optional?
2659 size: gpui::Size::new(0.into(), 0.into()),
2660 }
2661}