1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SubagentTool, SystemPromptTemplate, Template, Templates,
6 TerminalTool, ThinkingTool, WebSearchTool,
7};
8use acp_thread::{MentionUri, UserMessageId};
9use action_log::ActionLog;
10use feature_flags::{FeatureFlagAppExt as _, SubagentsFeatureFlag};
11
12use agent_client_protocol as acp;
13use agent_settings::{
14 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
15 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
16};
17use anyhow::{Context as _, Result, anyhow};
18use chrono::{DateTime, Utc};
19use client::{ModelRequestUsage, RequestUsage, UserStore};
20use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
21use collections::{HashMap, HashSet, IndexMap};
22use fs::Fs;
23use futures::stream;
24use futures::{
25 FutureExt,
26 channel::{mpsc, oneshot},
27 future::Shared,
28 stream::FuturesUnordered,
29};
30use gpui::{
31 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
32};
33use language_model::{
34 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
35 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
36 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
37 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
38 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
39 ZED_CLOUD_PROVIDER_ID,
40};
41use project::Project;
42use prompt_store::ProjectContext;
43use schemars::{JsonSchema, Schema};
44use serde::{Deserialize, Serialize};
45use settings::{LanguageModelSelection, Settings, update_settings_file};
46use smol::stream::StreamExt;
47use std::{
48 collections::BTreeMap,
49 ops::RangeInclusive,
50 path::Path,
51 rc::Rc,
52 sync::Arc,
53 time::{Duration, Instant},
54};
55use std::{fmt::Write, path::PathBuf};
56use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
57use uuid::Uuid;
58
59const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
60pub const MAX_TOOL_NAME_LENGTH: usize = 64;
61
62/// The ID of the user prompt that initiated a request.
63///
64/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
65#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
66pub struct PromptId(Arc<str>);
67
68impl PromptId {
69 pub fn new() -> Self {
70 Self(Uuid::new_v4().to_string().into())
71 }
72}
73
74impl std::fmt::Display for PromptId {
75 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
76 write!(f, "{}", self.0)
77 }
78}
79
80pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
81pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
82
83#[derive(Debug, Clone)]
84enum RetryStrategy {
85 ExponentialBackoff {
86 initial_delay: Duration,
87 max_attempts: u8,
88 },
89 Fixed {
90 delay: Duration,
91 max_attempts: u8,
92 },
93}
94
95#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
96pub enum Message {
97 User(UserMessage),
98 Agent(AgentMessage),
99 Resume,
100}
101
102impl Message {
103 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
104 match self {
105 Message::Agent(agent_message) => Some(agent_message),
106 _ => None,
107 }
108 }
109
110 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
111 match self {
112 Message::User(message) => {
113 if message.content.is_empty() {
114 vec![]
115 } else {
116 vec![message.to_request()]
117 }
118 }
119 Message::Agent(message) => message.to_request(),
120 Message::Resume => vec![LanguageModelRequestMessage {
121 role: Role::User,
122 content: vec!["Continue where you left off".into()],
123 cache: false,
124 reasoning_details: None,
125 }],
126 }
127 }
128
129 pub fn to_markdown(&self) -> String {
130 match self {
131 Message::User(message) => message.to_markdown(),
132 Message::Agent(message) => message.to_markdown(),
133 Message::Resume => "[resume]\n".into(),
134 }
135 }
136
137 pub fn role(&self) -> Role {
138 match self {
139 Message::User(_) | Message::Resume => Role::User,
140 Message::Agent(_) => Role::Assistant,
141 }
142 }
143}
144
145#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
146pub struct UserMessage {
147 pub id: UserMessageId,
148 pub content: Vec<UserMessageContent>,
149}
150
151#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
152pub enum UserMessageContent {
153 Text(String),
154 Mention { uri: MentionUri, content: String },
155 Image(LanguageModelImage),
156}
157
158impl UserMessage {
159 pub fn to_markdown(&self) -> String {
160 let mut markdown = String::from("## User\n\n");
161
162 for content in &self.content {
163 match content {
164 UserMessageContent::Text(text) => {
165 markdown.push_str(text);
166 markdown.push('\n');
167 }
168 UserMessageContent::Image(_) => {
169 markdown.push_str("<image />\n");
170 }
171 UserMessageContent::Mention { uri, content } => {
172 if !content.is_empty() {
173 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
174 } else {
175 let _ = writeln!(&mut markdown, "{}", uri.as_link());
176 }
177 }
178 }
179 }
180
181 markdown
182 }
183
184 fn to_request(&self) -> LanguageModelRequestMessage {
185 let mut message = LanguageModelRequestMessage {
186 role: Role::User,
187 content: Vec::with_capacity(self.content.len()),
188 cache: false,
189 reasoning_details: None,
190 };
191
192 const OPEN_CONTEXT: &str = "<context>\n\
193 The following items were attached by the user. \
194 They are up-to-date and don't need to be re-read.\n\n";
195
196 const OPEN_FILES_TAG: &str = "<files>";
197 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
198 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
199 const OPEN_SELECTIONS_TAG: &str = "<selections>";
200 const OPEN_THREADS_TAG: &str = "<threads>";
201 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
202 const OPEN_RULES_TAG: &str =
203 "<rules>\nThe user has specified the following rules that should be applied:\n";
204
205 let mut file_context = OPEN_FILES_TAG.to_string();
206 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
207 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
208 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
209 let mut thread_context = OPEN_THREADS_TAG.to_string();
210 let mut fetch_context = OPEN_FETCH_TAG.to_string();
211 let mut rules_context = OPEN_RULES_TAG.to_string();
212
213 for chunk in &self.content {
214 let chunk = match chunk {
215 UserMessageContent::Text(text) => {
216 language_model::MessageContent::Text(text.clone())
217 }
218 UserMessageContent::Image(value) => {
219 language_model::MessageContent::Image(value.clone())
220 }
221 UserMessageContent::Mention { uri, content } => {
222 match uri {
223 MentionUri::File { abs_path } => {
224 write!(
225 &mut file_context,
226 "\n{}",
227 MarkdownCodeBlock {
228 tag: &codeblock_tag(abs_path, None),
229 text: &content.to_string(),
230 }
231 )
232 .ok();
233 }
234 MentionUri::PastedImage => {
235 debug_panic!("pasted image URI should not be used in mention content")
236 }
237 MentionUri::Directory { .. } => {
238 write!(&mut directory_context, "\n{}\n", content).ok();
239 }
240 MentionUri::Symbol {
241 abs_path: path,
242 line_range,
243 ..
244 } => {
245 write!(
246 &mut symbol_context,
247 "\n{}",
248 MarkdownCodeBlock {
249 tag: &codeblock_tag(path, Some(line_range)),
250 text: content
251 }
252 )
253 .ok();
254 }
255 MentionUri::Selection {
256 abs_path: path,
257 line_range,
258 ..
259 } => {
260 write!(
261 &mut selection_context,
262 "\n{}",
263 MarkdownCodeBlock {
264 tag: &codeblock_tag(
265 path.as_deref().unwrap_or("Untitled".as_ref()),
266 Some(line_range)
267 ),
268 text: content
269 }
270 )
271 .ok();
272 }
273 MentionUri::Thread { .. } => {
274 write!(&mut thread_context, "\n{}\n", content).ok();
275 }
276 MentionUri::TextThread { .. } => {
277 write!(&mut thread_context, "\n{}\n", content).ok();
278 }
279 MentionUri::Rule { .. } => {
280 write!(
281 &mut rules_context,
282 "\n{}",
283 MarkdownCodeBlock {
284 tag: "",
285 text: content
286 }
287 )
288 .ok();
289 }
290 MentionUri::Fetch { url } => {
291 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
292 }
293 }
294
295 language_model::MessageContent::Text(uri.as_link().to_string())
296 }
297 };
298
299 message.content.push(chunk);
300 }
301
302 let len_before_context = message.content.len();
303
304 if file_context.len() > OPEN_FILES_TAG.len() {
305 file_context.push_str("</files>\n");
306 message
307 .content
308 .push(language_model::MessageContent::Text(file_context));
309 }
310
311 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
312 directory_context.push_str("</directories>\n");
313 message
314 .content
315 .push(language_model::MessageContent::Text(directory_context));
316 }
317
318 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
319 symbol_context.push_str("</symbols>\n");
320 message
321 .content
322 .push(language_model::MessageContent::Text(symbol_context));
323 }
324
325 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
326 selection_context.push_str("</selections>\n");
327 message
328 .content
329 .push(language_model::MessageContent::Text(selection_context));
330 }
331
332 if thread_context.len() > OPEN_THREADS_TAG.len() {
333 thread_context.push_str("</threads>\n");
334 message
335 .content
336 .push(language_model::MessageContent::Text(thread_context));
337 }
338
339 if fetch_context.len() > OPEN_FETCH_TAG.len() {
340 fetch_context.push_str("</fetched_urls>\n");
341 message
342 .content
343 .push(language_model::MessageContent::Text(fetch_context));
344 }
345
346 if rules_context.len() > OPEN_RULES_TAG.len() {
347 rules_context.push_str("</user_rules>\n");
348 message
349 .content
350 .push(language_model::MessageContent::Text(rules_context));
351 }
352
353 if message.content.len() > len_before_context {
354 message.content.insert(
355 len_before_context,
356 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
357 );
358 message
359 .content
360 .push(language_model::MessageContent::Text("</context>".into()));
361 }
362
363 message
364 }
365}
366
367fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
368 let mut result = String::new();
369
370 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
371 let _ = write!(result, "{} ", extension);
372 }
373
374 let _ = write!(result, "{}", full_path.display());
375
376 if let Some(range) = line_range {
377 if range.start() == range.end() {
378 let _ = write!(result, ":{}", range.start() + 1);
379 } else {
380 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
381 }
382 }
383
384 result
385}
386
387impl AgentMessage {
388 pub fn to_markdown(&self) -> String {
389 let mut markdown = String::from("## Assistant\n\n");
390
391 for content in &self.content {
392 match content {
393 AgentMessageContent::Text(text) => {
394 markdown.push_str(text);
395 markdown.push('\n');
396 }
397 AgentMessageContent::Thinking { text, .. } => {
398 markdown.push_str("<think>");
399 markdown.push_str(text);
400 markdown.push_str("</think>\n");
401 }
402 AgentMessageContent::RedactedThinking(_) => {
403 markdown.push_str("<redacted_thinking />\n")
404 }
405 AgentMessageContent::ToolUse(tool_use) => {
406 markdown.push_str(&format!(
407 "**Tool Use**: {} (ID: {})\n",
408 tool_use.name, tool_use.id
409 ));
410 markdown.push_str(&format!(
411 "{}\n",
412 MarkdownCodeBlock {
413 tag: "json",
414 text: &format!("{:#}", tool_use.input)
415 }
416 ));
417 }
418 }
419 }
420
421 for tool_result in self.tool_results.values() {
422 markdown.push_str(&format!(
423 "**Tool Result**: {} (ID: {})\n\n",
424 tool_result.tool_name, tool_result.tool_use_id
425 ));
426 if tool_result.is_error {
427 markdown.push_str("**ERROR:**\n");
428 }
429
430 match &tool_result.content {
431 LanguageModelToolResultContent::Text(text) => {
432 writeln!(markdown, "{text}\n").ok();
433 }
434 LanguageModelToolResultContent::Image(_) => {
435 writeln!(markdown, "<image />\n").ok();
436 }
437 }
438
439 if let Some(output) = tool_result.output.as_ref() {
440 writeln!(
441 markdown,
442 "**Debug Output**:\n\n```json\n{}\n```\n",
443 serde_json::to_string_pretty(output).unwrap()
444 )
445 .unwrap();
446 }
447 }
448
449 markdown
450 }
451
452 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
453 let mut assistant_message = LanguageModelRequestMessage {
454 role: Role::Assistant,
455 content: Vec::with_capacity(self.content.len()),
456 cache: false,
457 reasoning_details: self.reasoning_details.clone(),
458 };
459 for chunk in &self.content {
460 match chunk {
461 AgentMessageContent::Text(text) => {
462 assistant_message
463 .content
464 .push(language_model::MessageContent::Text(text.clone()));
465 }
466 AgentMessageContent::Thinking { text, signature } => {
467 assistant_message
468 .content
469 .push(language_model::MessageContent::Thinking {
470 text: text.clone(),
471 signature: signature.clone(),
472 });
473 }
474 AgentMessageContent::RedactedThinking(value) => {
475 assistant_message.content.push(
476 language_model::MessageContent::RedactedThinking(value.clone()),
477 );
478 }
479 AgentMessageContent::ToolUse(tool_use) => {
480 if self.tool_results.contains_key(&tool_use.id) {
481 assistant_message
482 .content
483 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
484 }
485 }
486 };
487 }
488
489 let mut user_message = LanguageModelRequestMessage {
490 role: Role::User,
491 content: Vec::new(),
492 cache: false,
493 reasoning_details: None,
494 };
495
496 for tool_result in self.tool_results.values() {
497 let mut tool_result = tool_result.clone();
498 // Surprisingly, the API fails if we return an empty string here.
499 // It thinks we are sending a tool use without a tool result.
500 if tool_result.content.is_empty() {
501 tool_result.content = "<Tool returned an empty string>".into();
502 }
503 user_message
504 .content
505 .push(language_model::MessageContent::ToolResult(tool_result));
506 }
507
508 let mut messages = Vec::new();
509 if !assistant_message.content.is_empty() {
510 messages.push(assistant_message);
511 }
512 if !user_message.content.is_empty() {
513 messages.push(user_message);
514 }
515 messages
516 }
517}
518
519#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
520pub struct AgentMessage {
521 pub content: Vec<AgentMessageContent>,
522 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
523 pub reasoning_details: Option<serde_json::Value>,
524}
525
526#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
527pub enum AgentMessageContent {
528 Text(String),
529 Thinking {
530 text: String,
531 signature: Option<String>,
532 },
533 RedactedThinking(String),
534 ToolUse(LanguageModelToolUse),
535}
536
537pub trait TerminalHandle {
538 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
539 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
540 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
541 fn kill(&self, cx: &AsyncApp) -> Result<()>;
542 fn was_stopped_by_user(&self, cx: &AsyncApp) -> Result<bool>;
543}
544
545pub trait ThreadEnvironment {
546 fn create_terminal(
547 &self,
548 command: String,
549 cwd: Option<PathBuf>,
550 output_byte_limit: Option<u64>,
551 cx: &mut AsyncApp,
552 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
553}
554
555#[derive(Debug)]
556pub enum ThreadEvent {
557 UserMessage(UserMessage),
558 AgentText(String),
559 AgentThinking(String),
560 ToolCall(acp::ToolCall),
561 ToolCallUpdate(acp_thread::ToolCallUpdate),
562 ToolCallAuthorization(ToolCallAuthorization),
563 Retry(acp_thread::RetryStatus),
564 Stop(acp::StopReason),
565}
566
567#[derive(Debug)]
568pub struct NewTerminal {
569 pub command: String,
570 pub output_byte_limit: Option<u64>,
571 pub cwd: Option<PathBuf>,
572 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
573}
574
575#[derive(Debug)]
576pub struct ToolCallAuthorization {
577 pub tool_call: acp::ToolCallUpdate,
578 pub options: Vec<acp::PermissionOption>,
579 pub response: oneshot::Sender<acp::PermissionOptionId>,
580}
581
582#[derive(Debug, thiserror::Error)]
583enum CompletionError {
584 #[error("max tokens")]
585 MaxTokens,
586 #[error("refusal")]
587 Refusal,
588 #[error(transparent)]
589 Other(#[from] anyhow::Error),
590}
591
592pub struct Thread {
593 id: acp::SessionId,
594 prompt_id: PromptId,
595 updated_at: DateTime<Utc>,
596 title: Option<SharedString>,
597 pending_title_generation: Option<Task<()>>,
598 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
599 summary: Option<SharedString>,
600 messages: Vec<Message>,
601 user_store: Entity<UserStore>,
602 completion_mode: CompletionMode,
603 /// Holds the task that handles agent interaction until the end of the turn.
604 /// Survives across multiple requests as the model performs tool calls and
605 /// we run tools, report their results.
606 running_turn: Option<RunningTurn>,
607 pending_message: Option<AgentMessage>,
608 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
609 tool_use_limit_reached: bool,
610 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
611 #[allow(unused)]
612 cumulative_token_usage: TokenUsage,
613 #[allow(unused)]
614 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
615 context_server_registry: Entity<ContextServerRegistry>,
616 profile_id: AgentProfileId,
617 project_context: Entity<ProjectContext>,
618 templates: Arc<Templates>,
619 model: Option<Arc<dyn LanguageModel>>,
620 summarization_model: Option<Arc<dyn LanguageModel>>,
621 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
622 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
623 pub(crate) project: Entity<Project>,
624 pub(crate) action_log: Entity<ActionLog>,
625 /// Tracks the last time files were read by the agent, to detect external modifications
626 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
627 /// True if this thread was imported from a shared thread and can be synced.
628 imported: bool,
629}
630
631impl Thread {
632 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
633 let image = model.map_or(true, |model| model.supports_images());
634 acp::PromptCapabilities::new()
635 .image(image)
636 .embedded_context(true)
637 }
638
639 pub fn new(
640 project: Entity<Project>,
641 project_context: Entity<ProjectContext>,
642 context_server_registry: Entity<ContextServerRegistry>,
643 templates: Arc<Templates>,
644 model: Option<Arc<dyn LanguageModel>>,
645 cx: &mut Context<Self>,
646 ) -> Self {
647 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
648 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
649 let (prompt_capabilities_tx, prompt_capabilities_rx) =
650 watch::channel(Self::prompt_capabilities(model.as_deref()));
651 Self {
652 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
653 prompt_id: PromptId::new(),
654 updated_at: Utc::now(),
655 title: None,
656 pending_title_generation: None,
657 pending_summary_generation: None,
658 summary: None,
659 messages: Vec::new(),
660 user_store: project.read(cx).user_store(),
661 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
662 running_turn: None,
663 pending_message: None,
664 tools: BTreeMap::default(),
665 tool_use_limit_reached: false,
666 request_token_usage: HashMap::default(),
667 cumulative_token_usage: TokenUsage::default(),
668 initial_project_snapshot: {
669 let project_snapshot = Self::project_snapshot(project.clone(), cx);
670 cx.foreground_executor()
671 .spawn(async move { Some(project_snapshot.await) })
672 .shared()
673 },
674 context_server_registry,
675 profile_id,
676 project_context,
677 templates,
678 model,
679 summarization_model: None,
680 prompt_capabilities_tx,
681 prompt_capabilities_rx,
682 project,
683 action_log,
684 file_read_times: HashMap::default(),
685 imported: false,
686 }
687 }
688
689 pub fn id(&self) -> &acp::SessionId {
690 &self.id
691 }
692
693 /// Returns true if this thread was imported from a shared thread.
694 pub fn is_imported(&self) -> bool {
695 self.imported
696 }
697
698 pub fn replay(
699 &mut self,
700 cx: &mut Context<Self>,
701 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
702 let (tx, rx) = mpsc::unbounded();
703 let stream = ThreadEventStream(tx);
704 for message in &self.messages {
705 match message {
706 Message::User(user_message) => stream.send_user_message(user_message),
707 Message::Agent(assistant_message) => {
708 for content in &assistant_message.content {
709 match content {
710 AgentMessageContent::Text(text) => stream.send_text(text),
711 AgentMessageContent::Thinking { text, .. } => {
712 stream.send_thinking(text)
713 }
714 AgentMessageContent::RedactedThinking(_) => {}
715 AgentMessageContent::ToolUse(tool_use) => {
716 self.replay_tool_call(
717 tool_use,
718 assistant_message.tool_results.get(&tool_use.id),
719 &stream,
720 cx,
721 );
722 }
723 }
724 }
725 }
726 Message::Resume => {}
727 }
728 }
729 rx
730 }
731
732 fn replay_tool_call(
733 &self,
734 tool_use: &LanguageModelToolUse,
735 tool_result: Option<&LanguageModelToolResult>,
736 stream: &ThreadEventStream,
737 cx: &mut Context<Self>,
738 ) {
739 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
740 self.context_server_registry
741 .read(cx)
742 .servers()
743 .find_map(|(_, tools)| {
744 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
745 Some(tool.clone())
746 } else {
747 None
748 }
749 })
750 });
751
752 let Some(tool) = tool else {
753 stream
754 .0
755 .unbounded_send(Ok(ThreadEvent::ToolCall(
756 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
757 .status(acp::ToolCallStatus::Failed)
758 .raw_input(tool_use.input.clone()),
759 )))
760 .ok();
761 return;
762 };
763
764 let title = tool.initial_title(tool_use.input.clone(), cx);
765 let kind = tool.kind();
766 stream.send_tool_call(
767 &tool_use.id,
768 &tool_use.name,
769 title,
770 kind,
771 tool_use.input.clone(),
772 );
773
774 let output = tool_result
775 .as_ref()
776 .and_then(|result| result.output.clone());
777 if let Some(output) = output.clone() {
778 // For replay, we use a dummy cancellation receiver since the tool already completed
779 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
780 let tool_event_stream = ToolCallEventStream::new(
781 tool_use.id.clone(),
782 stream.clone(),
783 Some(self.project.read(cx).fs().clone()),
784 cancellation_rx,
785 );
786 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
787 .log_err();
788 }
789
790 stream.update_tool_call_fields(
791 &tool_use.id,
792 acp::ToolCallUpdateFields::new()
793 .status(
794 tool_result
795 .as_ref()
796 .map_or(acp::ToolCallStatus::Failed, |result| {
797 if result.is_error {
798 acp::ToolCallStatus::Failed
799 } else {
800 acp::ToolCallStatus::Completed
801 }
802 }),
803 )
804 .raw_output(output),
805 );
806 }
807
808 pub fn from_db(
809 id: acp::SessionId,
810 db_thread: DbThread,
811 project: Entity<Project>,
812 project_context: Entity<ProjectContext>,
813 context_server_registry: Entity<ContextServerRegistry>,
814 templates: Arc<Templates>,
815 cx: &mut Context<Self>,
816 ) -> Self {
817 let profile_id = db_thread
818 .profile
819 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
820
821 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
822 db_thread
823 .model
824 .and_then(|model| {
825 let model = SelectedModel {
826 provider: model.provider.clone().into(),
827 model: model.model.into(),
828 };
829 registry.select_model(&model, cx)
830 })
831 .or_else(|| registry.default_model())
832 .map(|model| model.model)
833 });
834
835 if model.is_none() {
836 model = Self::resolve_profile_model(&profile_id, cx);
837 }
838 if model.is_none() {
839 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
840 registry.default_model().map(|model| model.model)
841 });
842 }
843
844 let (prompt_capabilities_tx, prompt_capabilities_rx) =
845 watch::channel(Self::prompt_capabilities(model.as_deref()));
846
847 let action_log = cx.new(|_| ActionLog::new(project.clone()));
848
849 Self {
850 id,
851 prompt_id: PromptId::new(),
852 title: if db_thread.title.is_empty() {
853 None
854 } else {
855 Some(db_thread.title.clone())
856 },
857 pending_title_generation: None,
858 pending_summary_generation: None,
859 summary: db_thread.detailed_summary,
860 messages: db_thread.messages,
861 user_store: project.read(cx).user_store(),
862 completion_mode: db_thread.completion_mode.unwrap_or_default(),
863 running_turn: None,
864 pending_message: None,
865 tools: BTreeMap::default(),
866 tool_use_limit_reached: false,
867 request_token_usage: db_thread.request_token_usage.clone(),
868 cumulative_token_usage: db_thread.cumulative_token_usage,
869 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
870 context_server_registry,
871 profile_id,
872 project_context,
873 templates,
874 model,
875 summarization_model: None,
876 project,
877 action_log,
878 updated_at: db_thread.updated_at,
879 prompt_capabilities_tx,
880 prompt_capabilities_rx,
881 file_read_times: HashMap::default(),
882 imported: db_thread.imported,
883 }
884 }
885
886 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
887 let initial_project_snapshot = self.initial_project_snapshot.clone();
888 let mut thread = DbThread {
889 title: self.title(),
890 messages: self.messages.clone(),
891 updated_at: self.updated_at,
892 detailed_summary: self.summary.clone(),
893 initial_project_snapshot: None,
894 cumulative_token_usage: self.cumulative_token_usage,
895 request_token_usage: self.request_token_usage.clone(),
896 model: self.model.as_ref().map(|model| DbLanguageModel {
897 provider: model.provider_id().to_string(),
898 model: model.name().0.to_string(),
899 }),
900 completion_mode: Some(self.completion_mode),
901 profile: Some(self.profile_id.clone()),
902 imported: self.imported,
903 };
904
905 cx.background_spawn(async move {
906 let initial_project_snapshot = initial_project_snapshot.await;
907 thread.initial_project_snapshot = initial_project_snapshot;
908 thread
909 })
910 }
911
912 /// Create a snapshot of the current project state including git information and unsaved buffers.
913 fn project_snapshot(
914 project: Entity<Project>,
915 cx: &mut Context<Self>,
916 ) -> Task<Arc<ProjectSnapshot>> {
917 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
918 cx.spawn(async move |_, _| {
919 let snapshot = task.await;
920
921 Arc::new(ProjectSnapshot {
922 worktree_snapshots: snapshot.worktree_snapshots,
923 timestamp: Utc::now(),
924 })
925 })
926 }
927
928 pub fn project_context(&self) -> &Entity<ProjectContext> {
929 &self.project_context
930 }
931
932 pub fn project(&self) -> &Entity<Project> {
933 &self.project
934 }
935
936 pub fn action_log(&self) -> &Entity<ActionLog> {
937 &self.action_log
938 }
939
940 pub fn is_empty(&self) -> bool {
941 self.messages.is_empty() && self.title.is_none()
942 }
943
944 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
945 self.model.as_ref()
946 }
947
948 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
949 let old_usage = self.latest_token_usage();
950 self.model = Some(model);
951 let new_caps = Self::prompt_capabilities(self.model.as_deref());
952 let new_usage = self.latest_token_usage();
953 if old_usage != new_usage {
954 cx.emit(TokenUsageUpdated(new_usage));
955 }
956 self.prompt_capabilities_tx.send(new_caps).log_err();
957 cx.notify()
958 }
959
960 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
961 self.summarization_model.as_ref()
962 }
963
964 pub fn set_summarization_model(
965 &mut self,
966 model: Option<Arc<dyn LanguageModel>>,
967 cx: &mut Context<Self>,
968 ) {
969 self.summarization_model = model;
970 cx.notify()
971 }
972
973 pub fn completion_mode(&self) -> CompletionMode {
974 self.completion_mode
975 }
976
977 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
978 let old_usage = self.latest_token_usage();
979 self.completion_mode = mode;
980 let new_usage = self.latest_token_usage();
981 if old_usage != new_usage {
982 cx.emit(TokenUsageUpdated(new_usage));
983 }
984 cx.notify()
985 }
986
987 #[cfg(any(test, feature = "test-support"))]
988 pub fn last_message(&self) -> Option<Message> {
989 if let Some(message) = self.pending_message.clone() {
990 Some(Message::Agent(message))
991 } else {
992 self.messages.last().cloned()
993 }
994 }
995
996 pub fn add_default_tools(
997 &mut self,
998 environment: Rc<dyn ThreadEnvironment>,
999 cx: &mut Context<Self>,
1000 ) {
1001 let language_registry = self.project.read(cx).languages().clone();
1002 self.add_tool(CopyPathTool::new(self.project.clone()));
1003 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1004 self.add_tool(DeletePathTool::new(
1005 self.project.clone(),
1006 self.action_log.clone(),
1007 ));
1008 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1009 self.add_tool(EditFileTool::new(
1010 self.project.clone(),
1011 cx.weak_entity(),
1012 language_registry,
1013 Templates::new(),
1014 ));
1015 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1016 self.add_tool(FindPathTool::new(self.project.clone()));
1017 self.add_tool(GrepTool::new(self.project.clone()));
1018 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1019 self.add_tool(MovePathTool::new(self.project.clone()));
1020 self.add_tool(NowTool);
1021 self.add_tool(OpenTool::new(self.project.clone()));
1022 self.add_tool(ReadFileTool::new(
1023 cx.weak_entity(),
1024 self.project.clone(),
1025 self.action_log.clone(),
1026 ));
1027 self.add_tool(SaveFileTool::new(self.project.clone()));
1028 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1029 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1030 self.add_tool(ThinkingTool);
1031 self.add_tool(WebSearchTool);
1032
1033 if cx.has_flag::<SubagentsFeatureFlag>() {
1034 self.add_tool(SubagentTool::new());
1035 }
1036 }
1037
1038 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1039 self.tools.insert(T::name().into(), tool.erase());
1040 }
1041
1042 pub fn remove_tool(&mut self, name: &str) -> bool {
1043 self.tools.remove(name).is_some()
1044 }
1045
1046 pub fn profile(&self) -> &AgentProfileId {
1047 &self.profile_id
1048 }
1049
1050 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1051 if self.profile_id == profile_id {
1052 return;
1053 }
1054
1055 self.profile_id = profile_id;
1056
1057 // Swap to the profile's preferred model when available.
1058 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1059 self.set_model(model, cx);
1060 }
1061 }
1062
1063 pub fn cancel(&mut self, cx: &mut Context<Self>) -> Task<()> {
1064 let Some(running_turn) = self.running_turn.take() else {
1065 self.flush_pending_message(cx);
1066 return Task::ready(());
1067 };
1068
1069 let turn_task = running_turn.cancel();
1070
1071 cx.spawn(async move |this, cx| {
1072 turn_task.await;
1073 this.update(cx, |this, cx| {
1074 this.flush_pending_message(cx);
1075 })
1076 .ok();
1077 })
1078 }
1079
1080 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1081 let Some(last_user_message) = self.last_user_message() else {
1082 return;
1083 };
1084
1085 self.request_token_usage
1086 .insert(last_user_message.id.clone(), update);
1087 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1088 cx.notify();
1089 }
1090
1091 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1092 self.cancel(cx).detach();
1093 // Clear pending message since cancel will try to flush it asynchronously,
1094 // and we don't want that content to be added after we truncate
1095 self.pending_message.take();
1096 let Some(position) = self.messages.iter().position(
1097 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1098 ) else {
1099 return Err(anyhow!("Message not found"));
1100 };
1101
1102 for message in self.messages.drain(position..) {
1103 match message {
1104 Message::User(message) => {
1105 self.request_token_usage.remove(&message.id);
1106 }
1107 Message::Agent(_) | Message::Resume => {}
1108 }
1109 }
1110 self.clear_summary();
1111 cx.notify();
1112 Ok(())
1113 }
1114
1115 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1116 let last_user_message = self.last_user_message()?;
1117 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1118 Some(*tokens)
1119 }
1120
1121 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1122 let usage = self.latest_request_token_usage()?;
1123 let model = self.model.clone()?;
1124 Some(acp_thread::TokenUsage {
1125 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1126 used_tokens: usage.total_tokens(),
1127 output_tokens: usage.output_tokens,
1128 })
1129 }
1130
1131 /// Get the total input token count as of the message before the given message.
1132 ///
1133 /// Returns `None` if:
1134 /// - `target_id` is the first message (no previous message)
1135 /// - The previous message hasn't received a response yet (no usage data)
1136 /// - `target_id` is not found in the messages
1137 pub fn tokens_before_message(&self, target_id: &UserMessageId) -> Option<u64> {
1138 let mut previous_user_message_id: Option<&UserMessageId> = None;
1139
1140 for message in &self.messages {
1141 if let Message::User(user_msg) = message {
1142 if &user_msg.id == target_id {
1143 let prev_id = previous_user_message_id?;
1144 let usage = self.request_token_usage.get(prev_id)?;
1145 return Some(usage.input_tokens);
1146 }
1147 previous_user_message_id = Some(&user_msg.id);
1148 }
1149 }
1150 None
1151 }
1152
1153 /// Look up the active profile and resolve its preferred model if one is configured.
1154 fn resolve_profile_model(
1155 profile_id: &AgentProfileId,
1156 cx: &mut Context<Self>,
1157 ) -> Option<Arc<dyn LanguageModel>> {
1158 let selection = AgentSettings::get_global(cx)
1159 .profiles
1160 .get(profile_id)?
1161 .default_model
1162 .clone()?;
1163 Self::resolve_model_from_selection(&selection, cx)
1164 }
1165
1166 /// Translate a stored model selection into the configured model from the registry.
1167 fn resolve_model_from_selection(
1168 selection: &LanguageModelSelection,
1169 cx: &mut Context<Self>,
1170 ) -> Option<Arc<dyn LanguageModel>> {
1171 let selected = SelectedModel {
1172 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1173 model: LanguageModelId::from(selection.model.clone()),
1174 };
1175 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1176 registry
1177 .select_model(&selected, cx)
1178 .map(|configured| configured.model)
1179 })
1180 }
1181
1182 pub fn resume(
1183 &mut self,
1184 cx: &mut Context<Self>,
1185 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1186 self.messages.push(Message::Resume);
1187 cx.notify();
1188
1189 log::debug!("Total messages in thread: {}", self.messages.len());
1190 self.run_turn(cx)
1191 }
1192
1193 /// Sending a message results in the model streaming a response, which could include tool calls.
1194 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1195 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1196 pub fn send<T>(
1197 &mut self,
1198 id: UserMessageId,
1199 content: impl IntoIterator<Item = T>,
1200 cx: &mut Context<Self>,
1201 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1202 where
1203 T: Into<UserMessageContent>,
1204 {
1205 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1206 log::debug!("Thread::send content: {:?}", content);
1207
1208 self.messages
1209 .push(Message::User(UserMessage { id, content }));
1210 cx.notify();
1211
1212 self.send_existing(cx)
1213 }
1214
1215 pub fn send_existing(
1216 &mut self,
1217 cx: &mut Context<Self>,
1218 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1219 let model = self.model().context("No language model configured")?;
1220
1221 log::info!("Thread::send called with model: {}", model.name().0);
1222 self.advance_prompt_id();
1223
1224 log::debug!("Total messages in thread: {}", self.messages.len());
1225 self.run_turn(cx)
1226 }
1227
1228 pub fn push_acp_user_block(
1229 &mut self,
1230 id: UserMessageId,
1231 blocks: impl IntoIterator<Item = acp::ContentBlock>,
1232 path_style: PathStyle,
1233 cx: &mut Context<Self>,
1234 ) {
1235 let content = blocks
1236 .into_iter()
1237 .map(|block| UserMessageContent::from_content_block(block, path_style))
1238 .collect::<Vec<_>>();
1239 self.messages
1240 .push(Message::User(UserMessage { id, content }));
1241 cx.notify();
1242 }
1243
1244 pub fn push_acp_agent_block(&mut self, block: acp::ContentBlock, cx: &mut Context<Self>) {
1245 let text = match block {
1246 acp::ContentBlock::Text(text_content) => text_content.text,
1247 acp::ContentBlock::Image(_) => "[image]".to_string(),
1248 acp::ContentBlock::Audio(_) => "[audio]".to_string(),
1249 acp::ContentBlock::ResourceLink(resource_link) => resource_link.uri,
1250 acp::ContentBlock::Resource(resource) => match resource.resource {
1251 acp::EmbeddedResourceResource::TextResourceContents(resource) => resource.uri,
1252 acp::EmbeddedResourceResource::BlobResourceContents(resource) => resource.uri,
1253 _ => "[resource]".to_string(),
1254 },
1255 _ => "[unknown]".to_string(),
1256 };
1257
1258 self.messages.push(Message::Agent(AgentMessage {
1259 content: vec![AgentMessageContent::Text(text)],
1260 ..Default::default()
1261 }));
1262 cx.notify();
1263 }
1264
1265 #[cfg(feature = "eval")]
1266 pub fn proceed(
1267 &mut self,
1268 cx: &mut Context<Self>,
1269 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1270 self.run_turn(cx)
1271 }
1272
1273 fn run_turn(
1274 &mut self,
1275 cx: &mut Context<Self>,
1276 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1277 // Flush the old pending message synchronously before cancelling,
1278 // to avoid a race where the detached cancel task might flush the NEW
1279 // turn's pending message instead of the old one.
1280 self.flush_pending_message(cx);
1281 self.cancel(cx).detach();
1282
1283 let model = self.model.clone().context("No language model configured")?;
1284 let profile = AgentSettings::get_global(cx)
1285 .profiles
1286 .get(&self.profile_id)
1287 .context("Profile not found")?;
1288 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1289 let event_stream = ThreadEventStream(events_tx);
1290 let message_ix = self.messages.len().saturating_sub(1);
1291 self.tool_use_limit_reached = false;
1292 self.clear_summary();
1293 let (cancellation_tx, mut cancellation_rx) = watch::channel(false);
1294 self.running_turn = Some(RunningTurn {
1295 event_stream: event_stream.clone(),
1296 tools: self.enabled_tools(profile, &model, cx),
1297 cancellation_tx,
1298 _task: cx.spawn(async move |this, cx| {
1299 log::debug!("Starting agent turn execution");
1300
1301 let turn_result = Self::run_turn_internal(
1302 &this,
1303 model,
1304 &event_stream,
1305 cancellation_rx.clone(),
1306 cx,
1307 )
1308 .await;
1309
1310 // Check if we were cancelled - if so, cancel() already took running_turn
1311 // and we shouldn't touch it (it might be a NEW turn now)
1312 let was_cancelled = *cancellation_rx.borrow();
1313 if was_cancelled {
1314 log::debug!("Turn was cancelled, skipping cleanup");
1315 return;
1316 }
1317
1318 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1319
1320 match turn_result {
1321 Ok(()) => {
1322 log::debug!("Turn execution completed");
1323 event_stream.send_stop(acp::StopReason::EndTurn);
1324 }
1325 Err(error) => {
1326 log::error!("Turn execution failed: {:?}", error);
1327 match error.downcast::<CompletionError>() {
1328 Ok(CompletionError::Refusal) => {
1329 event_stream.send_stop(acp::StopReason::Refusal);
1330 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1331 }
1332 Ok(CompletionError::MaxTokens) => {
1333 event_stream.send_stop(acp::StopReason::MaxTokens);
1334 }
1335 Ok(CompletionError::Other(error)) | Err(error) => {
1336 event_stream.send_error(error);
1337 }
1338 }
1339 }
1340 }
1341
1342 _ = this.update(cx, |this, _| this.running_turn.take());
1343 }),
1344 });
1345 Ok(events_rx)
1346 }
1347
1348 async fn run_turn_internal(
1349 this: &WeakEntity<Self>,
1350 model: Arc<dyn LanguageModel>,
1351 event_stream: &ThreadEventStream,
1352 mut cancellation_rx: watch::Receiver<bool>,
1353 cx: &mut AsyncApp,
1354 ) -> Result<()> {
1355 let mut attempt = 0;
1356 let mut intent = CompletionIntent::UserPrompt;
1357 loop {
1358 let request =
1359 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1360
1361 telemetry::event!(
1362 "Agent Thread Completion",
1363 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1364 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1365 model = model.telemetry_id(),
1366 model_provider = model.provider_id().to_string(),
1367 attempt
1368 );
1369
1370 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1371
1372 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1373 Ok(events) => (events, None),
1374 Err(err) => (stream::empty().boxed(), Some(err)),
1375 };
1376 let mut tool_results = FuturesUnordered::new();
1377 let mut cancelled = false;
1378 loop {
1379 // Race between getting the next event and cancellation
1380 let event = futures::select! {
1381 event = events.next().fuse() => event,
1382 _ = cancellation_rx.changed().fuse() => {
1383 if *cancellation_rx.borrow() {
1384 cancelled = true;
1385 break;
1386 }
1387 continue;
1388 }
1389 };
1390 let Some(event) = event else {
1391 break;
1392 };
1393 log::trace!("Received completion event: {:?}", event);
1394 match event {
1395 Ok(event) => {
1396 tool_results.extend(this.update(cx, |this, cx| {
1397 this.handle_completion_event(
1398 event,
1399 event_stream,
1400 cancellation_rx.clone(),
1401 cx,
1402 )
1403 })??);
1404 }
1405 Err(err) => {
1406 error = Some(err);
1407 break;
1408 }
1409 }
1410 }
1411
1412 let end_turn = tool_results.is_empty();
1413 while let Some(tool_result) = tool_results.next().await {
1414 log::debug!("Tool finished {:?}", tool_result);
1415
1416 event_stream.update_tool_call_fields(
1417 &tool_result.tool_use_id,
1418 acp::ToolCallUpdateFields::new()
1419 .status(if tool_result.is_error {
1420 acp::ToolCallStatus::Failed
1421 } else {
1422 acp::ToolCallStatus::Completed
1423 })
1424 .raw_output(tool_result.output.clone()),
1425 );
1426 this.update(cx, |this, _cx| {
1427 this.pending_message()
1428 .tool_results
1429 .insert(tool_result.tool_use_id.clone(), tool_result);
1430 })?;
1431 }
1432
1433 this.update(cx, |this, cx| {
1434 this.flush_pending_message(cx);
1435 if this.title.is_none() && this.pending_title_generation.is_none() {
1436 this.generate_title(cx);
1437 }
1438 })?;
1439
1440 if cancelled {
1441 log::debug!("Turn cancelled by user, exiting");
1442 return Ok(());
1443 }
1444
1445 if let Some(error) = error {
1446 attempt += 1;
1447 let retry = this.update(cx, |this, cx| {
1448 let user_store = this.user_store.read(cx);
1449 this.handle_completion_error(error, attempt, user_store.plan())
1450 })??;
1451 let timer = cx.background_executor().timer(retry.duration);
1452 event_stream.send_retry(retry);
1453 timer.await;
1454 this.update(cx, |this, _cx| {
1455 if let Some(Message::Agent(message)) = this.messages.last() {
1456 if message.tool_results.is_empty() {
1457 intent = CompletionIntent::UserPrompt;
1458 this.messages.push(Message::Resume);
1459 }
1460 }
1461 })?;
1462 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1463 return Err(language_model::ToolUseLimitReachedError.into());
1464 } else if end_turn {
1465 return Ok(());
1466 } else {
1467 intent = CompletionIntent::ToolResults;
1468 attempt = 0;
1469 }
1470 }
1471 }
1472
1473 fn handle_completion_error(
1474 &mut self,
1475 error: LanguageModelCompletionError,
1476 attempt: u8,
1477 plan: Option<Plan>,
1478 ) -> Result<acp_thread::RetryStatus> {
1479 let Some(model) = self.model.as_ref() else {
1480 return Err(anyhow!(error));
1481 };
1482
1483 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1484 match plan {
1485 Some(Plan::V2(_)) => true,
1486 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1487 None => false,
1488 }
1489 } else {
1490 true
1491 };
1492
1493 if !auto_retry {
1494 return Err(anyhow!(error));
1495 }
1496
1497 let Some(strategy) = Self::retry_strategy_for(&error) else {
1498 return Err(anyhow!(error));
1499 };
1500
1501 let max_attempts = match &strategy {
1502 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1503 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1504 };
1505
1506 if attempt > max_attempts {
1507 return Err(anyhow!(error));
1508 }
1509
1510 let delay = match &strategy {
1511 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1512 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1513 Duration::from_secs(delay_secs)
1514 }
1515 RetryStrategy::Fixed { delay, .. } => *delay,
1516 };
1517 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1518
1519 Ok(acp_thread::RetryStatus {
1520 last_error: error.to_string().into(),
1521 attempt: attempt as usize,
1522 max_attempts: max_attempts as usize,
1523 started_at: Instant::now(),
1524 duration: delay,
1525 })
1526 }
1527
1528 /// A helper method that's called on every streamed completion event.
1529 /// Returns an optional tool result task, which the main agentic loop will
1530 /// send back to the model when it resolves.
1531 fn handle_completion_event(
1532 &mut self,
1533 event: LanguageModelCompletionEvent,
1534 event_stream: &ThreadEventStream,
1535 cancellation_rx: watch::Receiver<bool>,
1536 cx: &mut Context<Self>,
1537 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1538 log::trace!("Handling streamed completion event: {:?}", event);
1539 use LanguageModelCompletionEvent::*;
1540
1541 match event {
1542 StartMessage { .. } => {
1543 self.flush_pending_message(cx);
1544 self.pending_message = Some(AgentMessage::default());
1545 }
1546 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1547 Thinking { text, signature } => {
1548 self.handle_thinking_event(text, signature, event_stream, cx)
1549 }
1550 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1551 ReasoningDetails(details) => {
1552 let last_message = self.pending_message();
1553 // Store the last non-empty reasoning_details (overwrites earlier ones)
1554 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1555 if let serde_json::Value::Array(ref arr) = details {
1556 if !arr.is_empty() {
1557 last_message.reasoning_details = Some(details);
1558 }
1559 } else {
1560 last_message.reasoning_details = Some(details);
1561 }
1562 }
1563 ToolUse(tool_use) => {
1564 return Ok(self.handle_tool_use_event(tool_use, event_stream, cancellation_rx, cx));
1565 }
1566 ToolUseJsonParseError {
1567 id,
1568 tool_name,
1569 raw_input,
1570 json_parse_error,
1571 } => {
1572 return Ok(Some(Task::ready(
1573 self.handle_tool_use_json_parse_error_event(
1574 id,
1575 tool_name,
1576 raw_input,
1577 json_parse_error,
1578 ),
1579 )));
1580 }
1581 UsageUpdate(usage) => {
1582 telemetry::event!(
1583 "Agent Thread Completion Usage Updated",
1584 thread_id = self.id.to_string(),
1585 prompt_id = self.prompt_id.to_string(),
1586 model = self.model.as_ref().map(|m| m.telemetry_id()),
1587 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1588 input_tokens = usage.input_tokens,
1589 output_tokens = usage.output_tokens,
1590 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1591 cache_read_input_tokens = usage.cache_read_input_tokens,
1592 );
1593 self.update_token_usage(usage, cx);
1594 }
1595 UsageUpdated { amount, limit } => {
1596 self.update_model_request_usage(amount, limit, cx);
1597 }
1598 ToolUseLimitReached => {
1599 self.tool_use_limit_reached = true;
1600 }
1601 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1602 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1603 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1604 Started | Queued { .. } => {}
1605 }
1606
1607 Ok(None)
1608 }
1609
1610 fn handle_text_event(
1611 &mut self,
1612 new_text: String,
1613 event_stream: &ThreadEventStream,
1614 cx: &mut Context<Self>,
1615 ) {
1616 event_stream.send_text(&new_text);
1617
1618 let last_message = self.pending_message();
1619 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1620 text.push_str(&new_text);
1621 } else {
1622 last_message
1623 .content
1624 .push(AgentMessageContent::Text(new_text));
1625 }
1626
1627 cx.notify();
1628 }
1629
1630 fn handle_thinking_event(
1631 &mut self,
1632 new_text: String,
1633 new_signature: Option<String>,
1634 event_stream: &ThreadEventStream,
1635 cx: &mut Context<Self>,
1636 ) {
1637 event_stream.send_thinking(&new_text);
1638
1639 let last_message = self.pending_message();
1640 if let Some(AgentMessageContent::Thinking { text, signature }) =
1641 last_message.content.last_mut()
1642 {
1643 text.push_str(&new_text);
1644 *signature = new_signature.or(signature.take());
1645 } else {
1646 last_message.content.push(AgentMessageContent::Thinking {
1647 text: new_text,
1648 signature: new_signature,
1649 });
1650 }
1651
1652 cx.notify();
1653 }
1654
1655 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1656 let last_message = self.pending_message();
1657 last_message
1658 .content
1659 .push(AgentMessageContent::RedactedThinking(data));
1660 cx.notify();
1661 }
1662
1663 fn handle_tool_use_event(
1664 &mut self,
1665 tool_use: LanguageModelToolUse,
1666 event_stream: &ThreadEventStream,
1667 cancellation_rx: watch::Receiver<bool>,
1668 cx: &mut Context<Self>,
1669 ) -> Option<Task<LanguageModelToolResult>> {
1670 cx.notify();
1671
1672 let tool = self.tool(tool_use.name.as_ref());
1673 let mut title = SharedString::from(&tool_use.name);
1674 let mut kind = acp::ToolKind::Other;
1675 if let Some(tool) = tool.as_ref() {
1676 title = tool.initial_title(tool_use.input.clone(), cx);
1677 kind = tool.kind();
1678 }
1679
1680 // Ensure the last message ends in the current tool use
1681 let last_message = self.pending_message();
1682 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1683 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1684 if last_tool_use.id == tool_use.id {
1685 *last_tool_use = tool_use.clone();
1686 false
1687 } else {
1688 true
1689 }
1690 } else {
1691 true
1692 }
1693 });
1694
1695 if push_new_tool_use {
1696 event_stream.send_tool_call(
1697 &tool_use.id,
1698 &tool_use.name,
1699 title,
1700 kind,
1701 tool_use.input.clone(),
1702 );
1703 last_message
1704 .content
1705 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1706 } else {
1707 event_stream.update_tool_call_fields(
1708 &tool_use.id,
1709 acp::ToolCallUpdateFields::new()
1710 .title(title.as_str())
1711 .kind(kind)
1712 .raw_input(tool_use.input.clone()),
1713 );
1714 }
1715
1716 if !tool_use.is_input_complete {
1717 return None;
1718 }
1719
1720 let Some(tool) = tool else {
1721 let content = format!("No tool named {} exists", tool_use.name);
1722 return Some(Task::ready(LanguageModelToolResult {
1723 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1724 tool_use_id: tool_use.id,
1725 tool_name: tool_use.name,
1726 is_error: true,
1727 output: None,
1728 }));
1729 };
1730
1731 let fs = self.project.read(cx).fs().clone();
1732 let tool_event_stream = ToolCallEventStream::new(
1733 tool_use.id.clone(),
1734 event_stream.clone(),
1735 Some(fs),
1736 cancellation_rx,
1737 );
1738 tool_event_stream.update_fields(
1739 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1740 );
1741 let supports_images = self.model().is_some_and(|model| model.supports_images());
1742 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1743 log::debug!("Running tool {}", tool_use.name);
1744 Some(cx.foreground_executor().spawn(async move {
1745 let tool_result = tool_result.await.and_then(|output| {
1746 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1747 && !supports_images
1748 {
1749 return Err(anyhow!(
1750 "Attempted to read an image, but this model doesn't support it.",
1751 ));
1752 }
1753 Ok(output)
1754 });
1755
1756 match tool_result {
1757 Ok(output) => LanguageModelToolResult {
1758 tool_use_id: tool_use.id,
1759 tool_name: tool_use.name,
1760 is_error: false,
1761 content: output.llm_output,
1762 output: Some(output.raw_output),
1763 },
1764 Err(error) => LanguageModelToolResult {
1765 tool_use_id: tool_use.id,
1766 tool_name: tool_use.name,
1767 is_error: true,
1768 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1769 output: Some(error.to_string().into()),
1770 },
1771 }
1772 }))
1773 }
1774
1775 fn handle_tool_use_json_parse_error_event(
1776 &mut self,
1777 tool_use_id: LanguageModelToolUseId,
1778 tool_name: Arc<str>,
1779 raw_input: Arc<str>,
1780 json_parse_error: String,
1781 ) -> LanguageModelToolResult {
1782 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1783 LanguageModelToolResult {
1784 tool_use_id,
1785 tool_name,
1786 is_error: true,
1787 content: LanguageModelToolResultContent::Text(tool_output.into()),
1788 output: Some(serde_json::Value::String(raw_input.to_string())),
1789 }
1790 }
1791
1792 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1793 self.project
1794 .read(cx)
1795 .user_store()
1796 .update(cx, |user_store, cx| {
1797 user_store.update_model_request_usage(
1798 ModelRequestUsage(RequestUsage {
1799 amount: amount as i32,
1800 limit,
1801 }),
1802 cx,
1803 )
1804 });
1805 }
1806
1807 pub fn title(&self) -> SharedString {
1808 self.title.clone().unwrap_or("New Thread".into())
1809 }
1810
1811 pub fn is_generating_summary(&self) -> bool {
1812 self.pending_summary_generation.is_some()
1813 }
1814
1815 pub fn is_generating_title(&self) -> bool {
1816 self.pending_title_generation.is_some()
1817 }
1818
1819 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1820 if let Some(summary) = self.summary.as_ref() {
1821 return Task::ready(Some(summary.clone())).shared();
1822 }
1823 if let Some(task) = self.pending_summary_generation.clone() {
1824 return task;
1825 }
1826 let Some(model) = self.summarization_model.clone() else {
1827 log::error!("No summarization model available");
1828 return Task::ready(None).shared();
1829 };
1830 let mut request = LanguageModelRequest {
1831 intent: Some(CompletionIntent::ThreadContextSummarization),
1832 temperature: AgentSettings::temperature_for_model(&model, cx),
1833 ..Default::default()
1834 };
1835
1836 for message in &self.messages {
1837 request.messages.extend(message.to_request());
1838 }
1839
1840 request.messages.push(LanguageModelRequestMessage {
1841 role: Role::User,
1842 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1843 cache: false,
1844 reasoning_details: None,
1845 });
1846
1847 let task = cx
1848 .spawn(async move |this, cx| {
1849 let mut summary = String::new();
1850 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1851 while let Some(event) = messages.next().await {
1852 let event = event.log_err()?;
1853 let text = match event {
1854 LanguageModelCompletionEvent::Text(text) => text,
1855 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1856 this.update(cx, |thread, cx| {
1857 thread.update_model_request_usage(amount, limit, cx);
1858 })
1859 .ok()?;
1860 continue;
1861 }
1862 _ => continue,
1863 };
1864
1865 let mut lines = text.lines();
1866 summary.extend(lines.next());
1867 }
1868
1869 log::debug!("Setting summary: {}", summary);
1870 let summary = SharedString::from(summary);
1871
1872 this.update(cx, |this, cx| {
1873 this.summary = Some(summary.clone());
1874 this.pending_summary_generation = None;
1875 cx.notify()
1876 })
1877 .ok()?;
1878
1879 Some(summary)
1880 })
1881 .shared();
1882 self.pending_summary_generation = Some(task.clone());
1883 task
1884 }
1885
1886 pub fn generate_title(&mut self, cx: &mut Context<Self>) {
1887 let Some(model) = self.summarization_model.clone() else {
1888 return;
1889 };
1890
1891 log::debug!(
1892 "Generating title with model: {:?}",
1893 self.summarization_model.as_ref().map(|model| model.name())
1894 );
1895 let mut request = LanguageModelRequest {
1896 intent: Some(CompletionIntent::ThreadSummarization),
1897 temperature: AgentSettings::temperature_for_model(&model, cx),
1898 ..Default::default()
1899 };
1900
1901 for message in &self.messages {
1902 request.messages.extend(message.to_request());
1903 }
1904
1905 request.messages.push(LanguageModelRequestMessage {
1906 role: Role::User,
1907 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1908 cache: false,
1909 reasoning_details: None,
1910 });
1911 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1912 let mut title = String::new();
1913
1914 let generate = async {
1915 let mut messages = model.stream_completion(request, cx).await?;
1916 while let Some(event) = messages.next().await {
1917 let event = event?;
1918 let text = match event {
1919 LanguageModelCompletionEvent::Text(text) => text,
1920 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1921 this.update(cx, |thread, cx| {
1922 thread.update_model_request_usage(amount, limit, cx);
1923 })?;
1924 continue;
1925 }
1926 _ => continue,
1927 };
1928
1929 let mut lines = text.lines();
1930 title.extend(lines.next());
1931
1932 // Stop if the LLM generated multiple lines.
1933 if lines.next().is_some() {
1934 break;
1935 }
1936 }
1937 anyhow::Ok(())
1938 };
1939
1940 if generate.await.context("failed to generate title").is_ok() {
1941 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1942 }
1943 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1944 }));
1945 }
1946
1947 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1948 self.pending_title_generation = None;
1949 if Some(&title) != self.title.as_ref() {
1950 self.title = Some(title);
1951 cx.emit(TitleUpdated);
1952 cx.notify();
1953 }
1954 }
1955
1956 fn clear_summary(&mut self) {
1957 self.summary = None;
1958 self.pending_summary_generation = None;
1959 }
1960
1961 fn last_user_message(&self) -> Option<&UserMessage> {
1962 self.messages
1963 .iter()
1964 .rev()
1965 .find_map(|message| match message {
1966 Message::User(user_message) => Some(user_message),
1967 Message::Agent(_) => None,
1968 Message::Resume => None,
1969 })
1970 }
1971
1972 fn pending_message(&mut self) -> &mut AgentMessage {
1973 self.pending_message.get_or_insert_default()
1974 }
1975
1976 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1977 let Some(mut message) = self.pending_message.take() else {
1978 return;
1979 };
1980
1981 if message.content.is_empty() {
1982 return;
1983 }
1984
1985 for content in &message.content {
1986 let AgentMessageContent::ToolUse(tool_use) = content else {
1987 continue;
1988 };
1989
1990 if !message.tool_results.contains_key(&tool_use.id) {
1991 message.tool_results.insert(
1992 tool_use.id.clone(),
1993 LanguageModelToolResult {
1994 tool_use_id: tool_use.id.clone(),
1995 tool_name: tool_use.name.clone(),
1996 is_error: true,
1997 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1998 output: None,
1999 },
2000 );
2001 }
2002 }
2003
2004 self.messages.push(Message::Agent(message));
2005 self.updated_at = Utc::now();
2006 self.clear_summary();
2007 cx.notify()
2008 }
2009
2010 pub(crate) fn build_completion_request(
2011 &self,
2012 completion_intent: CompletionIntent,
2013 cx: &App,
2014 ) -> Result<LanguageModelRequest> {
2015 let model = self.model().context("No language model configured")?;
2016 let tools = if let Some(turn) = self.running_turn.as_ref() {
2017 turn.tools
2018 .iter()
2019 .filter_map(|(tool_name, tool)| {
2020 log::trace!("Including tool: {}", tool_name);
2021 Some(LanguageModelRequestTool {
2022 name: tool_name.to_string(),
2023 description: tool.description().to_string(),
2024 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
2025 })
2026 })
2027 .collect::<Vec<_>>()
2028 } else {
2029 Vec::new()
2030 };
2031
2032 log::debug!("Building completion request");
2033 log::debug!("Completion intent: {:?}", completion_intent);
2034 log::debug!("Completion mode: {:?}", self.completion_mode);
2035
2036 let available_tools: Vec<_> = self
2037 .running_turn
2038 .as_ref()
2039 .map(|turn| turn.tools.keys().cloned().collect())
2040 .unwrap_or_default();
2041
2042 log::debug!("Request includes {} tools", available_tools.len());
2043 let messages = self.build_request_messages(available_tools, cx);
2044 log::debug!("Request will include {} messages", messages.len());
2045
2046 let request = LanguageModelRequest {
2047 thread_id: Some(self.id.to_string()),
2048 prompt_id: Some(self.prompt_id.to_string()),
2049 intent: Some(completion_intent),
2050 mode: Some(self.completion_mode.into()),
2051 messages,
2052 tools,
2053 tool_choice: None,
2054 stop: Vec::new(),
2055 temperature: AgentSettings::temperature_for_model(model, cx),
2056 thinking_allowed: true,
2057 };
2058
2059 log::debug!("Completion request built successfully");
2060 Ok(request)
2061 }
2062
2063 fn enabled_tools(
2064 &self,
2065 profile: &AgentProfileSettings,
2066 model: &Arc<dyn LanguageModel>,
2067 cx: &App,
2068 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
2069 fn truncate(tool_name: &SharedString) -> SharedString {
2070 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
2071 let mut truncated = tool_name.to_string();
2072 truncated.truncate(MAX_TOOL_NAME_LENGTH);
2073 truncated.into()
2074 } else {
2075 tool_name.clone()
2076 }
2077 }
2078
2079 let mut tools = self
2080 .tools
2081 .iter()
2082 .filter_map(|(tool_name, tool)| {
2083 if tool.supports_provider(&model.provider_id())
2084 && profile.is_tool_enabled(tool_name)
2085 {
2086 Some((truncate(tool_name), tool.clone()))
2087 } else {
2088 None
2089 }
2090 })
2091 .collect::<BTreeMap<_, _>>();
2092
2093 let mut context_server_tools = Vec::new();
2094 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
2095 let mut duplicate_tool_names = HashSet::default();
2096 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
2097 for (tool_name, tool) in server_tools {
2098 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
2099 let tool_name = truncate(tool_name);
2100 if !seen_tools.insert(tool_name.clone()) {
2101 duplicate_tool_names.insert(tool_name.clone());
2102 }
2103 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
2104 }
2105 }
2106 }
2107
2108 // When there are duplicate tool names, disambiguate by prefixing them
2109 // with the server ID. In the rare case there isn't enough space for the
2110 // disambiguated tool name, keep only the last tool with this name.
2111 for (server_id, tool_name, tool) in context_server_tools {
2112 if duplicate_tool_names.contains(&tool_name) {
2113 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
2114 if available >= 2 {
2115 let mut disambiguated = server_id.0.to_string();
2116 disambiguated.truncate(available - 1);
2117 disambiguated.push('_');
2118 disambiguated.push_str(&tool_name);
2119 tools.insert(disambiguated.into(), tool.clone());
2120 } else {
2121 tools.insert(tool_name, tool.clone());
2122 }
2123 } else {
2124 tools.insert(tool_name, tool.clone());
2125 }
2126 }
2127
2128 tools
2129 }
2130
2131 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
2132 self.running_turn.as_ref()?.tools.get(name).cloned()
2133 }
2134
2135 pub fn has_tool(&self, name: &str) -> bool {
2136 self.running_turn
2137 .as_ref()
2138 .is_some_and(|turn| turn.tools.contains_key(name))
2139 }
2140
2141 fn build_request_messages(
2142 &self,
2143 available_tools: Vec<SharedString>,
2144 cx: &App,
2145 ) -> Vec<LanguageModelRequestMessage> {
2146 log::trace!(
2147 "Building request messages from {} thread messages",
2148 self.messages.len()
2149 );
2150
2151 let system_prompt = SystemPromptTemplate {
2152 project: self.project_context.read(cx),
2153 available_tools,
2154 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2155 }
2156 .render(&self.templates)
2157 .context("failed to build system prompt")
2158 .expect("Invalid template");
2159 let mut messages = vec![LanguageModelRequestMessage {
2160 role: Role::System,
2161 content: vec![system_prompt.into()],
2162 cache: false,
2163 reasoning_details: None,
2164 }];
2165 for message in &self.messages {
2166 messages.extend(message.to_request());
2167 }
2168
2169 if let Some(last_message) = messages.last_mut() {
2170 last_message.cache = true;
2171 }
2172
2173 if let Some(message) = self.pending_message.as_ref() {
2174 messages.extend(message.to_request());
2175 }
2176
2177 messages
2178 }
2179
2180 pub fn to_markdown(&self) -> String {
2181 let mut markdown = String::new();
2182 for (ix, message) in self.messages.iter().enumerate() {
2183 if ix > 0 {
2184 markdown.push('\n');
2185 }
2186 markdown.push_str(&message.to_markdown());
2187 }
2188
2189 if let Some(message) = self.pending_message.as_ref() {
2190 markdown.push('\n');
2191 markdown.push_str(&message.to_markdown());
2192 }
2193
2194 markdown
2195 }
2196
2197 fn advance_prompt_id(&mut self) {
2198 self.prompt_id = PromptId::new();
2199 }
2200
2201 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2202 use LanguageModelCompletionError::*;
2203 use http_client::StatusCode;
2204
2205 // General strategy here:
2206 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2207 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times with exponential backoff.
2208 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2209 match error {
2210 HttpResponseError {
2211 status_code: StatusCode::TOO_MANY_REQUESTS,
2212 ..
2213 } => Some(RetryStrategy::ExponentialBackoff {
2214 initial_delay: BASE_RETRY_DELAY,
2215 max_attempts: MAX_RETRY_ATTEMPTS,
2216 }),
2217 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2218 Some(RetryStrategy::Fixed {
2219 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2220 max_attempts: MAX_RETRY_ATTEMPTS,
2221 })
2222 }
2223 UpstreamProviderError {
2224 status,
2225 retry_after,
2226 ..
2227 } => match *status {
2228 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2229 Some(RetryStrategy::Fixed {
2230 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2231 max_attempts: MAX_RETRY_ATTEMPTS,
2232 })
2233 }
2234 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2235 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2236 // Internal Server Error could be anything, retry up to 3 times.
2237 max_attempts: 3,
2238 }),
2239 status => {
2240 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2241 // but we frequently get them in practice. See https://http.dev/529
2242 if status.as_u16() == 529 {
2243 Some(RetryStrategy::Fixed {
2244 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2245 max_attempts: MAX_RETRY_ATTEMPTS,
2246 })
2247 } else {
2248 Some(RetryStrategy::Fixed {
2249 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2250 max_attempts: 2,
2251 })
2252 }
2253 }
2254 },
2255 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2256 delay: BASE_RETRY_DELAY,
2257 max_attempts: 3,
2258 }),
2259 ApiReadResponseError { .. }
2260 | HttpSend { .. }
2261 | DeserializeResponse { .. }
2262 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2263 delay: BASE_RETRY_DELAY,
2264 max_attempts: 3,
2265 }),
2266 // Retrying these errors definitely shouldn't help.
2267 HttpResponseError {
2268 status_code:
2269 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2270 ..
2271 }
2272 | AuthenticationError { .. }
2273 | PermissionError { .. }
2274 | NoApiKey { .. }
2275 | ApiEndpointNotFound { .. }
2276 | PromptTooLarge { .. } => None,
2277 // These errors might be transient, so retry them
2278 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2279 delay: BASE_RETRY_DELAY,
2280 max_attempts: 1,
2281 }),
2282 // Retry all other 4xx and 5xx errors once.
2283 HttpResponseError { status_code, .. }
2284 if status_code.is_client_error() || status_code.is_server_error() =>
2285 {
2286 Some(RetryStrategy::Fixed {
2287 delay: BASE_RETRY_DELAY,
2288 max_attempts: 3,
2289 })
2290 }
2291 Other(err)
2292 if err.is::<language_model::PaymentRequiredError>()
2293 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2294 {
2295 // Retrying won't help for Payment Required or Model Request Limit errors (where
2296 // the user must upgrade to usage-based billing to get more requests, or else wait
2297 // for a significant amount of time for the request limit to reset).
2298 None
2299 }
2300 // Conservatively assume that any other errors are non-retryable
2301 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2302 delay: BASE_RETRY_DELAY,
2303 max_attempts: 2,
2304 }),
2305 }
2306 }
2307}
2308
2309struct RunningTurn {
2310 /// Holds the task that handles agent interaction until the end of the turn.
2311 /// Survives across multiple requests as the model performs tool calls and
2312 /// we run tools, report their results.
2313 _task: Task<()>,
2314 /// The current event stream for the running turn. Used to report a final
2315 /// cancellation event if we cancel the turn.
2316 event_stream: ThreadEventStream,
2317 /// The tools that were enabled for this turn.
2318 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2319 /// Sender to signal tool cancellation. When cancel is called, this is
2320 /// set to true so all tools can detect user-initiated cancellation.
2321 cancellation_tx: watch::Sender<bool>,
2322}
2323
2324impl RunningTurn {
2325 fn cancel(mut self) -> Task<()> {
2326 log::debug!("Cancelling in progress turn");
2327 self.cancellation_tx.send(true).ok();
2328 self.event_stream.send_canceled();
2329 self._task
2330 }
2331}
2332
2333pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2334
2335impl EventEmitter<TokenUsageUpdated> for Thread {}
2336
2337pub struct TitleUpdated;
2338
2339impl EventEmitter<TitleUpdated> for Thread {}
2340
2341pub trait AgentTool
2342where
2343 Self: 'static + Sized,
2344{
2345 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2346 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2347
2348 fn name() -> &'static str;
2349
2350 fn description() -> SharedString {
2351 let schema = schemars::schema_for!(Self::Input);
2352 SharedString::new(
2353 schema
2354 .get("description")
2355 .and_then(|description| description.as_str())
2356 .unwrap_or_default(),
2357 )
2358 }
2359
2360 fn kind() -> acp::ToolKind;
2361
2362 /// The initial tool title to display. Can be updated during the tool run.
2363 fn initial_title(
2364 &self,
2365 input: Result<Self::Input, serde_json::Value>,
2366 cx: &mut App,
2367 ) -> SharedString;
2368
2369 /// Returns the JSON schema that describes the tool's input.
2370 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2371 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2372 }
2373
2374 /// Some tools rely on a provider for the underlying billing or other reasons.
2375 /// Allow the tool to check if they are compatible, or should be filtered out.
2376 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2377 true
2378 }
2379
2380 /// Runs the tool with the provided input.
2381 fn run(
2382 self: Arc<Self>,
2383 input: Self::Input,
2384 event_stream: ToolCallEventStream,
2385 cx: &mut App,
2386 ) -> Task<Result<Self::Output>>;
2387
2388 /// Emits events for a previous execution of the tool.
2389 fn replay(
2390 &self,
2391 _input: Self::Input,
2392 _output: Self::Output,
2393 _event_stream: ToolCallEventStream,
2394 _cx: &mut App,
2395 ) -> Result<()> {
2396 Ok(())
2397 }
2398
2399 fn erase(self) -> Arc<dyn AnyAgentTool> {
2400 Arc::new(Erased(Arc::new(self)))
2401 }
2402}
2403
2404pub struct Erased<T>(T);
2405
2406pub struct AgentToolOutput {
2407 pub llm_output: LanguageModelToolResultContent,
2408 pub raw_output: serde_json::Value,
2409}
2410
2411pub trait AnyAgentTool {
2412 fn name(&self) -> SharedString;
2413 fn description(&self) -> SharedString;
2414 fn kind(&self) -> acp::ToolKind;
2415 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
2416 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2417 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2418 true
2419 }
2420 fn run(
2421 self: Arc<Self>,
2422 input: serde_json::Value,
2423 event_stream: ToolCallEventStream,
2424 cx: &mut App,
2425 ) -> Task<Result<AgentToolOutput>>;
2426 fn replay(
2427 &self,
2428 input: serde_json::Value,
2429 output: serde_json::Value,
2430 event_stream: ToolCallEventStream,
2431 cx: &mut App,
2432 ) -> Result<()>;
2433}
2434
2435impl<T> AnyAgentTool for Erased<Arc<T>>
2436where
2437 T: AgentTool,
2438{
2439 fn name(&self) -> SharedString {
2440 T::name().into()
2441 }
2442
2443 fn description(&self) -> SharedString {
2444 T::description()
2445 }
2446
2447 fn kind(&self) -> agent_client_protocol::ToolKind {
2448 T::kind()
2449 }
2450
2451 fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString {
2452 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2453 self.0.initial_title(parsed_input, _cx)
2454 }
2455
2456 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2457 let mut json = serde_json::to_value(T::input_schema(format))?;
2458 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2459 Ok(json)
2460 }
2461
2462 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2463 T::supports_provider(provider)
2464 }
2465
2466 fn run(
2467 self: Arc<Self>,
2468 input: serde_json::Value,
2469 event_stream: ToolCallEventStream,
2470 cx: &mut App,
2471 ) -> Task<Result<AgentToolOutput>> {
2472 cx.spawn(async move |cx| {
2473 let input = serde_json::from_value(input)?;
2474 let output = cx
2475 .update(|cx| self.0.clone().run(input, event_stream, cx))
2476 .await?;
2477 let raw_output = serde_json::to_value(&output)?;
2478 Ok(AgentToolOutput {
2479 llm_output: output.into(),
2480 raw_output,
2481 })
2482 })
2483 }
2484
2485 fn replay(
2486 &self,
2487 input: serde_json::Value,
2488 output: serde_json::Value,
2489 event_stream: ToolCallEventStream,
2490 cx: &mut App,
2491 ) -> Result<()> {
2492 let input = serde_json::from_value(input)?;
2493 let output = serde_json::from_value(output)?;
2494 self.0.replay(input, output, event_stream, cx)
2495 }
2496}
2497
2498#[derive(Clone)]
2499struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2500
2501impl ThreadEventStream {
2502 fn send_user_message(&self, message: &UserMessage) {
2503 self.0
2504 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2505 .ok();
2506 }
2507
2508 fn send_text(&self, text: &str) {
2509 self.0
2510 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2511 .ok();
2512 }
2513
2514 fn send_thinking(&self, text: &str) {
2515 self.0
2516 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2517 .ok();
2518 }
2519
2520 fn send_tool_call(
2521 &self,
2522 id: &LanguageModelToolUseId,
2523 tool_name: &str,
2524 title: SharedString,
2525 kind: acp::ToolKind,
2526 input: serde_json::Value,
2527 ) {
2528 self.0
2529 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2530 id,
2531 tool_name,
2532 title.to_string(),
2533 kind,
2534 input,
2535 ))))
2536 .ok();
2537 }
2538
2539 fn initial_tool_call(
2540 id: &LanguageModelToolUseId,
2541 tool_name: &str,
2542 title: String,
2543 kind: acp::ToolKind,
2544 input: serde_json::Value,
2545 ) -> acp::ToolCall {
2546 acp::ToolCall::new(id.to_string(), title)
2547 .kind(kind)
2548 .raw_input(input)
2549 .meta(acp::Meta::from_iter([(
2550 "tool_name".into(),
2551 tool_name.into(),
2552 )]))
2553 }
2554
2555 fn update_tool_call_fields(
2556 &self,
2557 tool_use_id: &LanguageModelToolUseId,
2558 fields: acp::ToolCallUpdateFields,
2559 ) {
2560 self.0
2561 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2562 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2563 )))
2564 .ok();
2565 }
2566
2567 fn send_retry(&self, status: acp_thread::RetryStatus) {
2568 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2569 }
2570
2571 fn send_stop(&self, reason: acp::StopReason) {
2572 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2573 }
2574
2575 fn send_canceled(&self) {
2576 self.0
2577 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2578 .ok();
2579 }
2580
2581 fn send_error(&self, error: impl Into<anyhow::Error>) {
2582 self.0.unbounded_send(Err(error.into())).ok();
2583 }
2584}
2585
2586#[derive(Clone)]
2587pub struct ToolCallEventStream {
2588 tool_use_id: LanguageModelToolUseId,
2589 stream: ThreadEventStream,
2590 fs: Option<Arc<dyn Fs>>,
2591 cancellation_rx: watch::Receiver<bool>,
2592}
2593
2594impl ToolCallEventStream {
2595 #[cfg(any(test, feature = "test-support"))]
2596 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2597 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2598 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
2599
2600 let stream = ToolCallEventStream::new(
2601 "test_id".into(),
2602 ThreadEventStream(events_tx),
2603 None,
2604 cancellation_rx,
2605 );
2606
2607 (stream, ToolCallEventStreamReceiver(events_rx))
2608 }
2609
2610 fn new(
2611 tool_use_id: LanguageModelToolUseId,
2612 stream: ThreadEventStream,
2613 fs: Option<Arc<dyn Fs>>,
2614 cancellation_rx: watch::Receiver<bool>,
2615 ) -> Self {
2616 Self {
2617 tool_use_id,
2618 stream,
2619 fs,
2620 cancellation_rx,
2621 }
2622 }
2623
2624 /// Returns a future that resolves when the user cancels the tool call.
2625 /// Tools should select on this alongside their main work to detect user cancellation.
2626 pub fn cancelled_by_user(&self) -> impl std::future::Future<Output = ()> + '_ {
2627 let mut rx = self.cancellation_rx.clone();
2628 async move {
2629 loop {
2630 if *rx.borrow() {
2631 return;
2632 }
2633 if rx.changed().await.is_err() {
2634 // Sender dropped, will never be cancelled
2635 std::future::pending::<()>().await;
2636 }
2637 }
2638 }
2639 }
2640
2641 /// Returns true if the user has cancelled this tool call.
2642 /// This is useful for checking cancellation state after an operation completes,
2643 /// to determine if the completion was due to user cancellation.
2644 pub fn was_cancelled_by_user(&self) -> bool {
2645 *self.cancellation_rx.clone().borrow()
2646 }
2647
2648 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2649 self.stream
2650 .update_tool_call_fields(&self.tool_use_id, fields);
2651 }
2652
2653 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2654 self.stream
2655 .0
2656 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2657 acp_thread::ToolCallUpdateDiff {
2658 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2659 diff,
2660 }
2661 .into(),
2662 )))
2663 .ok();
2664 }
2665
2666 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2667 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2668 return Task::ready(Ok(()));
2669 }
2670
2671 self.authorize_required(title, cx)
2672 }
2673
2674 /// Like `authorize`, but always prompts for confirmation regardless of
2675 /// the `always_allow_tool_actions` setting. Use this when tool-specific
2676 /// permission rules (like `always_confirm` patterns) have already determined
2677 /// that confirmation is required.
2678 pub fn authorize_required(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2679 let (response_tx, response_rx) = oneshot::channel();
2680 self.stream
2681 .0
2682 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2683 ToolCallAuthorization {
2684 tool_call: acp::ToolCallUpdate::new(
2685 self.tool_use_id.to_string(),
2686 acp::ToolCallUpdateFields::new().title(title.into()),
2687 ),
2688 options: vec![
2689 acp::PermissionOption::new(
2690 acp::PermissionOptionId::new("always_allow"),
2691 "Always Allow",
2692 acp::PermissionOptionKind::AllowAlways,
2693 ),
2694 acp::PermissionOption::new(
2695 acp::PermissionOptionId::new("allow"),
2696 "Allow",
2697 acp::PermissionOptionKind::AllowOnce,
2698 ),
2699 acp::PermissionOption::new(
2700 acp::PermissionOptionId::new("deny"),
2701 "Deny",
2702 acp::PermissionOptionKind::RejectOnce,
2703 ),
2704 ],
2705 response: response_tx,
2706 },
2707 )))
2708 .ok();
2709 let fs = self.fs.clone();
2710 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2711 "always_allow" => {
2712 if let Some(fs) = fs.clone() {
2713 cx.update(|cx| {
2714 update_settings_file(fs, cx, |settings, _| {
2715 settings
2716 .agent
2717 .get_or_insert_default()
2718 .set_always_allow_tool_actions(true);
2719 });
2720 });
2721 }
2722
2723 Ok(())
2724 }
2725 "allow" => Ok(()),
2726 _ => Err(anyhow!("Permission to run tool denied by user")),
2727 })
2728 }
2729}
2730
2731#[cfg(any(test, feature = "test-support"))]
2732pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2733
2734#[cfg(any(test, feature = "test-support"))]
2735impl ToolCallEventStreamReceiver {
2736 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2737 let event = self.0.next().await;
2738 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2739 auth
2740 } else {
2741 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2742 }
2743 }
2744
2745 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2746 let event = self.0.next().await;
2747 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2748 update,
2749 )))) = event
2750 {
2751 update.fields
2752 } else {
2753 panic!("Expected update fields but got: {:?}", event);
2754 }
2755 }
2756
2757 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2758 let event = self.0.next().await;
2759 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2760 update,
2761 )))) = event
2762 {
2763 update.diff
2764 } else {
2765 panic!("Expected diff but got: {:?}", event);
2766 }
2767 }
2768
2769 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2770 let event = self.0.next().await;
2771 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2772 update,
2773 )))) = event
2774 {
2775 update.terminal
2776 } else {
2777 panic!("Expected terminal but got: {:?}", event);
2778 }
2779 }
2780}
2781
2782#[cfg(any(test, feature = "test-support"))]
2783impl std::ops::Deref for ToolCallEventStreamReceiver {
2784 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2785
2786 fn deref(&self) -> &Self::Target {
2787 &self.0
2788 }
2789}
2790
2791#[cfg(any(test, feature = "test-support"))]
2792impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2793 fn deref_mut(&mut self) -> &mut Self::Target {
2794 &mut self.0
2795 }
2796}
2797
2798impl From<&str> for UserMessageContent {
2799 fn from(text: &str) -> Self {
2800 Self::Text(text.into())
2801 }
2802}
2803
2804impl UserMessageContent {
2805 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2806 match value {
2807 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2808 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2809 acp::ContentBlock::Audio(_) => {
2810 // TODO
2811 Self::Text("[audio]".to_string())
2812 }
2813 acp::ContentBlock::ResourceLink(resource_link) => {
2814 match MentionUri::parse(&resource_link.uri, path_style) {
2815 Ok(uri) => Self::Mention {
2816 uri,
2817 content: String::new(),
2818 },
2819 Err(err) => {
2820 log::error!("Failed to parse mention link: {}", err);
2821 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2822 }
2823 }
2824 }
2825 acp::ContentBlock::Resource(resource) => match resource.resource {
2826 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2827 match MentionUri::parse(&resource.uri, path_style) {
2828 Ok(uri) => Self::Mention {
2829 uri,
2830 content: resource.text,
2831 },
2832 Err(err) => {
2833 log::error!("Failed to parse mention link: {}", err);
2834 Self::Text(
2835 MarkdownCodeBlock {
2836 tag: &resource.uri,
2837 text: &resource.text,
2838 }
2839 .to_string(),
2840 )
2841 }
2842 }
2843 }
2844 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2845 // TODO
2846 Self::Text("[blob]".to_string())
2847 }
2848 other => {
2849 log::warn!("Unexpected content type: {:?}", other);
2850 Self::Text("[unknown]".to_string())
2851 }
2852 },
2853 other => {
2854 log::warn!("Unexpected content type: {:?}", other);
2855 Self::Text("[unknown]".to_string())
2856 }
2857 }
2858 }
2859}
2860
2861impl From<UserMessageContent> for acp::ContentBlock {
2862 fn from(content: UserMessageContent) -> Self {
2863 match content {
2864 UserMessageContent::Text(text) => text.into(),
2865 UserMessageContent::Image(image) => {
2866 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
2867 }
2868 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
2869 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
2870 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
2871 )),
2872 ),
2873 }
2874 }
2875}
2876
2877fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2878 LanguageModelImage {
2879 source: image_content.data.into(),
2880 size: None,
2881 }
2882}