1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 RestoreFileFromDiskTool, SaveFileTool, SystemPromptTemplate, Template, Templates, TerminalTool,
6 ThinkingTool, WebSearchTool,
7};
8use acp_thread::{MentionUri, UserMessageId};
9use action_log::ActionLog;
10
11use agent_client_protocol as acp;
12use agent_settings::{
13 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
14 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
15};
16use anyhow::{Context as _, Result, anyhow};
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage, UserStore};
19use cloud_llm_client::{CompletionIntent, Plan, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::stream;
23use futures::{
24 FutureExt,
25 channel::{mpsc, oneshot},
26 future::Shared,
27 stream::FuturesUnordered,
28};
29use gpui::{
30 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
31};
32use language_model::{
33 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
34 LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
35 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
36 LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
37 LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
38 ZED_CLOUD_PROVIDER_ID,
39};
40use project::Project;
41use prompt_store::ProjectContext;
42use schemars::{JsonSchema, Schema};
43use serde::{Deserialize, Serialize};
44use settings::{LanguageModelSelection, Settings, update_settings_file};
45use smol::stream::StreamExt;
46use std::{
47 collections::BTreeMap,
48 ops::RangeInclusive,
49 path::Path,
50 rc::Rc,
51 sync::Arc,
52 time::{Duration, Instant},
53};
54use std::{fmt::Write, path::PathBuf};
55use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock, paths::PathStyle};
56use uuid::Uuid;
57
58const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
59pub const MAX_TOOL_NAME_LENGTH: usize = 64;
60
61/// The ID of the user prompt that initiated a request.
62///
/// This corresponds to the user explicitly submitting a message to the model (e.g., by pressing the Enter key).
64#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
65pub struct PromptId(Arc<str>);
66
67impl PromptId {
68 pub fn new() -> Self {
69 Self(Uuid::new_v4().to_string().into())
70 }
71}
72
73impl std::fmt::Display for PromptId {
74 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
75 write!(f, "{}", self.0)
76 }
77}
78
79pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
80pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
81
82#[derive(Debug, Clone)]
83enum RetryStrategy {
84 ExponentialBackoff {
85 initial_delay: Duration,
86 max_attempts: u8,
87 },
88 Fixed {
89 delay: Duration,
90 max_attempts: u8,
91 },
92}
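
// Illustrative retry schedule, assuming `retry_strategy_for` selects exponential
// backoff starting at BASE_RETRY_DELAY (5s) with MAX_RETRY_ATTEMPTS (4):
// attempt 1 -> 5s, attempt 2 -> 10s, attempt 3 -> 20s, attempt 4 -> 40s.
// The delay itself is computed in `handle_completion_error` as
// `initial_delay * 2^(attempt - 1)` for the exponential strategy.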
93
94#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
95pub enum Message {
96 User(UserMessage),
97 Agent(AgentMessage),
98 Resume,
99}
100
101impl Message {
102 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
103 match self {
104 Message::Agent(agent_message) => Some(agent_message),
105 _ => None,
106 }
107 }
108
109 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
110 match self {
111 Message::User(message) => {
112 if message.content.is_empty() {
113 vec![]
114 } else {
115 vec![message.to_request()]
116 }
117 }
118 Message::Agent(message) => message.to_request(),
119 Message::Resume => vec![LanguageModelRequestMessage {
120 role: Role::User,
121 content: vec!["Continue where you left off".into()],
122 cache: false,
123 reasoning_details: None,
124 }],
125 }
126 }
127
128 pub fn to_markdown(&self) -> String {
129 match self {
130 Message::User(message) => message.to_markdown(),
131 Message::Agent(message) => message.to_markdown(),
132 Message::Resume => "[resume]\n".into(),
133 }
134 }
135
136 pub fn role(&self) -> Role {
137 match self {
138 Message::User(_) | Message::Resume => Role::User,
139 Message::Agent(_) => Role::Assistant,
140 }
141 }
142}
143
144#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
145pub struct UserMessage {
146 pub id: UserMessageId,
147 pub content: Vec<UserMessageContent>,
148}
149
150#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
151pub enum UserMessageContent {
152 Text(String),
153 Mention { uri: MentionUri, content: String },
154 Image(LanguageModelImage),
155}
156
157impl UserMessage {
158 pub fn to_markdown(&self) -> String {
159 let mut markdown = String::from("## User\n\n");
160
161 for content in &self.content {
162 match content {
163 UserMessageContent::Text(text) => {
164 markdown.push_str(text);
165 markdown.push('\n');
166 }
167 UserMessageContent::Image(_) => {
168 markdown.push_str("<image />\n");
169 }
170 UserMessageContent::Mention { uri, content } => {
171 if !content.is_empty() {
172 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
173 } else {
174 let _ = writeln!(&mut markdown, "{}", uri.as_link());
175 }
176 }
177 }
178 }
179
180 markdown
181 }
182
183 fn to_request(&self) -> LanguageModelRequestMessage {
184 let mut message = LanguageModelRequestMessage {
185 role: Role::User,
186 content: Vec::with_capacity(self.content.len()),
187 cache: false,
188 reasoning_details: None,
189 };
190
191 const OPEN_CONTEXT: &str = "<context>\n\
192 The following items were attached by the user. \
193 They are up-to-date and don't need to be re-read.\n\n";
194
195 const OPEN_FILES_TAG: &str = "<files>";
196 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
197 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
198 const OPEN_SELECTIONS_TAG: &str = "<selections>";
199 const OPEN_THREADS_TAG: &str = "<threads>";
200 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
201 const OPEN_RULES_TAG: &str =
            "<user_rules>\nThe user has specified the following rules that should be applied:\n";
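
        // When assembled below, the request message is shaped roughly like this
        // (illustrative; only non-empty sections are emitted):
        //
        //   <original user chunks and mention links...>
        //   <context>
        //   <files>...</files>
        //   <directories>...</directories>
        //   <symbols>...</symbols>
        //   <selections>...</selections>
        //   <threads>...</threads>
        //   <fetched_urls>...</fetched_urls>
        //   <user_rules>...</user_rules>
        //   </context>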
203
204 let mut file_context = OPEN_FILES_TAG.to_string();
205 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
206 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
207 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
208 let mut thread_context = OPEN_THREADS_TAG.to_string();
209 let mut fetch_context = OPEN_FETCH_TAG.to_string();
210 let mut rules_context = OPEN_RULES_TAG.to_string();
211
212 for chunk in &self.content {
213 let chunk = match chunk {
214 UserMessageContent::Text(text) => {
215 language_model::MessageContent::Text(text.clone())
216 }
217 UserMessageContent::Image(value) => {
218 language_model::MessageContent::Image(value.clone())
219 }
220 UserMessageContent::Mention { uri, content } => {
221 match uri {
222 MentionUri::File { abs_path } => {
223 write!(
224 &mut file_context,
225 "\n{}",
226 MarkdownCodeBlock {
227 tag: &codeblock_tag(abs_path, None),
228 text: &content.to_string(),
229 }
230 )
231 .ok();
232 }
233 MentionUri::PastedImage => {
234 debug_panic!("pasted image URI should not be used in mention content")
235 }
236 MentionUri::Directory { .. } => {
237 write!(&mut directory_context, "\n{}\n", content).ok();
238 }
239 MentionUri::Symbol {
240 abs_path: path,
241 line_range,
242 ..
243 } => {
244 write!(
245 &mut symbol_context,
246 "\n{}",
247 MarkdownCodeBlock {
248 tag: &codeblock_tag(path, Some(line_range)),
249 text: content
250 }
251 )
252 .ok();
253 }
254 MentionUri::Selection {
255 abs_path: path,
256 line_range,
257 ..
258 } => {
259 write!(
260 &mut selection_context,
261 "\n{}",
262 MarkdownCodeBlock {
263 tag: &codeblock_tag(
264 path.as_deref().unwrap_or("Untitled".as_ref()),
265 Some(line_range)
266 ),
267 text: content
268 }
269 )
270 .ok();
271 }
272 MentionUri::Thread { .. } => {
273 write!(&mut thread_context, "\n{}\n", content).ok();
274 }
275 MentionUri::TextThread { .. } => {
276 write!(&mut thread_context, "\n{}\n", content).ok();
277 }
278 MentionUri::Rule { .. } => {
279 write!(
280 &mut rules_context,
281 "\n{}",
282 MarkdownCodeBlock {
283 tag: "",
284 text: content
285 }
286 )
287 .ok();
288 }
289 MentionUri::Fetch { url } => {
290 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
291 }
292 }
293
294 language_model::MessageContent::Text(uri.as_link().to_string())
295 }
296 };
297
298 message.content.push(chunk);
299 }
300
301 let len_before_context = message.content.len();
302
303 if file_context.len() > OPEN_FILES_TAG.len() {
304 file_context.push_str("</files>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(file_context));
308 }
309
310 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
311 directory_context.push_str("</directories>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(directory_context));
315 }
316
317 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
318 symbol_context.push_str("</symbols>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(symbol_context));
322 }
323
324 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
325 selection_context.push_str("</selections>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(selection_context));
329 }
330
331 if thread_context.len() > OPEN_THREADS_TAG.len() {
332 thread_context.push_str("</threads>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(thread_context));
336 }
337
338 if fetch_context.len() > OPEN_FETCH_TAG.len() {
339 fetch_context.push_str("</fetched_urls>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(fetch_context));
343 }
344
345 if rules_context.len() > OPEN_RULES_TAG.len() {
346 rules_context.push_str("</user_rules>\n");
347 message
348 .content
349 .push(language_model::MessageContent::Text(rules_context));
350 }
351
352 if message.content.len() > len_before_context {
353 message.content.insert(
354 len_before_context,
355 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
356 );
357 message
358 .content
359 .push(language_model::MessageContent::Text("</context>".into()));
360 }
361
362 message
363 }
364}
365
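/// Builds the info string for a fenced code block that wraps mentioned file,
/// symbol, or selection content. For example (illustrative),
/// `codeblock_tag(Path::new("src/main.rs"), Some(&(4..=9)))` yields
/// `"rs src/main.rs:5-10"`; the zero-based line range is rendered one-based.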
366fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
367 let mut result = String::new();
368
369 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
370 let _ = write!(result, "{} ", extension);
371 }
372
373 let _ = write!(result, "{}", full_path.display());
374
375 if let Some(range) = line_range {
376 if range.start() == range.end() {
377 let _ = write!(result, ":{}", range.start() + 1);
378 } else {
379 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
380 }
381 }
382
383 result
384}
385
386impl AgentMessage {
387 pub fn to_markdown(&self) -> String {
388 let mut markdown = String::from("## Assistant\n\n");
389
390 for content in &self.content {
391 match content {
392 AgentMessageContent::Text(text) => {
393 markdown.push_str(text);
394 markdown.push('\n');
395 }
396 AgentMessageContent::Thinking { text, .. } => {
397 markdown.push_str("<think>");
398 markdown.push_str(text);
399 markdown.push_str("</think>\n");
400 }
401 AgentMessageContent::RedactedThinking(_) => {
402 markdown.push_str("<redacted_thinking />\n")
403 }
404 AgentMessageContent::ToolUse(tool_use) => {
405 markdown.push_str(&format!(
406 "**Tool Use**: {} (ID: {})\n",
407 tool_use.name, tool_use.id
408 ));
409 markdown.push_str(&format!(
410 "{}\n",
411 MarkdownCodeBlock {
412 tag: "json",
413 text: &format!("{:#}", tool_use.input)
414 }
415 ));
416 }
417 }
418 }
419
420 for tool_result in self.tool_results.values() {
421 markdown.push_str(&format!(
422 "**Tool Result**: {} (ID: {})\n\n",
423 tool_result.tool_name, tool_result.tool_use_id
424 ));
425 if tool_result.is_error {
426 markdown.push_str("**ERROR:**\n");
427 }
428
429 match &tool_result.content {
430 LanguageModelToolResultContent::Text(text) => {
431 writeln!(markdown, "{text}\n").ok();
432 }
433 LanguageModelToolResultContent::Image(_) => {
434 writeln!(markdown, "<image />\n").ok();
435 }
436 }
437
438 if let Some(output) = tool_result.output.as_ref() {
439 writeln!(
440 markdown,
441 "**Debug Output**:\n\n```json\n{}\n```\n",
442 serde_json::to_string_pretty(output).unwrap()
443 )
444 .unwrap();
445 }
446 }
447
448 markdown
449 }
450
451 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
452 let mut assistant_message = LanguageModelRequestMessage {
453 role: Role::Assistant,
454 content: Vec::with_capacity(self.content.len()),
455 cache: false,
456 reasoning_details: self.reasoning_details.clone(),
457 };
458 for chunk in &self.content {
459 match chunk {
460 AgentMessageContent::Text(text) => {
461 assistant_message
462 .content
463 .push(language_model::MessageContent::Text(text.clone()));
464 }
465 AgentMessageContent::Thinking { text, signature } => {
466 assistant_message
467 .content
468 .push(language_model::MessageContent::Thinking {
469 text: text.clone(),
470 signature: signature.clone(),
471 });
472 }
473 AgentMessageContent::RedactedThinking(value) => {
474 assistant_message.content.push(
475 language_model::MessageContent::RedactedThinking(value.clone()),
476 );
477 }
478 AgentMessageContent::ToolUse(tool_use) => {
479 if self.tool_results.contains_key(&tool_use.id) {
480 assistant_message
481 .content
482 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
483 }
484 }
485 };
486 }
487
488 let mut user_message = LanguageModelRequestMessage {
489 role: Role::User,
490 content: Vec::new(),
491 cache: false,
492 reasoning_details: None,
493 };
494
495 for tool_result in self.tool_results.values() {
496 let mut tool_result = tool_result.clone();
497 // Surprisingly, the API fails if we return an empty string here.
498 // It thinks we are sending a tool use without a tool result.
499 if tool_result.content.is_empty() {
500 tool_result.content = "<Tool returned an empty string>".into();
501 }
502 user_message
503 .content
504 .push(language_model::MessageContent::ToolResult(tool_result));
505 }
506
507 let mut messages = Vec::new();
508 if !assistant_message.content.is_empty() {
509 messages.push(assistant_message);
510 }
511 if !user_message.content.is_empty() {
512 messages.push(user_message);
513 }
514 messages
515 }
516}
517
518#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
519pub struct AgentMessage {
520 pub content: Vec<AgentMessageContent>,
521 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
522 pub reasoning_details: Option<serde_json::Value>,
523}
524
525#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
526pub enum AgentMessageContent {
527 Text(String),
528 Thinking {
529 text: String,
530 signature: Option<String>,
531 },
532 RedactedThinking(String),
533 ToolUse(LanguageModelToolUse),
534}
535
536pub trait TerminalHandle {
537 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
538 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
539 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
540 fn kill(&self, cx: &AsyncApp) -> Result<()>;
541 fn was_stopped_by_user(&self, cx: &AsyncApp) -> Result<bool>;
542}
543
544pub trait ThreadEnvironment {
545 fn create_terminal(
546 &self,
547 command: String,
548 cwd: Option<PathBuf>,
549 output_byte_limit: Option<u64>,
550 cx: &mut AsyncApp,
551 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
552}
553
554#[derive(Debug)]
555pub enum ThreadEvent {
556 UserMessage(UserMessage),
557 AgentText(String),
558 AgentThinking(String),
559 ToolCall(acp::ToolCall),
560 ToolCallUpdate(acp_thread::ToolCallUpdate),
561 ToolCallAuthorization(ToolCallAuthorization),
562 Retry(acp_thread::RetryStatus),
563 Stop(acp::StopReason),
564}
565
566#[derive(Debug)]
567pub struct NewTerminal {
568 pub command: String,
569 pub output_byte_limit: Option<u64>,
570 pub cwd: Option<PathBuf>,
571 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
572}
573
574#[derive(Debug)]
575pub struct ToolCallAuthorization {
576 pub tool_call: acp::ToolCallUpdate,
577 pub options: Vec<acp::PermissionOption>,
578 pub response: oneshot::Sender<acp::PermissionOptionId>,
579}
580
581#[derive(Debug, thiserror::Error)]
582enum CompletionError {
583 #[error("max tokens")]
584 MaxTokens,
585 #[error("refusal")]
586 Refusal,
587 #[error(transparent)]
588 Other(#[from] anyhow::Error),
589}
590
591pub struct Thread {
592 id: acp::SessionId,
593 prompt_id: PromptId,
594 updated_at: DateTime<Utc>,
595 title: Option<SharedString>,
596 pending_title_generation: Option<Task<()>>,
597 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
598 summary: Option<SharedString>,
599 messages: Vec<Message>,
600 user_store: Entity<UserStore>,
601 completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests while the model performs tool calls and
    /// we run the tools and report their results back to it.
605 running_turn: Option<RunningTurn>,
606 pending_message: Option<AgentMessage>,
607 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
608 tool_use_limit_reached: bool,
609 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
610 #[allow(unused)]
611 cumulative_token_usage: TokenUsage,
612 #[allow(unused)]
613 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
614 context_server_registry: Entity<ContextServerRegistry>,
615 profile_id: AgentProfileId,
616 project_context: Entity<ProjectContext>,
617 templates: Arc<Templates>,
618 model: Option<Arc<dyn LanguageModel>>,
619 summarization_model: Option<Arc<dyn LanguageModel>>,
620 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
621 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
622 pub(crate) project: Entity<Project>,
623 pub(crate) action_log: Entity<ActionLog>,
624 /// Tracks the last time files were read by the agent, to detect external modifications
625 pub(crate) file_read_times: HashMap<PathBuf, fs::MTime>,
626 /// True if this thread was imported from a shared thread and can be synced.
627 imported: bool,
628}
629
630impl Thread {
631 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
632 let image = model.map_or(true, |model| model.supports_images());
633 acp::PromptCapabilities::new()
634 .image(image)
635 .embedded_context(true)
636 }
637
638 pub fn new(
639 project: Entity<Project>,
640 project_context: Entity<ProjectContext>,
641 context_server_registry: Entity<ContextServerRegistry>,
642 templates: Arc<Templates>,
643 model: Option<Arc<dyn LanguageModel>>,
644 cx: &mut Context<Self>,
645 ) -> Self {
646 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
647 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
648 let (prompt_capabilities_tx, prompt_capabilities_rx) =
649 watch::channel(Self::prompt_capabilities(model.as_deref()));
650 Self {
651 id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
652 prompt_id: PromptId::new(),
653 updated_at: Utc::now(),
654 title: None,
655 pending_title_generation: None,
656 pending_summary_generation: None,
657 summary: None,
658 messages: Vec::new(),
659 user_store: project.read(cx).user_store(),
660 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
661 running_turn: None,
662 pending_message: None,
663 tools: BTreeMap::default(),
664 tool_use_limit_reached: false,
665 request_token_usage: HashMap::default(),
666 cumulative_token_usage: TokenUsage::default(),
667 initial_project_snapshot: {
668 let project_snapshot = Self::project_snapshot(project.clone(), cx);
669 cx.foreground_executor()
670 .spawn(async move { Some(project_snapshot.await) })
671 .shared()
672 },
673 context_server_registry,
674 profile_id,
675 project_context,
676 templates,
677 model,
678 summarization_model: None,
679 prompt_capabilities_tx,
680 prompt_capabilities_rx,
681 project,
682 action_log,
683 file_read_times: HashMap::default(),
684 imported: false,
685 }
686 }
687
688 pub fn id(&self) -> &acp::SessionId {
689 &self.id
690 }
691
692 /// Returns true if this thread was imported from a shared thread.
693 pub fn is_imported(&self) -> bool {
694 self.imported
695 }
696
697 pub fn replay(
698 &mut self,
699 cx: &mut Context<Self>,
700 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
701 let (tx, rx) = mpsc::unbounded();
702 let stream = ThreadEventStream(tx);
703 for message in &self.messages {
704 match message {
705 Message::User(user_message) => stream.send_user_message(user_message),
706 Message::Agent(assistant_message) => {
707 for content in &assistant_message.content {
708 match content {
709 AgentMessageContent::Text(text) => stream.send_text(text),
710 AgentMessageContent::Thinking { text, .. } => {
711 stream.send_thinking(text)
712 }
713 AgentMessageContent::RedactedThinking(_) => {}
714 AgentMessageContent::ToolUse(tool_use) => {
715 self.replay_tool_call(
716 tool_use,
717 assistant_message.tool_results.get(&tool_use.id),
718 &stream,
719 cx,
720 );
721 }
722 }
723 }
724 }
725 Message::Resume => {}
726 }
727 }
728 rx
729 }
730
731 fn replay_tool_call(
732 &self,
733 tool_use: &LanguageModelToolUse,
734 tool_result: Option<&LanguageModelToolResult>,
735 stream: &ThreadEventStream,
736 cx: &mut Context<Self>,
737 ) {
738 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
739 self.context_server_registry
740 .read(cx)
741 .servers()
742 .find_map(|(_, tools)| {
743 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
744 Some(tool.clone())
745 } else {
746 None
747 }
748 })
749 });
750
751 let Some(tool) = tool else {
752 stream
753 .0
754 .unbounded_send(Ok(ThreadEvent::ToolCall(
755 acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
756 .status(acp::ToolCallStatus::Failed)
757 .raw_input(tool_use.input.clone()),
758 )))
759 .ok();
760 return;
761 };
762
763 let title = tool.initial_title(tool_use.input.clone(), cx);
764 let kind = tool.kind();
765 stream.send_tool_call(
766 &tool_use.id,
767 &tool_use.name,
768 title,
769 kind,
770 tool_use.input.clone(),
771 );
772
773 let output = tool_result
774 .as_ref()
775 .and_then(|result| result.output.clone());
776 if let Some(output) = output.clone() {
777 // For replay, we use a dummy cancellation receiver since the tool already completed
778 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
779 let tool_event_stream = ToolCallEventStream::new(
780 tool_use.id.clone(),
781 stream.clone(),
782 Some(self.project.read(cx).fs().clone()),
783 cancellation_rx,
784 );
785 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
786 .log_err();
787 }
788
789 stream.update_tool_call_fields(
790 &tool_use.id,
791 acp::ToolCallUpdateFields::new()
792 .status(
793 tool_result
794 .as_ref()
795 .map_or(acp::ToolCallStatus::Failed, |result| {
796 if result.is_error {
797 acp::ToolCallStatus::Failed
798 } else {
799 acp::ToolCallStatus::Completed
800 }
801 }),
802 )
803 .raw_output(output),
804 );
805 }
806
807 pub fn from_db(
808 id: acp::SessionId,
809 db_thread: DbThread,
810 project: Entity<Project>,
811 project_context: Entity<ProjectContext>,
812 context_server_registry: Entity<ContextServerRegistry>,
813 templates: Arc<Templates>,
814 cx: &mut Context<Self>,
815 ) -> Self {
816 let profile_id = db_thread
817 .profile
818 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
819
820 let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
821 db_thread
822 .model
823 .and_then(|model| {
824 let model = SelectedModel {
825 provider: model.provider.clone().into(),
826 model: model.model.into(),
827 };
828 registry.select_model(&model, cx)
829 })
830 .or_else(|| registry.default_model())
831 .map(|model| model.model)
832 });
833
834 if model.is_none() {
835 model = Self::resolve_profile_model(&profile_id, cx);
836 }
837 if model.is_none() {
838 model = LanguageModelRegistry::global(cx).update(cx, |registry, _cx| {
839 registry.default_model().map(|model| model.model)
840 });
841 }
842
843 let (prompt_capabilities_tx, prompt_capabilities_rx) =
844 watch::channel(Self::prompt_capabilities(model.as_deref()));
845
846 let action_log = cx.new(|_| ActionLog::new(project.clone()));
847
848 Self {
849 id,
850 prompt_id: PromptId::new(),
851 title: if db_thread.title.is_empty() {
852 None
853 } else {
854 Some(db_thread.title.clone())
855 },
856 pending_title_generation: None,
857 pending_summary_generation: None,
858 summary: db_thread.detailed_summary,
859 messages: db_thread.messages,
860 user_store: project.read(cx).user_store(),
861 completion_mode: db_thread.completion_mode.unwrap_or_default(),
862 running_turn: None,
863 pending_message: None,
864 tools: BTreeMap::default(),
865 tool_use_limit_reached: false,
866 request_token_usage: db_thread.request_token_usage.clone(),
867 cumulative_token_usage: db_thread.cumulative_token_usage,
868 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
869 context_server_registry,
870 profile_id,
871 project_context,
872 templates,
873 model,
874 summarization_model: None,
875 project,
876 action_log,
877 updated_at: db_thread.updated_at,
878 prompt_capabilities_tx,
879 prompt_capabilities_rx,
880 file_read_times: HashMap::default(),
881 imported: db_thread.imported,
882 }
883 }
884
885 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
886 let initial_project_snapshot = self.initial_project_snapshot.clone();
887 let mut thread = DbThread {
888 title: self.title(),
889 messages: self.messages.clone(),
890 updated_at: self.updated_at,
891 detailed_summary: self.summary.clone(),
892 initial_project_snapshot: None,
893 cumulative_token_usage: self.cumulative_token_usage,
894 request_token_usage: self.request_token_usage.clone(),
895 model: self.model.as_ref().map(|model| DbLanguageModel {
896 provider: model.provider_id().to_string(),
897 model: model.name().0.to_string(),
898 }),
899 completion_mode: Some(self.completion_mode),
900 profile: Some(self.profile_id.clone()),
901 imported: self.imported,
902 };
903
904 cx.background_spawn(async move {
905 let initial_project_snapshot = initial_project_snapshot.await;
906 thread.initial_project_snapshot = initial_project_snapshot;
907 thread
908 })
909 }
910
911 /// Create a snapshot of the current project state including git information and unsaved buffers.
912 fn project_snapshot(
913 project: Entity<Project>,
914 cx: &mut Context<Self>,
915 ) -> Task<Arc<ProjectSnapshot>> {
916 let task = project::telemetry_snapshot::TelemetrySnapshot::new(&project, cx);
917 cx.spawn(async move |_, _| {
918 let snapshot = task.await;
919
920 Arc::new(ProjectSnapshot {
921 worktree_snapshots: snapshot.worktree_snapshots,
922 timestamp: Utc::now(),
923 })
924 })
925 }
926
927 pub fn project_context(&self) -> &Entity<ProjectContext> {
928 &self.project_context
929 }
930
931 pub fn project(&self) -> &Entity<Project> {
932 &self.project
933 }
934
935 pub fn action_log(&self) -> &Entity<ActionLog> {
936 &self.action_log
937 }
938
939 pub fn is_empty(&self) -> bool {
940 self.messages.is_empty() && self.title.is_none()
941 }
942
943 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
944 self.model.as_ref()
945 }
946
947 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
948 let old_usage = self.latest_token_usage();
949 self.model = Some(model);
950 let new_caps = Self::prompt_capabilities(self.model.as_deref());
951 let new_usage = self.latest_token_usage();
952 if old_usage != new_usage {
953 cx.emit(TokenUsageUpdated(new_usage));
954 }
955 self.prompt_capabilities_tx.send(new_caps).log_err();
956 cx.notify()
957 }
958
959 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
960 self.summarization_model.as_ref()
961 }
962
963 pub fn set_summarization_model(
964 &mut self,
965 model: Option<Arc<dyn LanguageModel>>,
966 cx: &mut Context<Self>,
967 ) {
968 self.summarization_model = model;
969 cx.notify()
970 }
971
972 pub fn completion_mode(&self) -> CompletionMode {
973 self.completion_mode
974 }
975
976 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
977 let old_usage = self.latest_token_usage();
978 self.completion_mode = mode;
979 let new_usage = self.latest_token_usage();
980 if old_usage != new_usage {
981 cx.emit(TokenUsageUpdated(new_usage));
982 }
983 cx.notify()
984 }
985
986 #[cfg(any(test, feature = "test-support"))]
987 pub fn last_message(&self) -> Option<Message> {
988 if let Some(message) = self.pending_message.clone() {
989 Some(Message::Agent(message))
990 } else {
991 self.messages.last().cloned()
992 }
993 }
994
995 pub fn add_default_tools(
996 &mut self,
997 environment: Rc<dyn ThreadEnvironment>,
998 cx: &mut Context<Self>,
999 ) {
1000 let language_registry = self.project.read(cx).languages().clone();
1001 self.add_tool(CopyPathTool::new(self.project.clone()));
1002 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1003 self.add_tool(DeletePathTool::new(
1004 self.project.clone(),
1005 self.action_log.clone(),
1006 ));
1007 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1008 self.add_tool(EditFileTool::new(
1009 self.project.clone(),
1010 cx.weak_entity(),
1011 language_registry,
1012 Templates::new(),
1013 ));
1014 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1015 self.add_tool(FindPathTool::new(self.project.clone()));
1016 self.add_tool(GrepTool::new(self.project.clone()));
1017 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1018 self.add_tool(MovePathTool::new(self.project.clone()));
1019 self.add_tool(NowTool);
1020 self.add_tool(OpenTool::new(self.project.clone()));
1021 self.add_tool(ReadFileTool::new(
1022 cx.weak_entity(),
1023 self.project.clone(),
1024 self.action_log.clone(),
1025 ));
1026 self.add_tool(SaveFileTool::new(self.project.clone()));
1027 self.add_tool(RestoreFileFromDiskTool::new(self.project.clone()));
1028 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1029 self.add_tool(ThinkingTool);
1030 self.add_tool(WebSearchTool);
1031 }
1032
1033 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1034 self.tools.insert(T::name().into(), tool.erase());
1035 }
1036
1037 pub fn remove_tool(&mut self, name: &str) -> bool {
1038 self.tools.remove(name).is_some()
1039 }
1040
1041 pub fn profile(&self) -> &AgentProfileId {
1042 &self.profile_id
1043 }
1044
1045 pub fn set_profile(&mut self, profile_id: AgentProfileId, cx: &mut Context<Self>) {
1046 if self.profile_id == profile_id {
1047 return;
1048 }
1049
1050 self.profile_id = profile_id;
1051
1052 // Swap to the profile's preferred model when available.
1053 if let Some(model) = Self::resolve_profile_model(&self.profile_id, cx) {
1054 self.set_model(model, cx);
1055 }
1056 }
1057
1058 pub fn cancel(&mut self, cx: &mut Context<Self>) -> Task<()> {
1059 let Some(running_turn) = self.running_turn.take() else {
1060 self.flush_pending_message(cx);
1061 return Task::ready(());
1062 };
1063
1064 let turn_task = running_turn.cancel();
1065
1066 cx.spawn(async move |this, cx| {
1067 turn_task.await;
1068 this.update(cx, |this, cx| {
1069 this.flush_pending_message(cx);
1070 })
1071 .ok();
1072 })
1073 }
1074
1075 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1076 let Some(last_user_message) = self.last_user_message() else {
1077 return;
1078 };
1079
1080 self.request_token_usage
1081 .insert(last_user_message.id.clone(), update);
1082 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1083 cx.notify();
1084 }
1085
1086 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1087 self.cancel(cx).detach();
1088 // Clear pending message since cancel will try to flush it asynchronously,
1089 // and we don't want that content to be added after we truncate
1090 self.pending_message.take();
1091 let Some(position) = self.messages.iter().position(
1092 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1093 ) else {
1094 return Err(anyhow!("Message not found"));
1095 };
1096
1097 for message in self.messages.drain(position..) {
1098 match message {
1099 Message::User(message) => {
1100 self.request_token_usage.remove(&message.id);
1101 }
1102 Message::Agent(_) | Message::Resume => {}
1103 }
1104 }
1105 self.clear_summary();
1106 cx.notify();
1107 Ok(())
1108 }
1109
1110 pub fn latest_request_token_usage(&self) -> Option<language_model::TokenUsage> {
1111 let last_user_message = self.last_user_message()?;
1112 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1113 Some(*tokens)
1114 }
1115
1116 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1117 let usage = self.latest_request_token_usage()?;
1118 let model = self.model.clone()?;
1119 Some(acp_thread::TokenUsage {
1120 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1121 used_tokens: usage.total_tokens(),
1122 output_tokens: usage.output_tokens,
1123 })
1124 }
1125
1126 /// Get the total input token count as of the message before the given message.
1127 ///
1128 /// Returns `None` if:
1129 /// - `target_id` is the first message (no previous message)
1130 /// - The previous message hasn't received a response yet (no usage data)
1131 /// - `target_id` is not found in the messages
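    ///
    /// For example (illustrative): with messages `[User A, Agent, User B]`, calling this
    /// with B's id returns the `input_tokens` recorded for the request that answered A.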
1132 pub fn tokens_before_message(&self, target_id: &UserMessageId) -> Option<u64> {
1133 let mut previous_user_message_id: Option<&UserMessageId> = None;
1134
1135 for message in &self.messages {
1136 if let Message::User(user_msg) = message {
1137 if &user_msg.id == target_id {
1138 let prev_id = previous_user_message_id?;
1139 let usage = self.request_token_usage.get(prev_id)?;
1140 return Some(usage.input_tokens);
1141 }
1142 previous_user_message_id = Some(&user_msg.id);
1143 }
1144 }
1145 None
1146 }
1147
1148 /// Look up the active profile and resolve its preferred model if one is configured.
1149 fn resolve_profile_model(
1150 profile_id: &AgentProfileId,
1151 cx: &mut Context<Self>,
1152 ) -> Option<Arc<dyn LanguageModel>> {
1153 let selection = AgentSettings::get_global(cx)
1154 .profiles
1155 .get(profile_id)?
1156 .default_model
1157 .clone()?;
1158 Self::resolve_model_from_selection(&selection, cx)
1159 }
1160
1161 /// Translate a stored model selection into the configured model from the registry.
1162 fn resolve_model_from_selection(
1163 selection: &LanguageModelSelection,
1164 cx: &mut Context<Self>,
1165 ) -> Option<Arc<dyn LanguageModel>> {
1166 let selected = SelectedModel {
1167 provider: LanguageModelProviderId::from(selection.provider.0.clone()),
1168 model: LanguageModelId::from(selection.model.clone()),
1169 };
1170 LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
1171 registry
1172 .select_model(&selected, cx)
1173 .map(|configured| configured.model)
1174 })
1175 }
1176
1177 pub fn resume(
1178 &mut self,
1179 cx: &mut Context<Self>,
1180 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1181 self.messages.push(Message::Resume);
1182 cx.notify();
1183
1184 log::debug!("Total messages in thread: {}", self.messages.len());
1185 self.run_turn(cx)
1186 }
1187
    /// Sending a message results in the model streaming a response, which may include tool calls.
    /// After requesting tool calls, the model stops and waits for the outstanding tool calls to complete and for their results to be sent back.
    /// The returned channel reports every occurrence of the model stopping before erroring or ending its turn.
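    ///
    /// A minimal usage sketch (assuming a `thread: Entity<Thread>`, a fresh
    /// `message_id: UserMessageId`, and a gpui `cx: &mut App` in scope):
    ///
    /// ```ignore
    /// let events_rx = thread.update(cx, |thread, cx| {
    ///     thread.send(
    ///         message_id,
    ///         [UserMessageContent::Text("Summarize this project".into())],
    ///         cx,
    ///     )
    /// })?;
    /// // Drain `events_rx` to observe streamed text, tool calls, and the stop reason.
    /// ```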
1191 pub fn send<T>(
1192 &mut self,
1193 id: UserMessageId,
1194 content: impl IntoIterator<Item = T>,
1195 cx: &mut Context<Self>,
1196 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1197 where
1198 T: Into<UserMessageContent>,
1199 {
1200 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1201 log::debug!("Thread::send content: {:?}", content);
1202
1203 self.messages
1204 .push(Message::User(UserMessage { id, content }));
1205 cx.notify();
1206
1207 self.send_existing(cx)
1208 }
1209
1210 pub fn send_existing(
1211 &mut self,
1212 cx: &mut Context<Self>,
1213 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1214 let model = self.model().context("No language model configured")?;
1215
1216 log::info!("Thread::send called with model: {}", model.name().0);
1217 self.advance_prompt_id();
1218
1219 log::debug!("Total messages in thread: {}", self.messages.len());
1220 self.run_turn(cx)
1221 }
1222
1223 pub fn push_acp_user_block(
1224 &mut self,
1225 id: UserMessageId,
1226 blocks: impl IntoIterator<Item = acp::ContentBlock>,
1227 path_style: PathStyle,
1228 cx: &mut Context<Self>,
1229 ) {
1230 let content = blocks
1231 .into_iter()
1232 .map(|block| UserMessageContent::from_content_block(block, path_style))
1233 .collect::<Vec<_>>();
1234 self.messages
1235 .push(Message::User(UserMessage { id, content }));
1236 cx.notify();
1237 }
1238
1239 pub fn push_acp_agent_block(&mut self, block: acp::ContentBlock, cx: &mut Context<Self>) {
1240 let text = match block {
1241 acp::ContentBlock::Text(text_content) => text_content.text,
1242 acp::ContentBlock::Image(_) => "[image]".to_string(),
1243 acp::ContentBlock::Audio(_) => "[audio]".to_string(),
1244 acp::ContentBlock::ResourceLink(resource_link) => resource_link.uri,
1245 acp::ContentBlock::Resource(resource) => match resource.resource {
1246 acp::EmbeddedResourceResource::TextResourceContents(resource) => resource.uri,
1247 acp::EmbeddedResourceResource::BlobResourceContents(resource) => resource.uri,
1248 _ => "[resource]".to_string(),
1249 },
1250 _ => "[unknown]".to_string(),
1251 };
1252
1253 self.messages.push(Message::Agent(AgentMessage {
1254 content: vec![AgentMessageContent::Text(text)],
1255 ..Default::default()
1256 }));
1257 cx.notify();
1258 }
1259
1260 #[cfg(feature = "eval")]
1261 pub fn proceed(
1262 &mut self,
1263 cx: &mut Context<Self>,
1264 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1265 self.run_turn(cx)
1266 }
1267
1268 fn run_turn(
1269 &mut self,
1270 cx: &mut Context<Self>,
1271 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1272 // Flush the old pending message synchronously before cancelling,
1273 // to avoid a race where the detached cancel task might flush the NEW
1274 // turn's pending message instead of the old one.
1275 self.flush_pending_message(cx);
1276 self.cancel(cx).detach();
1277
1278 let model = self.model.clone().context("No language model configured")?;
1279 let profile = AgentSettings::get_global(cx)
1280 .profiles
1281 .get(&self.profile_id)
1282 .context("Profile not found")?;
1283 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1284 let event_stream = ThreadEventStream(events_tx);
1285 let message_ix = self.messages.len().saturating_sub(1);
1286 self.tool_use_limit_reached = false;
1287 self.clear_summary();
1288 let (cancellation_tx, mut cancellation_rx) = watch::channel(false);
1289 self.running_turn = Some(RunningTurn {
1290 event_stream: event_stream.clone(),
1291 tools: self.enabled_tools(profile, &model, cx),
1292 cancellation_tx,
1293 _task: cx.spawn(async move |this, cx| {
1294 log::debug!("Starting agent turn execution");
1295
1296 let turn_result = Self::run_turn_internal(
1297 &this,
1298 model,
1299 &event_stream,
1300 cancellation_rx.clone(),
1301 cx,
1302 )
1303 .await;
1304
1305 // Check if we were cancelled - if so, cancel() already took running_turn
1306 // and we shouldn't touch it (it might be a NEW turn now)
1307 let was_cancelled = *cancellation_rx.borrow();
1308 if was_cancelled {
1309 log::debug!("Turn was cancelled, skipping cleanup");
1310 return;
1311 }
1312
1313 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1314
1315 match turn_result {
1316 Ok(()) => {
1317 log::debug!("Turn execution completed");
1318 event_stream.send_stop(acp::StopReason::EndTurn);
1319 }
1320 Err(error) => {
1321 log::error!("Turn execution failed: {:?}", error);
1322 match error.downcast::<CompletionError>() {
1323 Ok(CompletionError::Refusal) => {
1324 event_stream.send_stop(acp::StopReason::Refusal);
1325 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1326 }
1327 Ok(CompletionError::MaxTokens) => {
1328 event_stream.send_stop(acp::StopReason::MaxTokens);
1329 }
1330 Ok(CompletionError::Other(error)) | Err(error) => {
1331 event_stream.send_error(error);
1332 }
1333 }
1334 }
1335 }
1336
1337 _ = this.update(cx, |this, _| this.running_turn.take());
1338 }),
1339 });
1340 Ok(events_rx)
1341 }
1342
1343 async fn run_turn_internal(
1344 this: &WeakEntity<Self>,
1345 model: Arc<dyn LanguageModel>,
1346 event_stream: &ThreadEventStream,
1347 mut cancellation_rx: watch::Receiver<bool>,
1348 cx: &mut AsyncApp,
1349 ) -> Result<()> {
1350 let mut attempt = 0;
1351 let mut intent = CompletionIntent::UserPrompt;
1352 loop {
1353 let request =
1354 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1355
1356 telemetry::event!(
1357 "Agent Thread Completion",
1358 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1359 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1360 model = model.telemetry_id(),
1361 model_provider = model.provider_id().to_string(),
1362 attempt
1363 );
1364
1365 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1366
1367 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1368 Ok(events) => (events, None),
1369 Err(err) => (stream::empty().boxed(), Some(err)),
1370 };
1371 let mut tool_results = FuturesUnordered::new();
1372 let mut cancelled = false;
1373 loop {
1374 // Race between getting the next event and cancellation
1375 let event = futures::select! {
1376 event = events.next().fuse() => event,
1377 _ = cancellation_rx.changed().fuse() => {
1378 if *cancellation_rx.borrow() {
1379 cancelled = true;
1380 break;
1381 }
1382 continue;
1383 }
1384 };
1385 let Some(event) = event else {
1386 break;
1387 };
1388 log::trace!("Received completion event: {:?}", event);
1389 match event {
1390 Ok(event) => {
1391 tool_results.extend(this.update(cx, |this, cx| {
1392 this.handle_completion_event(
1393 event,
1394 event_stream,
1395 cancellation_rx.clone(),
1396 cx,
1397 )
1398 })??);
1399 }
1400 Err(err) => {
1401 error = Some(err);
1402 break;
1403 }
1404 }
1405 }
1406
1407 let end_turn = tool_results.is_empty();
1408 while let Some(tool_result) = tool_results.next().await {
1409 log::debug!("Tool finished {:?}", tool_result);
1410
1411 event_stream.update_tool_call_fields(
1412 &tool_result.tool_use_id,
1413 acp::ToolCallUpdateFields::new()
1414 .status(if tool_result.is_error {
1415 acp::ToolCallStatus::Failed
1416 } else {
1417 acp::ToolCallStatus::Completed
1418 })
1419 .raw_output(tool_result.output.clone()),
1420 );
1421 this.update(cx, |this, _cx| {
1422 this.pending_message()
1423 .tool_results
1424 .insert(tool_result.tool_use_id.clone(), tool_result);
1425 })?;
1426 }
1427
1428 this.update(cx, |this, cx| {
1429 this.flush_pending_message(cx);
1430 if this.title.is_none() && this.pending_title_generation.is_none() {
1431 this.generate_title(cx);
1432 }
1433 })?;
1434
1435 if cancelled {
1436 log::debug!("Turn cancelled by user, exiting");
1437 return Ok(());
1438 }
1439
1440 if let Some(error) = error {
1441 attempt += 1;
1442 let retry = this.update(cx, |this, cx| {
1443 let user_store = this.user_store.read(cx);
1444 this.handle_completion_error(error, attempt, user_store.plan())
1445 })??;
1446 let timer = cx.background_executor().timer(retry.duration);
1447 event_stream.send_retry(retry);
1448 timer.await;
1449 this.update(cx, |this, _cx| {
1450 if let Some(Message::Agent(message)) = this.messages.last() {
1451 if message.tool_results.is_empty() {
1452 intent = CompletionIntent::UserPrompt;
1453 this.messages.push(Message::Resume);
1454 }
1455 }
1456 })?;
1457 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1458 return Err(language_model::ToolUseLimitReachedError.into());
1459 } else if end_turn {
1460 return Ok(());
1461 } else {
1462 intent = CompletionIntent::ToolResults;
1463 attempt = 0;
1464 }
1465 }
1466 }
1467
1468 fn handle_completion_error(
1469 &mut self,
1470 error: LanguageModelCompletionError,
1471 attempt: u8,
1472 plan: Option<Plan>,
1473 ) -> Result<acp_thread::RetryStatus> {
1474 let Some(model) = self.model.as_ref() else {
1475 return Err(anyhow!(error));
1476 };
1477
1478 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1479 match plan {
1480 Some(Plan::V2(_)) => true,
1481 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1482 None => false,
1483 }
1484 } else {
1485 true
1486 };
1487
1488 if !auto_retry {
1489 return Err(anyhow!(error));
1490 }
1491
1492 let Some(strategy) = Self::retry_strategy_for(&error) else {
1493 return Err(anyhow!(error));
1494 };
1495
1496 let max_attempts = match &strategy {
1497 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1498 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1499 };
1500
1501 if attempt > max_attempts {
1502 return Err(anyhow!(error));
1503 }
1504
1505 let delay = match &strategy {
1506 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1507 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1508 Duration::from_secs(delay_secs)
1509 }
1510 RetryStrategy::Fixed { delay, .. } => *delay,
1511 };
1512 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1513
1514 Ok(acp_thread::RetryStatus {
1515 last_error: error.to_string().into(),
1516 attempt: attempt as usize,
1517 max_attempts: max_attempts as usize,
1518 started_at: Instant::now(),
1519 duration: delay,
1520 })
1521 }
1522
1523 /// A helper method that's called on every streamed completion event.
1524 /// Returns an optional tool result task, which the main agentic loop will
1525 /// send back to the model when it resolves.
1526 fn handle_completion_event(
1527 &mut self,
1528 event: LanguageModelCompletionEvent,
1529 event_stream: &ThreadEventStream,
1530 cancellation_rx: watch::Receiver<bool>,
1531 cx: &mut Context<Self>,
1532 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1533 log::trace!("Handling streamed completion event: {:?}", event);
1534 use LanguageModelCompletionEvent::*;
1535
1536 match event {
1537 StartMessage { .. } => {
1538 self.flush_pending_message(cx);
1539 self.pending_message = Some(AgentMessage::default());
1540 }
1541 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1542 Thinking { text, signature } => {
1543 self.handle_thinking_event(text, signature, event_stream, cx)
1544 }
1545 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1546 ReasoningDetails(details) => {
1547 let last_message = self.pending_message();
1548 // Store the last non-empty reasoning_details (overwrites earlier ones)
1549 // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
1550 if let serde_json::Value::Array(ref arr) = details {
1551 if !arr.is_empty() {
1552 last_message.reasoning_details = Some(details);
1553 }
1554 } else {
1555 last_message.reasoning_details = Some(details);
1556 }
1557 }
1558 ToolUse(tool_use) => {
1559 return Ok(self.handle_tool_use_event(tool_use, event_stream, cancellation_rx, cx));
1560 }
1561 ToolUseJsonParseError {
1562 id,
1563 tool_name,
1564 raw_input,
1565 json_parse_error,
1566 } => {
1567 return Ok(Some(Task::ready(
1568 self.handle_tool_use_json_parse_error_event(
1569 id,
1570 tool_name,
1571 raw_input,
1572 json_parse_error,
1573 ),
1574 )));
1575 }
1576 UsageUpdate(usage) => {
1577 telemetry::event!(
1578 "Agent Thread Completion Usage Updated",
1579 thread_id = self.id.to_string(),
1580 prompt_id = self.prompt_id.to_string(),
1581 model = self.model.as_ref().map(|m| m.telemetry_id()),
1582 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1583 input_tokens = usage.input_tokens,
1584 output_tokens = usage.output_tokens,
1585 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1586 cache_read_input_tokens = usage.cache_read_input_tokens,
1587 );
1588 self.update_token_usage(usage, cx);
1589 }
1590 UsageUpdated { amount, limit } => {
1591 self.update_model_request_usage(amount, limit, cx);
1592 }
1593 ToolUseLimitReached => {
1594 self.tool_use_limit_reached = true;
1595 }
1596 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1597 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1598 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1599 Started | Queued { .. } => {}
1600 }
1601
1602 Ok(None)
1603 }
1604
1605 fn handle_text_event(
1606 &mut self,
1607 new_text: String,
1608 event_stream: &ThreadEventStream,
1609 cx: &mut Context<Self>,
1610 ) {
1611 event_stream.send_text(&new_text);
1612
1613 let last_message = self.pending_message();
1614 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1615 text.push_str(&new_text);
1616 } else {
1617 last_message
1618 .content
1619 .push(AgentMessageContent::Text(new_text));
1620 }
1621
1622 cx.notify();
1623 }
1624
1625 fn handle_thinking_event(
1626 &mut self,
1627 new_text: String,
1628 new_signature: Option<String>,
1629 event_stream: &ThreadEventStream,
1630 cx: &mut Context<Self>,
1631 ) {
1632 event_stream.send_thinking(&new_text);
1633
1634 let last_message = self.pending_message();
1635 if let Some(AgentMessageContent::Thinking { text, signature }) =
1636 last_message.content.last_mut()
1637 {
1638 text.push_str(&new_text);
1639 *signature = new_signature.or(signature.take());
1640 } else {
1641 last_message.content.push(AgentMessageContent::Thinking {
1642 text: new_text,
1643 signature: new_signature,
1644 });
1645 }
1646
1647 cx.notify();
1648 }
1649
1650 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1651 let last_message = self.pending_message();
1652 last_message
1653 .content
1654 .push(AgentMessageContent::RedactedThinking(data));
1655 cx.notify();
1656 }
1657
1658 fn handle_tool_use_event(
1659 &mut self,
1660 tool_use: LanguageModelToolUse,
1661 event_stream: &ThreadEventStream,
1662 cancellation_rx: watch::Receiver<bool>,
1663 cx: &mut Context<Self>,
1664 ) -> Option<Task<LanguageModelToolResult>> {
1665 cx.notify();
1666
1667 let tool = self.tool(tool_use.name.as_ref());
1668 let mut title = SharedString::from(&tool_use.name);
1669 let mut kind = acp::ToolKind::Other;
1670 if let Some(tool) = tool.as_ref() {
1671 title = tool.initial_title(tool_use.input.clone(), cx);
1672 kind = tool.kind();
1673 }
1674
        // Ensure the last message ends with the current tool use
1676 let last_message = self.pending_message();
1677 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1678 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1679 if last_tool_use.id == tool_use.id {
1680 *last_tool_use = tool_use.clone();
1681 false
1682 } else {
1683 true
1684 }
1685 } else {
1686 true
1687 }
1688 });
1689
1690 if push_new_tool_use {
1691 event_stream.send_tool_call(
1692 &tool_use.id,
1693 &tool_use.name,
1694 title,
1695 kind,
1696 tool_use.input.clone(),
1697 );
1698 last_message
1699 .content
1700 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1701 } else {
1702 event_stream.update_tool_call_fields(
1703 &tool_use.id,
1704 acp::ToolCallUpdateFields::new()
1705 .title(title.as_str())
1706 .kind(kind)
1707 .raw_input(tool_use.input.clone()),
1708 );
1709 }
1710
1711 if !tool_use.is_input_complete {
1712 return None;
1713 }
1714
1715 let Some(tool) = tool else {
1716 let content = format!("No tool named {} exists", tool_use.name);
1717 return Some(Task::ready(LanguageModelToolResult {
1718 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1719 tool_use_id: tool_use.id,
1720 tool_name: tool_use.name,
1721 is_error: true,
1722 output: None,
1723 }));
1724 };
1725
1726 let fs = self.project.read(cx).fs().clone();
1727 let tool_event_stream = ToolCallEventStream::new(
1728 tool_use.id.clone(),
1729 event_stream.clone(),
1730 Some(fs),
1731 cancellation_rx,
1732 );
1733 tool_event_stream.update_fields(
1734 acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
1735 );
1736 let supports_images = self.model().is_some_and(|model| model.supports_images());
1737 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1738 log::debug!("Running tool {}", tool_use.name);
1739 Some(cx.foreground_executor().spawn(async move {
1740 let tool_result = tool_result.await.and_then(|output| {
1741 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1742 && !supports_images
1743 {
1744 return Err(anyhow!(
1745 "Attempted to read an image, but this model doesn't support it.",
1746 ));
1747 }
1748 Ok(output)
1749 });
1750
1751 match tool_result {
1752 Ok(output) => LanguageModelToolResult {
1753 tool_use_id: tool_use.id,
1754 tool_name: tool_use.name,
1755 is_error: false,
1756 content: output.llm_output,
1757 output: Some(output.raw_output),
1758 },
1759 Err(error) => LanguageModelToolResult {
1760 tool_use_id: tool_use.id,
1761 tool_name: tool_use.name,
1762 is_error: true,
1763 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1764 output: Some(error.to_string().into()),
1765 },
1766 }
1767 }))
1768 }
1769
1770 fn handle_tool_use_json_parse_error_event(
1771 &mut self,
1772 tool_use_id: LanguageModelToolUseId,
1773 tool_name: Arc<str>,
1774 raw_input: Arc<str>,
1775 json_parse_error: String,
1776 ) -> LanguageModelToolResult {
1777 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1778 LanguageModelToolResult {
1779 tool_use_id,
1780 tool_name,
1781 is_error: true,
1782 content: LanguageModelToolResultContent::Text(tool_output.into()),
1783 output: Some(serde_json::Value::String(raw_input.to_string())),
1784 }
1785 }
1786
1787 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1788 self.project
1789 .read(cx)
1790 .user_store()
1791 .update(cx, |user_store, cx| {
1792 user_store.update_model_request_usage(
1793 ModelRequestUsage(RequestUsage {
1794 amount: amount as i32,
1795 limit,
1796 }),
1797 cx,
1798 )
1799 });
1800 }
1801
1802 pub fn title(&self) -> SharedString {
1803 self.title.clone().unwrap_or("New Thread".into())
1804 }
1805
1806 pub fn is_generating_summary(&self) -> bool {
1807 self.pending_summary_generation.is_some()
1808 }
1809
1810 pub fn is_generating_title(&self) -> bool {
1811 self.pending_title_generation.is_some()
1812 }
1813
1814 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1815 if let Some(summary) = self.summary.as_ref() {
1816 return Task::ready(Some(summary.clone())).shared();
1817 }
1818 if let Some(task) = self.pending_summary_generation.clone() {
1819 return task;
1820 }
1821 let Some(model) = self.summarization_model.clone() else {
1822 log::error!("No summarization model available");
1823 return Task::ready(None).shared();
1824 };
1825 let mut request = LanguageModelRequest {
1826 intent: Some(CompletionIntent::ThreadContextSummarization),
1827 temperature: AgentSettings::temperature_for_model(&model, cx),
1828 ..Default::default()
1829 };
1830
1831 for message in &self.messages {
1832 request.messages.extend(message.to_request());
1833 }
1834
1835 request.messages.push(LanguageModelRequestMessage {
1836 role: Role::User,
1837 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1838 cache: false,
1839 reasoning_details: None,
1840 });
1841
1842 let task = cx
1843 .spawn(async move |this, cx| {
1844 let mut summary = String::new();
1845 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1846 while let Some(event) = messages.next().await {
1847 let event = event.log_err()?;
1848 let text = match event {
1849 LanguageModelCompletionEvent::Text(text) => text,
1850 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1851 this.update(cx, |thread, cx| {
1852 thread.update_model_request_usage(amount, limit, cx);
1853 })
1854 .ok()?;
1855 continue;
1856 }
1857 _ => continue,
1858 };
1859
1860 let mut lines = text.lines();
1861 summary.extend(lines.next());
1862 }
1863
1864 log::debug!("Setting summary: {}", summary);
1865 let summary = SharedString::from(summary);
1866
1867 this.update(cx, |this, cx| {
1868 this.summary = Some(summary.clone());
1869 this.pending_summary_generation = None;
1870 cx.notify()
1871 })
1872 .ok()?;
1873
1874 Some(summary)
1875 })
1876 .shared();
1877 self.pending_summary_generation = Some(task.clone());
1878 task
1879 }
1880
1881 pub fn generate_title(&mut self, cx: &mut Context<Self>) {
1882 let Some(model) = self.summarization_model.clone() else {
1883 return;
1884 };
1885
1886 log::debug!("Generating title with model: {:?}", model.name());
1890 let mut request = LanguageModelRequest {
1891 intent: Some(CompletionIntent::ThreadSummarization),
1892 temperature: AgentSettings::temperature_for_model(&model, cx),
1893 ..Default::default()
1894 };
1895
1896 for message in &self.messages {
1897 request.messages.extend(message.to_request());
1898 }
1899
1900 request.messages.push(LanguageModelRequestMessage {
1901 role: Role::User,
1902 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1903 cache: false,
1904 reasoning_details: None,
1905 });
1906 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1907 let mut title = String::new();
1908
1909 let generate = async {
1910 let mut messages = model.stream_completion(request, cx).await?;
1911 while let Some(event) = messages.next().await {
1912 let event = event?;
1913 let text = match event {
1914 LanguageModelCompletionEvent::Text(text) => text,
1915 LanguageModelCompletionEvent::UsageUpdated { amount, limit } => {
1916 this.update(cx, |thread, cx| {
1917 thread.update_model_request_usage(amount, limit, cx);
1918 })?;
1919 continue;
1920 }
1921 _ => continue,
1922 };
1923
1924 let mut lines = text.lines();
1925 title.extend(lines.next());
1926
1927 // Stop if the LLM generated multiple lines.
1928 if lines.next().is_some() {
1929 break;
1930 }
1931 }
1932 anyhow::Ok(())
1933 };
1934
1935 if generate.await.context("failed to generate title").log_err().is_some() {
1936 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1937 }
1938 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1939 }));
1940 }
1941
1942 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1943 self.pending_title_generation = None;
1944 if Some(&title) != self.title.as_ref() {
1945 self.title = Some(title);
1946 cx.emit(TitleUpdated);
1947 cx.notify();
1948 }
1949 }
1950
1951 fn clear_summary(&mut self) {
1952 self.summary = None;
1953 self.pending_summary_generation = None;
1954 }
1955
1956 fn last_user_message(&self) -> Option<&UserMessage> {
1957 self.messages
1958 .iter()
1959 .rev()
1960 .find_map(|message| match message {
1961 Message::User(user_message) => Some(user_message),
1962 Message::Agent(_) => None,
1963 Message::Resume => None,
1964 })
1965 }
1966
1967 fn pending_message(&mut self) -> &mut AgentMessage {
1968 self.pending_message.get_or_insert_default()
1969 }
1970
1971 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1972 let Some(mut message) = self.pending_message.take() else {
1973 return;
1974 };
1975
1976 if message.content.is_empty() {
1977 return;
1978 }
1979
1980 for content in &message.content {
1981 let AgentMessageContent::ToolUse(tool_use) = content else {
1982 continue;
1983 };
1984
1985 if !message.tool_results.contains_key(&tool_use.id) {
1986 message.tool_results.insert(
1987 tool_use.id.clone(),
1988 LanguageModelToolResult {
1989 tool_use_id: tool_use.id.clone(),
1990 tool_name: tool_use.name.clone(),
1991 is_error: true,
1992 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1993 output: None,
1994 },
1995 );
1996 }
1997 }
1998
1999 self.messages.push(Message::Agent(message));
2000 self.updated_at = Utc::now();
2001 self.clear_summary();
2002 cx.notify()
2003 }
2004
2005 pub(crate) fn build_completion_request(
2006 &self,
2007 completion_intent: CompletionIntent,
2008 cx: &App,
2009 ) -> Result<LanguageModelRequest> {
2010 let model = self.model().context("No language model configured")?;
2011 let tools = if let Some(turn) = self.running_turn.as_ref() {
2012 turn.tools
2013 .iter()
2014 .filter_map(|(tool_name, tool)| {
2015 log::trace!("Including tool: {}", tool_name);
2016 Some(LanguageModelRequestTool {
2017 name: tool_name.to_string(),
2018 description: tool.description().to_string(),
2019 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
2020 })
2021 })
2022 .collect::<Vec<_>>()
2023 } else {
2024 Vec::new()
2025 };
2026
2027 log::debug!("Building completion request");
2028 log::debug!("Completion intent: {:?}", completion_intent);
2029 log::debug!("Completion mode: {:?}", self.completion_mode);
2030
2031 let available_tools: Vec<_> = self
2032 .running_turn
2033 .as_ref()
2034 .map(|turn| turn.tools.keys().cloned().collect())
2035 .unwrap_or_default();
2036
2037 log::debug!("Request includes {} tools", available_tools.len());
2038 let messages = self.build_request_messages(available_tools, cx);
2039 log::debug!("Request will include {} messages", messages.len());
2040
2041 let request = LanguageModelRequest {
2042 thread_id: Some(self.id.to_string()),
2043 prompt_id: Some(self.prompt_id.to_string()),
2044 intent: Some(completion_intent),
2045 mode: Some(self.completion_mode.into()),
2046 messages,
2047 tools,
2048 tool_choice: None,
2049 stop: Vec::new(),
2050 temperature: AgentSettings::temperature_for_model(model, cx),
2051 thinking_allowed: true,
2052 };
2053
2054 log::debug!("Completion request built successfully");
2055 Ok(request)
2056 }
2057
2058 fn enabled_tools(
2059 &self,
2060 profile: &AgentProfileSettings,
2061 model: &Arc<dyn LanguageModel>,
2062 cx: &App,
2063 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
2064 fn truncate(tool_name: &SharedString) -> SharedString {
2065 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
2066 let mut truncated = tool_name.to_string();
2067 truncated.truncate(MAX_TOOL_NAME_LENGTH);
2068 truncated.into()
2069 } else {
2070 tool_name.clone()
2071 }
2072 }
2073
2074 let mut tools = self
2075 .tools
2076 .iter()
2077 .filter_map(|(tool_name, tool)| {
2078 if tool.supports_provider(&model.provider_id())
2079 && profile.is_tool_enabled(tool_name)
2080 {
2081 Some((truncate(tool_name), tool.clone()))
2082 } else {
2083 None
2084 }
2085 })
2086 .collect::<BTreeMap<_, _>>();
2087
2088 let mut context_server_tools = Vec::new();
2089 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
2090 let mut duplicate_tool_names = HashSet::default();
2091 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
2092 for (tool_name, tool) in server_tools {
2093 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
2094 let tool_name = truncate(tool_name);
2095 if !seen_tools.insert(tool_name.clone()) {
2096 duplicate_tool_names.insert(tool_name.clone());
2097 }
2098 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
2099 }
2100 }
2101 }
2102
2103 // When there are duplicate tool names, disambiguate by prefixing them
2104 // with the server ID. In the rare case there isn't enough space for the
2105 // disambiguated tool name, keep only the last tool with this name.
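// For example, two context servers with the (illustrative) IDs `github` and
// `gitlab` that both expose a `search` tool end up as `github_search` and
// `gitlab_search`.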
2106 for (server_id, tool_name, tool) in context_server_tools {
2107 if duplicate_tool_names.contains(&tool_name) {
2108 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
2109 if available >= 2 {
2110 let mut disambiguated = server_id.0.to_string();
2111 disambiguated.truncate(available - 1);
2112 disambiguated.push('_');
2113 disambiguated.push_str(&tool_name);
2114 tools.insert(disambiguated.into(), tool.clone());
2115 } else {
2116 tools.insert(tool_name, tool.clone());
2117 }
2118 } else {
2119 tools.insert(tool_name, tool.clone());
2120 }
2121 }
2122
2123 tools
2124 }
2125
2126 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
2127 self.running_turn.as_ref()?.tools.get(name).cloned()
2128 }
2129
2130 pub fn has_tool(&self, name: &str) -> bool {
2131 self.running_turn
2132 .as_ref()
2133 .is_some_and(|turn| turn.tools.contains_key(name))
2134 }
2135
2136 fn build_request_messages(
2137 &self,
2138 available_tools: Vec<SharedString>,
2139 cx: &App,
2140 ) -> Vec<LanguageModelRequestMessage> {
2141 log::trace!(
2142 "Building request messages from {} thread messages",
2143 self.messages.len()
2144 );
2145
2146 let system_prompt = SystemPromptTemplate {
2147 project: self.project_context.read(cx),
2148 available_tools,
2149 model_name: self.model.as_ref().map(|m| m.name().0.to_string()),
2150 }
2151 .render(&self.templates)
2152 .context("failed to build system prompt")
2153 .expect("Invalid template");
2154 let mut messages = vec![LanguageModelRequestMessage {
2155 role: Role::System,
2156 content: vec![system_prompt.into()],
2157 cache: false,
2158 reasoning_details: None,
2159 }];
2160 for message in &self.messages {
2161 messages.extend(message.to_request());
2162 }
2163
2164 if let Some(last_message) = messages.last_mut() {
2165 last_message.cache = true;
2166 }
2167
2168 if let Some(message) = self.pending_message.as_ref() {
2169 messages.extend(message.to_request());
2170 }
2171
2172 messages
2173 }
2174
2175 pub fn to_markdown(&self) -> String {
2176 let mut markdown = String::new();
2177 for (ix, message) in self.messages.iter().enumerate() {
2178 if ix > 0 {
2179 markdown.push('\n');
2180 }
2181 markdown.push_str(&message.to_markdown());
2182 }
2183
2184 if let Some(message) = self.pending_message.as_ref() {
2185 markdown.push('\n');
2186 markdown.push_str(&message.to_markdown());
2187 }
2188
2189 markdown
2190 }
2191
2192 fn advance_prompt_id(&mut self) {
2193 self.prompt_id = PromptId::new();
2194 }
2195
2196 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2197 use LanguageModelCompletionError::*;
2198 use http_client::StatusCode;
2199
2200 // General strategy here:
2201 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
2202 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times, backing off exponentially or honoring the server-provided retry-after delay.
2203 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2204 match error {
2205 HttpResponseError {
2206 status_code: StatusCode::TOO_MANY_REQUESTS,
2207 ..
2208 } => Some(RetryStrategy::ExponentialBackoff {
2209 initial_delay: BASE_RETRY_DELAY,
2210 max_attempts: MAX_RETRY_ATTEMPTS,
2211 }),
2212 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2213 Some(RetryStrategy::Fixed {
2214 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2215 max_attempts: MAX_RETRY_ATTEMPTS,
2216 })
2217 }
2218 UpstreamProviderError {
2219 status,
2220 retry_after,
2221 ..
2222 } => match *status {
2223 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2224 Some(RetryStrategy::Fixed {
2225 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2226 max_attempts: MAX_RETRY_ATTEMPTS,
2227 })
2228 }
2229 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2230 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2231 // Internal Server Error could be anything, retry up to 3 times.
2232 max_attempts: 3,
2233 }),
2234 status => {
2235 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2236 // but we encounter it frequently in practice. See https://http.dev/529
2237 if status.as_u16() == 529 {
2238 Some(RetryStrategy::Fixed {
2239 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2240 max_attempts: MAX_RETRY_ATTEMPTS,
2241 })
2242 } else {
2243 Some(RetryStrategy::Fixed {
2244 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2245 max_attempts: 2,
2246 })
2247 }
2248 }
2249 },
2250 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2251 delay: BASE_RETRY_DELAY,
2252 max_attempts: 3,
2253 }),
2254 ApiReadResponseError { .. }
2255 | HttpSend { .. }
2256 | DeserializeResponse { .. }
2257 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2258 delay: BASE_RETRY_DELAY,
2259 max_attempts: 3,
2260 }),
2261 // Retrying these errors definitely shouldn't help.
2262 HttpResponseError {
2263 status_code:
2264 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2265 ..
2266 }
2267 | AuthenticationError { .. }
2268 | PermissionError { .. }
2269 | NoApiKey { .. }
2270 | ApiEndpointNotFound { .. }
2271 | PromptTooLarge { .. } => None,
2272 // These errors might be transient, so retry them once.
2273 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2274 delay: BASE_RETRY_DELAY,
2275 max_attempts: 1,
2276 }),
2277 // Retry all other 4xx and 5xx errors up to 3 times.
2278 HttpResponseError { status_code, .. }
2279 if status_code.is_client_error() || status_code.is_server_error() =>
2280 {
2281 Some(RetryStrategy::Fixed {
2282 delay: BASE_RETRY_DELAY,
2283 max_attempts: 3,
2284 })
2285 }
2286 Other(err)
2287 if err.is::<language_model::PaymentRequiredError>()
2288 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2289 {
2290 // Retrying won't help for Payment Required or Model Request Limit errors (where
2291 // the user must upgrade to usage-based billing to get more requests, or else wait
2292 // for a significant amount of time for the request limit to reset).
2293 None
2294 }
2295 // Conservatively retry any other errors a couple of times.
2296 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2297 delay: BASE_RETRY_DELAY,
2298 max_attempts: 2,
2299 }),
2300 }
2301 }
2302}
2303
2304struct RunningTurn {
2305 /// Holds the task that handles agent interaction until the end of the turn.
2306 /// Survives across multiple requests as the model performs tool calls and
2307 /// we run tools and report their results.
2308 _task: Task<()>,
2309 /// The current event stream for the running turn. Used to report a final
2310 /// cancellation event if we cancel the turn.
2311 event_stream: ThreadEventStream,
2312 /// The tools that were enabled for this turn.
2313 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2314 /// Sender to signal tool cancellation. When cancel is called, this is
2315 /// set to true so all tools can detect user-initiated cancellation.
2316 cancellation_tx: watch::Sender<bool>,
2317}
2318
2319impl RunningTurn {
2320 fn cancel(mut self) -> Task<()> {
2321 log::debug!("Cancelling in progress turn");
2322 self.cancellation_tx.send(true).ok();
2323 self.event_stream.send_canceled();
2324 self._task
2325 }
2326}
2327
2328pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2329
2330impl EventEmitter<TokenUsageUpdated> for Thread {}
2331
2332pub struct TitleUpdated;
2333
2334impl EventEmitter<TitleUpdated> for Thread {}
2335
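/// A strongly-typed tool that the agent can call during a turn.
///
/// Implementors provide a JSON-schema-described input type, an output type that
/// can be sent back to the model, and the logic to run the tool. Call
/// [`AgentTool::erase`] to obtain the type-erased [`AnyAgentTool`] form that the
/// thread stores. A minimal sketch of an implementation follows; `EchoTool` and
/// `EchoInput` are illustrative names, not part of this crate:
///
/// ```ignore
/// /// Echoes the provided text back to the model.
/// #[derive(Serialize, Deserialize, JsonSchema)]
/// struct EchoInput {
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoInput;
///     type Output = LanguageModelToolResultContent;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(LanguageModelToolResultContent::Text(input.text.into())))
///     }
/// }
/// ```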
2336pub trait AgentTool
2337where
2338 Self: 'static + Sized,
2339{
2340 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2341 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2342
2343 fn name() -> &'static str;
2344
2345 fn description() -> SharedString {
2346 let schema = schemars::schema_for!(Self::Input);
2347 SharedString::new(
2348 schema
2349 .get("description")
2350 .and_then(|description| description.as_str())
2351 .unwrap_or_default(),
2352 )
2353 }
2354
2355 fn kind() -> acp::ToolKind;
2356
2357 /// The initial tool title to display. Can be updated during the tool run.
2358 fn initial_title(
2359 &self,
2360 input: Result<Self::Input, serde_json::Value>,
2361 cx: &mut App,
2362 ) -> SharedString;
2363
2364 /// Returns the JSON schema that describes the tool's input.
2365 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2366 language_model::tool_schema::root_schema_for::<Self::Input>(format)
2367 }
2368
2369 /// Some tools only work with certain providers (e.g. for billing or other reasons).
2370 /// This lets a tool declare whether it is compatible with the given provider, so incompatible tools can be filtered out.
2371 fn supports_provider(_provider: &LanguageModelProviderId) -> bool {
2372 true
2373 }
2374
2375 /// Runs the tool with the provided input.
2376 fn run(
2377 self: Arc<Self>,
2378 input: Self::Input,
2379 event_stream: ToolCallEventStream,
2380 cx: &mut App,
2381 ) -> Task<Result<Self::Output>>;
2382
2383 /// Emits events for a previous execution of the tool.
2384 fn replay(
2385 &self,
2386 _input: Self::Input,
2387 _output: Self::Output,
2388 _event_stream: ToolCallEventStream,
2389 _cx: &mut App,
2390 ) -> Result<()> {
2391 Ok(())
2392 }
2393
2394 fn erase(self) -> Arc<dyn AnyAgentTool> {
2395 Arc::new(Erased(Arc::new(self)))
2396 }
2397}
2398
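/// Wraps a concrete [`AgentTool`] so it can be used through the type-erased
/// [`AnyAgentTool`] interface. Constructed via [`AgentTool::erase`].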
2399pub struct Erased<T>(T);
2400
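/// The result of running a tool: `llm_output` is the content sent back to the
/// model as the tool result, while `raw_output` is the tool's full output as
/// JSON, stored on the tool result (e.g. so the call can be replayed later).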
2401pub struct AgentToolOutput {
2402 pub llm_output: LanguageModelToolResultContent,
2403 pub raw_output: serde_json::Value,
2404}
2405
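/// Object-safe, type-erased counterpart of [`AgentTool`], operating on
/// `serde_json::Value`s so heterogeneous tools can be stored and invoked
/// uniformly by the thread.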
2406pub trait AnyAgentTool {
2407 fn name(&self) -> SharedString;
2408 fn description(&self) -> SharedString;
2409 fn kind(&self) -> acp::ToolKind;
2410 fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString;
2411 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2412 fn supports_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2413 true
2414 }
2415 fn run(
2416 self: Arc<Self>,
2417 input: serde_json::Value,
2418 event_stream: ToolCallEventStream,
2419 cx: &mut App,
2420 ) -> Task<Result<AgentToolOutput>>;
2421 fn replay(
2422 &self,
2423 input: serde_json::Value,
2424 output: serde_json::Value,
2425 event_stream: ToolCallEventStream,
2426 cx: &mut App,
2427 ) -> Result<()>;
2428}
2429
2430impl<T> AnyAgentTool for Erased<Arc<T>>
2431where
2432 T: AgentTool,
2433{
2434 fn name(&self) -> SharedString {
2435 T::name().into()
2436 }
2437
2438 fn description(&self) -> SharedString {
2439 T::description()
2440 }
2441
2442 fn kind(&self) -> agent_client_protocol::ToolKind {
2443 T::kind()
2444 }
2445
2446 fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
2447 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2448 self.0.initial_title(parsed_input, cx)
2449 }
2450
2451 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2452 let mut json = serde_json::to_value(T::input_schema(format))?;
2453 language_model::tool_schema::adapt_schema_to_format(&mut json, format)?;
2454 Ok(json)
2455 }
2456
2457 fn supports_provider(&self, provider: &LanguageModelProviderId) -> bool {
2458 T::supports_provider(provider)
2459 }
2460
2461 fn run(
2462 self: Arc<Self>,
2463 input: serde_json::Value,
2464 event_stream: ToolCallEventStream,
2465 cx: &mut App,
2466 ) -> Task<Result<AgentToolOutput>> {
2467 cx.spawn(async move |cx| {
2468 let input = serde_json::from_value(input)?;
2469 let output = cx
2470 .update(|cx| self.0.clone().run(input, event_stream, cx))
2471 .await?;
2472 let raw_output = serde_json::to_value(&output)?;
2473 Ok(AgentToolOutput {
2474 llm_output: output.into(),
2475 raw_output,
2476 })
2477 })
2478 }
2479
2480 fn replay(
2481 &self,
2482 input: serde_json::Value,
2483 output: serde_json::Value,
2484 event_stream: ToolCallEventStream,
2485 cx: &mut App,
2486 ) -> Result<()> {
2487 let input = serde_json::from_value(input)?;
2488 let output = serde_json::from_value(output)?;
2489 self.0.replay(input, output, event_stream, cx)
2490 }
2491}
2492
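/// Sender side of the event channel for a running turn. Everything the model
/// streams (text, thinking, tool calls, retries, stops, errors) is forwarded
/// through here to whoever is consuming the turn's events.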
2493#[derive(Clone)]
2494struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2495
2496impl ThreadEventStream {
2497 fn send_user_message(&self, message: &UserMessage) {
2498 self.0
2499 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2500 .ok();
2501 }
2502
2503 fn send_text(&self, text: &str) {
2504 self.0
2505 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2506 .ok();
2507 }
2508
2509 fn send_thinking(&self, text: &str) {
2510 self.0
2511 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2512 .ok();
2513 }
2514
2515 fn send_tool_call(
2516 &self,
2517 id: &LanguageModelToolUseId,
2518 tool_name: &str,
2519 title: SharedString,
2520 kind: acp::ToolKind,
2521 input: serde_json::Value,
2522 ) {
2523 self.0
2524 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2525 id,
2526 tool_name,
2527 title.to_string(),
2528 kind,
2529 input,
2530 ))))
2531 .ok();
2532 }
2533
2534 fn initial_tool_call(
2535 id: &LanguageModelToolUseId,
2536 tool_name: &str,
2537 title: String,
2538 kind: acp::ToolKind,
2539 input: serde_json::Value,
2540 ) -> acp::ToolCall {
2541 acp::ToolCall::new(id.to_string(), title)
2542 .kind(kind)
2543 .raw_input(input)
2544 .meta(acp::Meta::from_iter([(
2545 "tool_name".into(),
2546 tool_name.into(),
2547 )]))
2548 }
2549
2550 fn update_tool_call_fields(
2551 &self,
2552 tool_use_id: &LanguageModelToolUseId,
2553 fields: acp::ToolCallUpdateFields,
2554 ) {
2555 self.0
2556 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2557 acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
2558 )))
2559 .ok();
2560 }
2561
2562 fn send_retry(&self, status: acp_thread::RetryStatus) {
2563 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2564 }
2565
2566 fn send_stop(&self, reason: acp::StopReason) {
2567 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2568 }
2569
2570 fn send_canceled(&self) {
2571 self.0
2572 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2573 .ok();
2574 }
2575
2576 fn send_error(&self, error: impl Into<anyhow::Error>) {
2577 self.0.unbounded_send(Err(error.into())).ok();
2578 }
2579}
2580
2581#[derive(Clone)]
2582pub struct ToolCallEventStream {
2583 tool_use_id: LanguageModelToolUseId,
2584 stream: ThreadEventStream,
2585 fs: Option<Arc<dyn Fs>>,
2586 cancellation_rx: watch::Receiver<bool>,
2587}
2588
2589impl ToolCallEventStream {
2590 #[cfg(any(test, feature = "test-support"))]
2591 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2592 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2593 let (_cancellation_tx, cancellation_rx) = watch::channel(false);
2594
2595 let stream = ToolCallEventStream::new(
2596 "test_id".into(),
2597 ThreadEventStream(events_tx),
2598 None,
2599 cancellation_rx,
2600 );
2601
2602 (stream, ToolCallEventStreamReceiver(events_rx))
2603 }
2604
2605 fn new(
2606 tool_use_id: LanguageModelToolUseId,
2607 stream: ThreadEventStream,
2608 fs: Option<Arc<dyn Fs>>,
2609 cancellation_rx: watch::Receiver<bool>,
2610 ) -> Self {
2611 Self {
2612 tool_use_id,
2613 stream,
2614 fs,
2615 cancellation_rx,
2616 }
2617 }
2618
2619 /// Returns a future that resolves when the user cancels the tool call.
2620 /// Tools should select on this alongside their main work to detect user cancellation.
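///
/// A minimal sketch of how a tool body might race its work against
/// cancellation (`do_work` is a hypothetical future):
///
/// ```ignore
/// futures::select_biased! {
///     _ = event_stream.cancelled_by_user().fuse() => Err(anyhow!("canceled")),
///     result = do_work().fuse() => result,
/// }
/// ```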
2621 pub fn cancelled_by_user(&self) -> impl std::future::Future<Output = ()> + '_ {
2622 let mut rx = self.cancellation_rx.clone();
2623 async move {
2624 loop {
2625 if *rx.borrow() {
2626 return;
2627 }
2628 if rx.changed().await.is_err() {
2629 // Sender dropped, will never be cancelled
2630 std::future::pending::<()>().await;
2631 }
2632 }
2633 }
2634 }
2635
2636 /// Returns true if the user has cancelled this tool call.
2637 /// This is useful for checking cancellation state after an operation completes,
2638 /// to determine if the completion was due to user cancellation.
2639 pub fn was_cancelled_by_user(&self) -> bool {
2640 *self.cancellation_rx.clone().borrow()
2641 }
2642
2643 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2644 self.stream
2645 .update_tool_call_fields(&self.tool_use_id, fields);
2646 }
2647
2648 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2649 self.stream
2650 .0
2651 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2652 acp_thread::ToolCallUpdateDiff {
2653 id: acp::ToolCallId::new(self.tool_use_id.to_string()),
2654 diff,
2655 }
2656 .into(),
2657 )))
2658 .ok();
2659 }
2660
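/// Asks the user to authorize the tool call before proceeding. Resolves to
/// `Ok(())` if the user allows it (persisting the "always allow" setting when
/// that option is chosen, or skipping the prompt entirely when it is already
/// set), and to an error if the user denies it.
///
/// A minimal sketch of use from inside a tool's `run` (the surrounding
/// variables are assumed from that context):
///
/// ```ignore
/// let authorized = event_stream.authorize("Delete `build/`?", cx);
/// cx.spawn(async move |_cx| {
///     authorized.await?;
///     // ...perform the action now that the user has approved it...
///     Ok(())
/// })
/// ```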
2661 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2662 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2663 return Task::ready(Ok(()));
2664 }
2665
2666 let (response_tx, response_rx) = oneshot::channel();
2667 self.stream
2668 .0
2669 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2670 ToolCallAuthorization {
2671 tool_call: acp::ToolCallUpdate::new(
2672 self.tool_use_id.to_string(),
2673 acp::ToolCallUpdateFields::new().title(title.into()),
2674 ),
2675 options: vec![
2676 acp::PermissionOption::new(
2677 acp::PermissionOptionId::new("always_allow"),
2678 "Always Allow",
2679 acp::PermissionOptionKind::AllowAlways,
2680 ),
2681 acp::PermissionOption::new(
2682 acp::PermissionOptionId::new("allow"),
2683 "Allow",
2684 acp::PermissionOptionKind::AllowOnce,
2685 ),
2686 acp::PermissionOption::new(
2687 acp::PermissionOptionId::new("deny"),
2688 "Deny",
2689 acp::PermissionOptionKind::RejectOnce,
2690 ),
2691 ],
2692 response: response_tx,
2693 },
2694 )))
2695 .ok();
2696 let fs = self.fs.clone();
2697 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2698 "always_allow" => {
2699 if let Some(fs) = fs.clone() {
2700 cx.update(|cx| {
2701 update_settings_file(fs, cx, |settings, _| {
2702 settings
2703 .agent
2704 .get_or_insert_default()
2705 .set_always_allow_tool_actions(true);
2706 });
2707 });
2708 }
2709
2710 Ok(())
2711 }
2712 "allow" => Ok(()),
2713 _ => Err(anyhow!("Permission to run tool denied by user")),
2714 })
2715 }
2716}
2717
2718#[cfg(any(test, feature = "test-support"))]
2719pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2720
2721#[cfg(any(test, feature = "test-support"))]
2722impl ToolCallEventStreamReceiver {
2723 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2724 let event = self.0.next().await;
2725 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2726 auth
2727 } else {
2728 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2729 }
2730 }
2731
2732 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2733 let event = self.0.next().await;
2734 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2735 update,
2736 )))) = event
2737 {
2738 update.fields
2739 } else {
2740 panic!("Expected update fields but got: {:?}", event);
2741 }
2742 }
2743
2744 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2745 let event = self.0.next().await;
2746 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2747 update,
2748 )))) = event
2749 {
2750 update.diff
2751 } else {
2752 panic!("Expected diff but got: {:?}", event);
2753 }
2754 }
2755
2756 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2757 let event = self.0.next().await;
2758 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2759 update,
2760 )))) = event
2761 {
2762 update.terminal
2763 } else {
2764 panic!("Expected terminal but got: {:?}", event);
2765 }
2766 }
2767}
2768
2769#[cfg(any(test, feature = "test-support"))]
2770impl std::ops::Deref for ToolCallEventStreamReceiver {
2771 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2772
2773 fn deref(&self) -> &Self::Target {
2774 &self.0
2775 }
2776}
2777
2778#[cfg(any(test, feature = "test-support"))]
2779impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2780 fn deref_mut(&mut self) -> &mut Self::Target {
2781 &mut self.0
2782 }
2783}
2784
2785impl From<&str> for UserMessageContent {
2786 fn from(text: &str) -> Self {
2787 Self::Text(text.into())
2788 }
2789}
2790
2791impl UserMessageContent {
2792 pub fn from_content_block(value: acp::ContentBlock, path_style: PathStyle) -> Self {
2793 match value {
2794 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2795 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2796 acp::ContentBlock::Audio(_) => {
2797 // TODO
2798 Self::Text("[audio]".to_string())
2799 }
2800 acp::ContentBlock::ResourceLink(resource_link) => {
2801 match MentionUri::parse(&resource_link.uri, path_style) {
2802 Ok(uri) => Self::Mention {
2803 uri,
2804 content: String::new(),
2805 },
2806 Err(err) => {
2807 log::error!("Failed to parse mention link: {}", err);
2808 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2809 }
2810 }
2811 }
2812 acp::ContentBlock::Resource(resource) => match resource.resource {
2813 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2814 match MentionUri::parse(&resource.uri, path_style) {
2815 Ok(uri) => Self::Mention {
2816 uri,
2817 content: resource.text,
2818 },
2819 Err(err) => {
2820 log::error!("Failed to parse mention link: {}", err);
2821 Self::Text(
2822 MarkdownCodeBlock {
2823 tag: &resource.uri,
2824 text: &resource.text,
2825 }
2826 .to_string(),
2827 )
2828 }
2829 }
2830 }
2831 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2832 // TODO
2833 Self::Text("[blob]".to_string())
2834 }
2835 other => {
2836 log::warn!("Unexpected content type: {:?}", other);
2837 Self::Text("[unknown]".to_string())
2838 }
2839 },
2840 other => {
2841 log::warn!("Unexpected content type: {:?}", other);
2842 Self::Text("[unknown]".to_string())
2843 }
2844 }
2845 }
2846}
2847
2848impl From<UserMessageContent> for acp::ContentBlock {
2849 fn from(content: UserMessageContent) -> Self {
2850 match content {
2851 UserMessageContent::Text(text) => text.into(),
2852 UserMessageContent::Image(image) => {
2853 acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
2854 }
2855 UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
2856 acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
2857 acp::TextResourceContents::new(content, uri.to_uri().to_string()),
2858 )),
2859 ),
2860 }
2861 }
2862}
2863
2864fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2865 LanguageModelImage {
2866 source: image_content.data.into(),
2867 size: None,
2868 }
2869}