1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ReadFileTool, SystemPromptTemplate,
5 Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6};
7use acp_thread::{MentionUri, UserMessageId};
8use action_log::ActionLog;
9use agent::thread::{GitState, ProjectSnapshot, WorktreeSnapshot};
10use agent_client_protocol as acp;
11use agent_settings::{
12 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
13 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
14};
15use anyhow::{Context as _, Result, anyhow};
16use assistant_tool::adapt_schema_to_format;
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage};
19use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::{
23 FutureExt,
24 channel::{mpsc, oneshot},
25 future::Shared,
26 stream::FuturesUnordered,
27};
28use git::repository::DiffType;
29use gpui::{
30 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
31};
32use language_model::{
33 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
34 LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
35 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
36 LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
37 LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
38};
39use project::{
40 Project,
41 git_store::{GitStore, RepositoryState},
42};
43use prompt_store::ProjectContext;
44use schemars::{JsonSchema, Schema};
45use serde::{Deserialize, Serialize};
46use settings::{Settings, update_settings_file};
47use smol::stream::StreamExt;
48use std::{
49 collections::BTreeMap,
50 ops::RangeInclusive,
51 path::Path,
52 rc::Rc,
53 sync::Arc,
54 time::{Duration, Instant},
55};
56use std::{fmt::Write, path::PathBuf};
57use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock};
58use uuid::Uuid;
59
60const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
61pub const MAX_TOOL_NAME_LENGTH: usize = 64;
62
63/// The ID of the user prompt that initiated a request.
64///
65/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
66#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
67pub struct PromptId(Arc<str>);
68
69impl PromptId {
70 pub fn new() -> Self {
71 Self(Uuid::new_v4().to_string().into())
72 }
73}
74
75impl std::fmt::Display for PromptId {
76 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
77 write!(f, "{}", self.0)
78 }
79}
80
81pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
82pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
83
84#[derive(Debug, Clone)]
85enum RetryStrategy {
86 ExponentialBackoff {
87 initial_delay: Duration,
88 max_attempts: u8,
89 },
90 Fixed {
91 delay: Duration,
92 max_attempts: u8,
93 },
94}
95
96#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
97pub enum Message {
98 User(UserMessage),
99 Agent(AgentMessage),
100 Resume,
101}
102
103impl Message {
104 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
105 match self {
106 Message::Agent(agent_message) => Some(agent_message),
107 _ => None,
108 }
109 }
110
111 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
112 match self {
113 Message::User(message) => vec![message.to_request()],
114 Message::Agent(message) => message.to_request(),
115 Message::Resume => vec![LanguageModelRequestMessage {
116 role: Role::User,
117 content: vec!["Continue where you left off".into()],
118 cache: false,
119 }],
120 }
121 }
122
123 pub fn to_markdown(&self) -> String {
124 match self {
125 Message::User(message) => message.to_markdown(),
126 Message::Agent(message) => message.to_markdown(),
127 Message::Resume => "[resume]\n".into(),
128 }
129 }
130
131 pub fn role(&self) -> Role {
132 match self {
133 Message::User(_) | Message::Resume => Role::User,
134 Message::Agent(_) => Role::Assistant,
135 }
136 }
137}
138
139#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
140pub struct UserMessage {
141 pub id: UserMessageId,
142 pub content: Vec<UserMessageContent>,
143}
144
145#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
146pub enum UserMessageContent {
147 Text(String),
148 Mention { uri: MentionUri, content: String },
149 Image(LanguageModelImage),
150}
151
152impl UserMessage {
153 pub fn to_markdown(&self) -> String {
154 let mut markdown = String::from("## User\n\n");
155
156 for content in &self.content {
157 match content {
158 UserMessageContent::Text(text) => {
159 markdown.push_str(text);
160 markdown.push('\n');
161 }
162 UserMessageContent::Image(_) => {
163 markdown.push_str("<image />\n");
164 }
165 UserMessageContent::Mention { uri, content } => {
166 if !content.is_empty() {
167 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
168 } else {
169 let _ = writeln!(&mut markdown, "{}", uri.as_link());
170 }
171 }
172 }
173 }
174
175 markdown
176 }
177
178 fn to_request(&self) -> LanguageModelRequestMessage {
179 let mut message = LanguageModelRequestMessage {
180 role: Role::User,
181 content: Vec::with_capacity(self.content.len()),
182 cache: false,
183 };
184
185 const OPEN_CONTEXT: &str = "<context>\n\
186 The following items were attached by the user. \
187 They are up-to-date and don't need to be re-read.\n\n";
188
189 const OPEN_FILES_TAG: &str = "<files>";
190 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
191 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
192 const OPEN_SELECTIONS_TAG: &str = "<selections>";
193 const OPEN_THREADS_TAG: &str = "<threads>";
194 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
195 const OPEN_RULES_TAG: &str =
196 "<rules>\nThe user has specified the following rules that should be applied:\n";
197
198 let mut file_context = OPEN_FILES_TAG.to_string();
199 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
200 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
201 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
202 let mut thread_context = OPEN_THREADS_TAG.to_string();
203 let mut fetch_context = OPEN_FETCH_TAG.to_string();
204 let mut rules_context = OPEN_RULES_TAG.to_string();
205
206 for chunk in &self.content {
207 let chunk = match chunk {
208 UserMessageContent::Text(text) => {
209 language_model::MessageContent::Text(text.clone())
210 }
211 UserMessageContent::Image(value) => {
212 language_model::MessageContent::Image(value.clone())
213 }
214 UserMessageContent::Mention { uri, content } => {
215 match uri {
216 MentionUri::File { abs_path } => {
217 write!(
218 &mut file_context,
219 "\n{}",
220 MarkdownCodeBlock {
221 tag: &codeblock_tag(abs_path, None),
222 text: &content.to_string(),
223 }
224 )
225 .ok();
226 }
227 MentionUri::PastedImage => {
228 debug_panic!("pasted image URI should not be used in mention content")
229 }
230 MentionUri::Directory { .. } => {
231 write!(&mut directory_context, "\n{}\n", content).ok();
232 }
233 MentionUri::Symbol {
234 abs_path: path,
235 line_range,
236 ..
237 } => {
238 write!(
239 &mut symbol_context,
240 "\n{}",
241 MarkdownCodeBlock {
242 tag: &codeblock_tag(path, Some(line_range)),
243 text: content
244 }
245 )
246 .ok();
247 }
248 MentionUri::Selection {
249 abs_path: path,
250 line_range,
251 ..
252 } => {
253 write!(
254 &mut selection_context,
255 "\n{}",
256 MarkdownCodeBlock {
257 tag: &codeblock_tag(
258 path.as_deref().unwrap_or("Untitled".as_ref()),
259 Some(line_range)
260 ),
261 text: content
262 }
263 )
264 .ok();
265 }
266 MentionUri::Thread { .. } => {
267 write!(&mut thread_context, "\n{}\n", content).ok();
268 }
269 MentionUri::TextThread { .. } => {
270 write!(&mut thread_context, "\n{}\n", content).ok();
271 }
272 MentionUri::Rule { .. } => {
273 write!(
274 &mut rules_context,
275 "\n{}",
276 MarkdownCodeBlock {
277 tag: "",
278 text: content
279 }
280 )
281 .ok();
282 }
283 MentionUri::Fetch { url } => {
284 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
285 }
286 }
287
288 language_model::MessageContent::Text(uri.as_link().to_string())
289 }
290 };
291
292 message.content.push(chunk);
293 }
294
295 let len_before_context = message.content.len();
296
297 if file_context.len() > OPEN_FILES_TAG.len() {
298 file_context.push_str("</files>\n");
299 message
300 .content
301 .push(language_model::MessageContent::Text(file_context));
302 }
303
304 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
305 directory_context.push_str("</directories>\n");
306 message
307 .content
308 .push(language_model::MessageContent::Text(directory_context));
309 }
310
311 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
312 symbol_context.push_str("</symbols>\n");
313 message
314 .content
315 .push(language_model::MessageContent::Text(symbol_context));
316 }
317
318 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
319 selection_context.push_str("</selections>\n");
320 message
321 .content
322 .push(language_model::MessageContent::Text(selection_context));
323 }
324
325 if thread_context.len() > OPEN_THREADS_TAG.len() {
326 thread_context.push_str("</threads>\n");
327 message
328 .content
329 .push(language_model::MessageContent::Text(thread_context));
330 }
331
332 if fetch_context.len() > OPEN_FETCH_TAG.len() {
333 fetch_context.push_str("</fetched_urls>\n");
334 message
335 .content
336 .push(language_model::MessageContent::Text(fetch_context));
337 }
338
339 if rules_context.len() > OPEN_RULES_TAG.len() {
340 rules_context.push_str("</user_rules>\n");
341 message
342 .content
343 .push(language_model::MessageContent::Text(rules_context));
344 }
345
346 if message.content.len() > len_before_context {
347 message.content.insert(
348 len_before_context,
349 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
350 );
351 message
352 .content
353 .push(language_model::MessageContent::Text("</context>".into()));
354 }
355
356 message
357 }
358}
359
360fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
361 let mut result = String::new();
362
363 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
364 let _ = write!(result, "{} ", extension);
365 }
366
367 let _ = write!(result, "{}", full_path.display());
368
369 if let Some(range) = line_range {
370 if range.start() == range.end() {
371 let _ = write!(result, ":{}", range.start() + 1);
372 } else {
373 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
374 }
375 }
376
377 result
378}
379
380impl AgentMessage {
381 pub fn to_markdown(&self) -> String {
382 let mut markdown = String::from("## Assistant\n\n");
383
384 for content in &self.content {
385 match content {
386 AgentMessageContent::Text(text) => {
387 markdown.push_str(text);
388 markdown.push('\n');
389 }
390 AgentMessageContent::Thinking { text, .. } => {
391 markdown.push_str("<think>");
392 markdown.push_str(text);
393 markdown.push_str("</think>\n");
394 }
395 AgentMessageContent::RedactedThinking(_) => {
396 markdown.push_str("<redacted_thinking />\n")
397 }
398 AgentMessageContent::ToolUse(tool_use) => {
399 markdown.push_str(&format!(
400 "**Tool Use**: {} (ID: {})\n",
401 tool_use.name, tool_use.id
402 ));
403 markdown.push_str(&format!(
404 "{}\n",
405 MarkdownCodeBlock {
406 tag: "json",
407 text: &format!("{:#}", tool_use.input)
408 }
409 ));
410 }
411 }
412 }
413
414 for tool_result in self.tool_results.values() {
415 markdown.push_str(&format!(
416 "**Tool Result**: {} (ID: {})\n\n",
417 tool_result.tool_name, tool_result.tool_use_id
418 ));
419 if tool_result.is_error {
420 markdown.push_str("**ERROR:**\n");
421 }
422
423 match &tool_result.content {
424 LanguageModelToolResultContent::Text(text) => {
425 writeln!(markdown, "{text}\n").ok();
426 }
427 LanguageModelToolResultContent::Image(_) => {
428 writeln!(markdown, "<image />\n").ok();
429 }
430 }
431
432 if let Some(output) = tool_result.output.as_ref() {
433 writeln!(
434 markdown,
435 "**Debug Output**:\n\n```json\n{}\n```\n",
436 serde_json::to_string_pretty(output).unwrap()
437 )
438 .unwrap();
439 }
440 }
441
442 markdown
443 }
444
445 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
446 let mut assistant_message = LanguageModelRequestMessage {
447 role: Role::Assistant,
448 content: Vec::with_capacity(self.content.len()),
449 cache: false,
450 };
451 for chunk in &self.content {
452 match chunk {
453 AgentMessageContent::Text(text) => {
454 assistant_message
455 .content
456 .push(language_model::MessageContent::Text(text.clone()));
457 }
458 AgentMessageContent::Thinking { text, signature } => {
459 assistant_message
460 .content
461 .push(language_model::MessageContent::Thinking {
462 text: text.clone(),
463 signature: signature.clone(),
464 });
465 }
466 AgentMessageContent::RedactedThinking(value) => {
467 assistant_message.content.push(
468 language_model::MessageContent::RedactedThinking(value.clone()),
469 );
470 }
471 AgentMessageContent::ToolUse(tool_use) => {
472 if self.tool_results.contains_key(&tool_use.id) {
473 assistant_message
474 .content
475 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
476 }
477 }
478 };
479 }
480
481 let mut user_message = LanguageModelRequestMessage {
482 role: Role::User,
483 content: Vec::new(),
484 cache: false,
485 };
486
487 for tool_result in self.tool_results.values() {
488 let mut tool_result = tool_result.clone();
489 // Surprisingly, the API fails if we return an empty string here.
490 // It thinks we are sending a tool use without a tool result.
491 if tool_result.content.is_empty() {
492 tool_result.content = "<Tool returned an empty string>".into();
493 }
494 user_message
495 .content
496 .push(language_model::MessageContent::ToolResult(tool_result));
497 }
498
499 let mut messages = Vec::new();
500 if !assistant_message.content.is_empty() {
501 messages.push(assistant_message);
502 }
503 if !user_message.content.is_empty() {
504 messages.push(user_message);
505 }
506 messages
507 }
508}
509
510#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
511pub struct AgentMessage {
512 pub content: Vec<AgentMessageContent>,
513 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
514}
515
516#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
517pub enum AgentMessageContent {
518 Text(String),
519 Thinking {
520 text: String,
521 signature: Option<String>,
522 },
523 RedactedThinking(String),
524 ToolUse(LanguageModelToolUse),
525}
526
527pub trait TerminalHandle {
528 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
529 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
530 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
531}
532
533pub trait ThreadEnvironment {
534 fn create_terminal(
535 &self,
536 command: String,
537 cwd: Option<PathBuf>,
538 output_byte_limit: Option<u64>,
539 cx: &mut AsyncApp,
540 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
541}
542
543#[derive(Debug)]
544pub enum ThreadEvent {
545 UserMessage(UserMessage),
546 AgentText(String),
547 AgentThinking(String),
548 ToolCall(acp::ToolCall),
549 ToolCallUpdate(acp_thread::ToolCallUpdate),
550 ToolCallAuthorization(ToolCallAuthorization),
551 Retry(acp_thread::RetryStatus),
552 Stop(acp::StopReason),
553}
554
555#[derive(Debug)]
556pub struct NewTerminal {
557 pub command: String,
558 pub output_byte_limit: Option<u64>,
559 pub cwd: Option<PathBuf>,
560 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
561}
562
563#[derive(Debug)]
564pub struct ToolCallAuthorization {
565 pub tool_call: acp::ToolCallUpdate,
566 pub options: Vec<acp::PermissionOption>,
567 pub response: oneshot::Sender<acp::PermissionOptionId>,
568}
569
570#[derive(Debug, thiserror::Error)]
571enum CompletionError {
572 #[error("max tokens")]
573 MaxTokens,
574 #[error("refusal")]
575 Refusal,
576 #[error(transparent)]
577 Other(#[from] anyhow::Error),
578}
579
580pub struct Thread {
581 id: acp::SessionId,
582 prompt_id: PromptId,
583 updated_at: DateTime<Utc>,
584 title: Option<SharedString>,
585 pending_title_generation: Option<Task<()>>,
586 summary: Option<SharedString>,
587 messages: Vec<Message>,
588 completion_mode: CompletionMode,
589 /// Holds the task that handles agent interaction until the end of the turn.
590 /// Survives across multiple requests as the model performs tool calls and
591 /// we run tools, report their results.
592 running_turn: Option<RunningTurn>,
593 pending_message: Option<AgentMessage>,
594 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
595 tool_use_limit_reached: bool,
596 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
597 #[allow(unused)]
598 cumulative_token_usage: TokenUsage,
599 #[allow(unused)]
600 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
601 context_server_registry: Entity<ContextServerRegistry>,
602 profile_id: AgentProfileId,
603 project_context: Entity<ProjectContext>,
604 templates: Arc<Templates>,
605 model: Option<Arc<dyn LanguageModel>>,
606 summarization_model: Option<Arc<dyn LanguageModel>>,
607 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
608 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
609 pub(crate) project: Entity<Project>,
610 pub(crate) action_log: Entity<ActionLog>,
611}
612
613impl Thread {
614 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
615 let image = model.map_or(true, |model| model.supports_images());
616 acp::PromptCapabilities {
617 image,
618 audio: false,
619 embedded_context: true,
620 }
621 }
622
623 pub fn new(
624 project: Entity<Project>,
625 project_context: Entity<ProjectContext>,
626 context_server_registry: Entity<ContextServerRegistry>,
627 templates: Arc<Templates>,
628 model: Option<Arc<dyn LanguageModel>>,
629 cx: &mut Context<Self>,
630 ) -> Self {
631 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
632 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
633 let (prompt_capabilities_tx, prompt_capabilities_rx) =
634 watch::channel(Self::prompt_capabilities(model.as_deref()));
635 Self {
636 id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
637 prompt_id: PromptId::new(),
638 updated_at: Utc::now(),
639 title: None,
640 pending_title_generation: None,
641 summary: None,
642 messages: Vec::new(),
643 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
644 running_turn: None,
645 pending_message: None,
646 tools: BTreeMap::default(),
647 tool_use_limit_reached: false,
648 request_token_usage: HashMap::default(),
649 cumulative_token_usage: TokenUsage::default(),
650 initial_project_snapshot: {
651 let project_snapshot = Self::project_snapshot(project.clone(), cx);
652 cx.foreground_executor()
653 .spawn(async move { Some(project_snapshot.await) })
654 .shared()
655 },
656 context_server_registry,
657 profile_id,
658 project_context,
659 templates,
660 model,
661 summarization_model: None,
662 prompt_capabilities_tx,
663 prompt_capabilities_rx,
664 project,
665 action_log,
666 }
667 }
668
669 pub fn id(&self) -> &acp::SessionId {
670 &self.id
671 }
672
673 pub fn replay(
674 &mut self,
675 cx: &mut Context<Self>,
676 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
677 let (tx, rx) = mpsc::unbounded();
678 let stream = ThreadEventStream(tx);
679 for message in &self.messages {
680 match message {
681 Message::User(user_message) => stream.send_user_message(user_message),
682 Message::Agent(assistant_message) => {
683 for content in &assistant_message.content {
684 match content {
685 AgentMessageContent::Text(text) => stream.send_text(text),
686 AgentMessageContent::Thinking { text, .. } => {
687 stream.send_thinking(text)
688 }
689 AgentMessageContent::RedactedThinking(_) => {}
690 AgentMessageContent::ToolUse(tool_use) => {
691 self.replay_tool_call(
692 tool_use,
693 assistant_message.tool_results.get(&tool_use.id),
694 &stream,
695 cx,
696 );
697 }
698 }
699 }
700 }
701 Message::Resume => {}
702 }
703 }
704 rx
705 }
706
707 fn replay_tool_call(
708 &self,
709 tool_use: &LanguageModelToolUse,
710 tool_result: Option<&LanguageModelToolResult>,
711 stream: &ThreadEventStream,
712 cx: &mut Context<Self>,
713 ) {
714 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
715 self.context_server_registry
716 .read(cx)
717 .servers()
718 .find_map(|(_, tools)| {
719 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
720 Some(tool.clone())
721 } else {
722 None
723 }
724 })
725 });
726
727 let Some(tool) = tool else {
728 stream
729 .0
730 .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
731 id: acp::ToolCallId(tool_use.id.to_string().into()),
732 title: tool_use.name.to_string(),
733 kind: acp::ToolKind::Other,
734 status: acp::ToolCallStatus::Failed,
735 content: Vec::new(),
736 locations: Vec::new(),
737 raw_input: Some(tool_use.input.clone()),
738 raw_output: None,
739 })))
740 .ok();
741 return;
742 };
743
744 let title = tool.initial_title(tool_use.input.clone());
745 let kind = tool.kind();
746 stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
747
748 let output = tool_result
749 .as_ref()
750 .and_then(|result| result.output.clone());
751 if let Some(output) = output.clone() {
752 let tool_event_stream = ToolCallEventStream::new(
753 tool_use.id.clone(),
754 stream.clone(),
755 Some(self.project.read(cx).fs().clone()),
756 );
757 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
758 .log_err();
759 }
760
761 stream.update_tool_call_fields(
762 &tool_use.id,
763 acp::ToolCallUpdateFields {
764 status: Some(
765 tool_result
766 .as_ref()
767 .map_or(acp::ToolCallStatus::Failed, |result| {
768 if result.is_error {
769 acp::ToolCallStatus::Failed
770 } else {
771 acp::ToolCallStatus::Completed
772 }
773 }),
774 ),
775 raw_output: output,
776 ..Default::default()
777 },
778 );
779 }
780
781 pub fn from_db(
782 id: acp::SessionId,
783 db_thread: DbThread,
784 project: Entity<Project>,
785 project_context: Entity<ProjectContext>,
786 context_server_registry: Entity<ContextServerRegistry>,
787 action_log: Entity<ActionLog>,
788 templates: Arc<Templates>,
789 cx: &mut Context<Self>,
790 ) -> Self {
791 let profile_id = db_thread
792 .profile
793 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
794 let model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
795 db_thread
796 .model
797 .and_then(|model| {
798 let model = SelectedModel {
799 provider: model.provider.clone().into(),
800 model: model.model.into(),
801 };
802 registry.select_model(&model, cx)
803 })
804 .or_else(|| registry.default_model())
805 .map(|model| model.model)
806 });
807 let (prompt_capabilities_tx, prompt_capabilities_rx) =
808 watch::channel(Self::prompt_capabilities(model.as_deref()));
809
810 Self {
811 id,
812 prompt_id: PromptId::new(),
813 title: if db_thread.title.is_empty() {
814 None
815 } else {
816 Some(db_thread.title.clone())
817 },
818 pending_title_generation: None,
819 summary: db_thread.detailed_summary,
820 messages: db_thread.messages,
821 completion_mode: db_thread.completion_mode.unwrap_or_default(),
822 running_turn: None,
823 pending_message: None,
824 tools: BTreeMap::default(),
825 tool_use_limit_reached: false,
826 request_token_usage: db_thread.request_token_usage.clone(),
827 cumulative_token_usage: db_thread.cumulative_token_usage,
828 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
829 context_server_registry,
830 profile_id,
831 project_context,
832 templates,
833 model,
834 summarization_model: None,
835 project,
836 action_log,
837 updated_at: db_thread.updated_at,
838 prompt_capabilities_tx,
839 prompt_capabilities_rx,
840 }
841 }
842
843 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
844 let initial_project_snapshot = self.initial_project_snapshot.clone();
845 let mut thread = DbThread {
846 title: self.title(),
847 messages: self.messages.clone(),
848 updated_at: self.updated_at,
849 detailed_summary: self.summary.clone(),
850 initial_project_snapshot: None,
851 cumulative_token_usage: self.cumulative_token_usage,
852 request_token_usage: self.request_token_usage.clone(),
853 model: self.model.as_ref().map(|model| DbLanguageModel {
854 provider: model.provider_id().to_string(),
855 model: model.name().0.to_string(),
856 }),
857 completion_mode: Some(self.completion_mode),
858 profile: Some(self.profile_id.clone()),
859 };
860
861 cx.background_spawn(async move {
862 let initial_project_snapshot = initial_project_snapshot.await;
863 thread.initial_project_snapshot = initial_project_snapshot;
864 thread
865 })
866 }
867
868 /// Create a snapshot of the current project state including git information and unsaved buffers.
869 fn project_snapshot(
870 project: Entity<Project>,
871 cx: &mut Context<Self>,
872 ) -> Task<Arc<agent::thread::ProjectSnapshot>> {
873 let git_store = project.read(cx).git_store().clone();
874 let worktree_snapshots: Vec<_> = project
875 .read(cx)
876 .visible_worktrees(cx)
877 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
878 .collect();
879
880 cx.spawn(async move |_, cx| {
881 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
882
883 let mut unsaved_buffers = Vec::new();
884 cx.update(|app_cx| {
885 let buffer_store = project.read(app_cx).buffer_store();
886 for buffer_handle in buffer_store.read(app_cx).buffers() {
887 let buffer = buffer_handle.read(app_cx);
888 if buffer.is_dirty()
889 && let Some(file) = buffer.file()
890 {
891 let path = file.path().to_string_lossy().to_string();
892 unsaved_buffers.push(path);
893 }
894 }
895 })
896 .ok();
897
898 Arc::new(ProjectSnapshot {
899 worktree_snapshots,
900 unsaved_buffer_paths: unsaved_buffers,
901 timestamp: Utc::now(),
902 })
903 })
904 }
905
906 fn worktree_snapshot(
907 worktree: Entity<project::Worktree>,
908 git_store: Entity<GitStore>,
909 cx: &App,
910 ) -> Task<agent::thread::WorktreeSnapshot> {
911 cx.spawn(async move |cx| {
912 // Get worktree path and snapshot
913 let worktree_info = cx.update(|app_cx| {
914 let worktree = worktree.read(app_cx);
915 let path = worktree.abs_path().to_string_lossy().to_string();
916 let snapshot = worktree.snapshot();
917 (path, snapshot)
918 });
919
920 let Ok((worktree_path, _snapshot)) = worktree_info else {
921 return WorktreeSnapshot {
922 worktree_path: String::new(),
923 git_state: None,
924 };
925 };
926
927 let git_state = git_store
928 .update(cx, |git_store, cx| {
929 git_store
930 .repositories()
931 .values()
932 .find(|repo| {
933 repo.read(cx)
934 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
935 .is_some()
936 })
937 .cloned()
938 })
939 .ok()
940 .flatten()
941 .map(|repo| {
942 repo.update(cx, |repo, _| {
943 let current_branch =
944 repo.branch.as_ref().map(|branch| branch.name().to_owned());
945 repo.send_job(None, |state, _| async move {
946 let RepositoryState::Local { backend, .. } = state else {
947 return GitState {
948 remote_url: None,
949 head_sha: None,
950 current_branch,
951 diff: None,
952 };
953 };
954
955 let remote_url = backend.remote_url("origin");
956 let head_sha = backend.head_sha().await;
957 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
958
959 GitState {
960 remote_url,
961 head_sha,
962 current_branch,
963 diff,
964 }
965 })
966 })
967 });
968
969 let git_state = match git_state {
970 Some(git_state) => match git_state.ok() {
971 Some(git_state) => git_state.await.ok(),
972 None => None,
973 },
974 None => None,
975 };
976
977 WorktreeSnapshot {
978 worktree_path,
979 git_state,
980 }
981 })
982 }
983
984 pub fn project_context(&self) -> &Entity<ProjectContext> {
985 &self.project_context
986 }
987
988 pub fn project(&self) -> &Entity<Project> {
989 &self.project
990 }
991
992 pub fn action_log(&self) -> &Entity<ActionLog> {
993 &self.action_log
994 }
995
996 pub fn is_empty(&self) -> bool {
997 self.messages.is_empty() && self.title.is_none()
998 }
999
1000 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
1001 self.model.as_ref()
1002 }
1003
1004 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
1005 let old_usage = self.latest_token_usage();
1006 self.model = Some(model);
1007 let new_caps = Self::prompt_capabilities(self.model.as_deref());
1008 let new_usage = self.latest_token_usage();
1009 if old_usage != new_usage {
1010 cx.emit(TokenUsageUpdated(new_usage));
1011 }
1012 self.prompt_capabilities_tx.send(new_caps).log_err();
1013 cx.notify()
1014 }
1015
1016 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
1017 self.summarization_model.as_ref()
1018 }
1019
1020 pub fn set_summarization_model(
1021 &mut self,
1022 model: Option<Arc<dyn LanguageModel>>,
1023 cx: &mut Context<Self>,
1024 ) {
1025 self.summarization_model = model;
1026 cx.notify()
1027 }
1028
1029 pub fn completion_mode(&self) -> CompletionMode {
1030 self.completion_mode
1031 }
1032
1033 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
1034 let old_usage = self.latest_token_usage();
1035 self.completion_mode = mode;
1036 let new_usage = self.latest_token_usage();
1037 if old_usage != new_usage {
1038 cx.emit(TokenUsageUpdated(new_usage));
1039 }
1040 cx.notify()
1041 }
1042
1043 #[cfg(any(test, feature = "test-support"))]
1044 pub fn last_message(&self) -> Option<Message> {
1045 if let Some(message) = self.pending_message.clone() {
1046 Some(Message::Agent(message))
1047 } else {
1048 self.messages.last().cloned()
1049 }
1050 }
1051
1052 pub fn add_default_tools(
1053 &mut self,
1054 environment: Rc<dyn ThreadEnvironment>,
1055 cx: &mut Context<Self>,
1056 ) {
1057 let language_registry = self.project.read(cx).languages().clone();
1058 self.add_tool(CopyPathTool::new(self.project.clone()));
1059 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1060 self.add_tool(DeletePathTool::new(
1061 self.project.clone(),
1062 self.action_log.clone(),
1063 ));
1064 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1065 self.add_tool(EditFileTool::new(cx.weak_entity(), language_registry));
1066 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1067 self.add_tool(FindPathTool::new(self.project.clone()));
1068 self.add_tool(GrepTool::new(self.project.clone()));
1069 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1070 self.add_tool(MovePathTool::new(self.project.clone()));
1071 self.add_tool(NowTool);
1072 self.add_tool(OpenTool::new(self.project.clone()));
1073 self.add_tool(ReadFileTool::new(
1074 self.project.clone(),
1075 self.action_log.clone(),
1076 ));
1077 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1078 self.add_tool(ThinkingTool);
1079 self.add_tool(WebSearchTool);
1080 }
1081
1082 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1083 self.tools.insert(T::name().into(), tool.erase());
1084 }
1085
1086 pub fn remove_tool(&mut self, name: &str) -> bool {
1087 self.tools.remove(name).is_some()
1088 }
1089
1090 pub fn profile(&self) -> &AgentProfileId {
1091 &self.profile_id
1092 }
1093
1094 pub fn set_profile(&mut self, profile_id: AgentProfileId) {
1095 self.profile_id = profile_id;
1096 }
1097
1098 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1099 if let Some(running_turn) = self.running_turn.take() {
1100 running_turn.cancel();
1101 }
1102 self.flush_pending_message(cx);
1103 }
1104
1105 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1106 let Some(last_user_message) = self.last_user_message() else {
1107 return;
1108 };
1109
1110 self.request_token_usage
1111 .insert(last_user_message.id.clone(), update);
1112 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1113 cx.notify();
1114 }
1115
1116 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1117 self.cancel(cx);
1118 let Some(position) = self.messages.iter().position(
1119 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1120 ) else {
1121 return Err(anyhow!("Message not found"));
1122 };
1123
1124 for message in self.messages.drain(position..) {
1125 match message {
1126 Message::User(message) => {
1127 self.request_token_usage.remove(&message.id);
1128 }
1129 Message::Agent(_) | Message::Resume => {}
1130 }
1131 }
1132 self.summary = None;
1133 cx.notify();
1134 Ok(())
1135 }
1136
1137 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1138 let last_user_message = self.last_user_message()?;
1139 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1140 let model = self.model.clone()?;
1141
1142 Some(acp_thread::TokenUsage {
1143 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1144 used_tokens: tokens.total_tokens(),
1145 })
1146 }
1147
1148 pub fn resume(
1149 &mut self,
1150 cx: &mut Context<Self>,
1151 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1152 self.messages.push(Message::Resume);
1153 cx.notify();
1154
1155 log::debug!("Total messages in thread: {}", self.messages.len());
1156 self.run_turn(cx)
1157 }
1158
1159 /// Sending a message results in the model streaming a response, which could include tool calls.
1160 /// After calling tools, the model will stops and waits for any outstanding tool calls to be completed and their results sent.
1161 /// The returned channel will report all the occurrences in which the model stops before erroring or ending its turn.
1162 pub fn send<T>(
1163 &mut self,
1164 id: UserMessageId,
1165 content: impl IntoIterator<Item = T>,
1166 cx: &mut Context<Self>,
1167 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1168 where
1169 T: Into<UserMessageContent>,
1170 {
1171 let model = self.model().context("No language model configured")?;
1172
1173 log::info!("Thread::send called with model: {}", model.name().0);
1174 self.advance_prompt_id();
1175
1176 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1177 log::debug!("Thread::send content: {:?}", content);
1178
1179 self.messages
1180 .push(Message::User(UserMessage { id, content }));
1181 cx.notify();
1182
1183 log::debug!("Total messages in thread: {}", self.messages.len());
1184 self.run_turn(cx)
1185 }
1186
1187 fn run_turn(
1188 &mut self,
1189 cx: &mut Context<Self>,
1190 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1191 self.cancel(cx);
1192
1193 let model = self.model.clone().context("No language model configured")?;
1194 let profile = AgentSettings::get_global(cx)
1195 .profiles
1196 .get(&self.profile_id)
1197 .context("Profile not found")?;
1198 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1199 let event_stream = ThreadEventStream(events_tx);
1200 let message_ix = self.messages.len().saturating_sub(1);
1201 self.tool_use_limit_reached = false;
1202 self.summary = None;
1203 self.running_turn = Some(RunningTurn {
1204 event_stream: event_stream.clone(),
1205 tools: self.enabled_tools(profile, &model, cx),
1206 _task: cx.spawn(async move |this, cx| {
1207 log::debug!("Starting agent turn execution");
1208
1209 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1210 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1211
1212 match turn_result {
1213 Ok(()) => {
1214 log::debug!("Turn execution completed");
1215 event_stream.send_stop(acp::StopReason::EndTurn);
1216 }
1217 Err(error) => {
1218 log::error!("Turn execution failed: {:?}", error);
1219 match error.downcast::<CompletionError>() {
1220 Ok(CompletionError::Refusal) => {
1221 event_stream.send_stop(acp::StopReason::Refusal);
1222 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1223 }
1224 Ok(CompletionError::MaxTokens) => {
1225 event_stream.send_stop(acp::StopReason::MaxTokens);
1226 }
1227 Ok(CompletionError::Other(error)) | Err(error) => {
1228 event_stream.send_error(error);
1229 }
1230 }
1231 }
1232 }
1233
1234 _ = this.update(cx, |this, _| this.running_turn.take());
1235 }),
1236 });
1237 Ok(events_rx)
1238 }
1239
1240 async fn run_turn_internal(
1241 this: &WeakEntity<Self>,
1242 model: Arc<dyn LanguageModel>,
1243 event_stream: &ThreadEventStream,
1244 cx: &mut AsyncApp,
1245 ) -> Result<()> {
1246 let mut attempt = 0;
1247 let mut intent = CompletionIntent::UserPrompt;
1248 loop {
1249 let request =
1250 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1251
1252 telemetry::event!(
1253 "Agent Thread Completion",
1254 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1255 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1256 model = model.telemetry_id(),
1257 model_provider = model.provider_id().to_string(),
1258 attempt
1259 );
1260
1261 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1262 let mut events = model
1263 .stream_completion(request, cx)
1264 .await
1265 .map_err(|error| anyhow!(error))?;
1266 let mut tool_results = FuturesUnordered::new();
1267 let mut error = None;
1268 while let Some(event) = events.next().await {
1269 log::trace!("Received completion event: {:?}", event);
1270 match event {
1271 Ok(event) => {
1272 tool_results.extend(this.update(cx, |this, cx| {
1273 this.handle_completion_event(event, event_stream, cx)
1274 })??);
1275 }
1276 Err(err) => {
1277 error = Some(err);
1278 break;
1279 }
1280 }
1281 }
1282
1283 let end_turn = tool_results.is_empty();
1284 while let Some(tool_result) = tool_results.next().await {
1285 log::debug!("Tool finished {:?}", tool_result);
1286
1287 event_stream.update_tool_call_fields(
1288 &tool_result.tool_use_id,
1289 acp::ToolCallUpdateFields {
1290 status: Some(if tool_result.is_error {
1291 acp::ToolCallStatus::Failed
1292 } else {
1293 acp::ToolCallStatus::Completed
1294 }),
1295 raw_output: tool_result.output.clone(),
1296 ..Default::default()
1297 },
1298 );
1299 this.update(cx, |this, _cx| {
1300 this.pending_message()
1301 .tool_results
1302 .insert(tool_result.tool_use_id.clone(), tool_result);
1303 })?;
1304 }
1305
1306 this.update(cx, |this, cx| {
1307 this.flush_pending_message(cx);
1308 if this.title.is_none() && this.pending_title_generation.is_none() {
1309 this.generate_title(cx);
1310 }
1311 })?;
1312
1313 if let Some(error) = error {
1314 attempt += 1;
1315 let retry =
1316 this.update(cx, |this, _| this.handle_completion_error(error, attempt))??;
1317 let timer = cx.background_executor().timer(retry.duration);
1318 event_stream.send_retry(retry);
1319 timer.await;
1320 this.update(cx, |this, _cx| {
1321 if let Some(Message::Agent(message)) = this.messages.last() {
1322 if message.tool_results.is_empty() {
1323 intent = CompletionIntent::UserPrompt;
1324 this.messages.push(Message::Resume);
1325 }
1326 }
1327 })?;
1328 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1329 return Err(language_model::ToolUseLimitReachedError.into());
1330 } else if end_turn {
1331 return Ok(());
1332 } else {
1333 intent = CompletionIntent::ToolResults;
1334 attempt = 0;
1335 }
1336 }
1337 }
1338
1339 fn handle_completion_error(
1340 &mut self,
1341 error: LanguageModelCompletionError,
1342 attempt: u8,
1343 ) -> Result<acp_thread::RetryStatus> {
1344 if self.completion_mode == CompletionMode::Normal {
1345 return Err(anyhow!(error));
1346 }
1347
1348 let Some(strategy) = Self::retry_strategy_for(&error) else {
1349 return Err(anyhow!(error));
1350 };
1351
1352 let max_attempts = match &strategy {
1353 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1354 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1355 };
1356
1357 if attempt > max_attempts {
1358 return Err(anyhow!(error));
1359 }
1360
1361 let delay = match &strategy {
1362 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1363 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1364 Duration::from_secs(delay_secs)
1365 }
1366 RetryStrategy::Fixed { delay, .. } => *delay,
1367 };
1368 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1369
1370 Ok(acp_thread::RetryStatus {
1371 last_error: error.to_string().into(),
1372 attempt: attempt as usize,
1373 max_attempts: max_attempts as usize,
1374 started_at: Instant::now(),
1375 duration: delay,
1376 })
1377 }
1378
1379 /// A helper method that's called on every streamed completion event.
1380 /// Returns an optional tool result task, which the main agentic loop will
1381 /// send back to the model when it resolves.
1382 fn handle_completion_event(
1383 &mut self,
1384 event: LanguageModelCompletionEvent,
1385 event_stream: &ThreadEventStream,
1386 cx: &mut Context<Self>,
1387 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1388 log::trace!("Handling streamed completion event: {:?}", event);
1389 use LanguageModelCompletionEvent::*;
1390
1391 match event {
1392 StartMessage { .. } => {
1393 self.flush_pending_message(cx);
1394 self.pending_message = Some(AgentMessage::default());
1395 }
1396 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1397 Thinking { text, signature } => {
1398 self.handle_thinking_event(text, signature, event_stream, cx)
1399 }
1400 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1401 ToolUse(tool_use) => {
1402 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1403 }
1404 ToolUseJsonParseError {
1405 id,
1406 tool_name,
1407 raw_input,
1408 json_parse_error,
1409 } => {
1410 return Ok(Some(Task::ready(
1411 self.handle_tool_use_json_parse_error_event(
1412 id,
1413 tool_name,
1414 raw_input,
1415 json_parse_error,
1416 ),
1417 )));
1418 }
1419 UsageUpdate(usage) => {
1420 telemetry::event!(
1421 "Agent Thread Completion Usage Updated",
1422 thread_id = self.id.to_string(),
1423 prompt_id = self.prompt_id.to_string(),
1424 model = self.model.as_ref().map(|m| m.telemetry_id()),
1425 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1426 input_tokens = usage.input_tokens,
1427 output_tokens = usage.output_tokens,
1428 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1429 cache_read_input_tokens = usage.cache_read_input_tokens,
1430 );
1431 self.update_token_usage(usage, cx);
1432 }
1433 StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => {
1434 self.update_model_request_usage(amount, limit, cx);
1435 }
1436 StatusUpdate(
1437 CompletionRequestStatus::Started
1438 | CompletionRequestStatus::Queued { .. }
1439 | CompletionRequestStatus::Failed { .. },
1440 ) => {}
1441 StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => {
1442 self.tool_use_limit_reached = true;
1443 }
1444 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1445 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1446 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1447 }
1448
1449 Ok(None)
1450 }
1451
1452 fn handle_text_event(
1453 &mut self,
1454 new_text: String,
1455 event_stream: &ThreadEventStream,
1456 cx: &mut Context<Self>,
1457 ) {
1458 event_stream.send_text(&new_text);
1459
1460 let last_message = self.pending_message();
1461 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1462 text.push_str(&new_text);
1463 } else {
1464 last_message
1465 .content
1466 .push(AgentMessageContent::Text(new_text));
1467 }
1468
1469 cx.notify();
1470 }
1471
1472 fn handle_thinking_event(
1473 &mut self,
1474 new_text: String,
1475 new_signature: Option<String>,
1476 event_stream: &ThreadEventStream,
1477 cx: &mut Context<Self>,
1478 ) {
1479 event_stream.send_thinking(&new_text);
1480
1481 let last_message = self.pending_message();
1482 if let Some(AgentMessageContent::Thinking { text, signature }) =
1483 last_message.content.last_mut()
1484 {
1485 text.push_str(&new_text);
1486 *signature = new_signature.or(signature.take());
1487 } else {
1488 last_message.content.push(AgentMessageContent::Thinking {
1489 text: new_text,
1490 signature: new_signature,
1491 });
1492 }
1493
1494 cx.notify();
1495 }
1496
1497 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1498 let last_message = self.pending_message();
1499 last_message
1500 .content
1501 .push(AgentMessageContent::RedactedThinking(data));
1502 cx.notify();
1503 }
1504
1505 fn handle_tool_use_event(
1506 &mut self,
1507 tool_use: LanguageModelToolUse,
1508 event_stream: &ThreadEventStream,
1509 cx: &mut Context<Self>,
1510 ) -> Option<Task<LanguageModelToolResult>> {
1511 cx.notify();
1512
1513 let tool = self.tool(tool_use.name.as_ref());
1514 let mut title = SharedString::from(&tool_use.name);
1515 let mut kind = acp::ToolKind::Other;
1516 if let Some(tool) = tool.as_ref() {
1517 title = tool.initial_title(tool_use.input.clone());
1518 kind = tool.kind();
1519 }
1520
1521 // Ensure the last message ends in the current tool use
1522 let last_message = self.pending_message();
1523 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1524 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1525 if last_tool_use.id == tool_use.id {
1526 *last_tool_use = tool_use.clone();
1527 false
1528 } else {
1529 true
1530 }
1531 } else {
1532 true
1533 }
1534 });
1535
1536 if push_new_tool_use {
1537 event_stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
1538 last_message
1539 .content
1540 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1541 } else {
1542 event_stream.update_tool_call_fields(
1543 &tool_use.id,
1544 acp::ToolCallUpdateFields {
1545 title: Some(title.into()),
1546 kind: Some(kind),
1547 raw_input: Some(tool_use.input.clone()),
1548 ..Default::default()
1549 },
1550 );
1551 }
1552
1553 if !tool_use.is_input_complete {
1554 return None;
1555 }
1556
1557 let Some(tool) = tool else {
1558 let content = format!("No tool named {} exists", tool_use.name);
1559 return Some(Task::ready(LanguageModelToolResult {
1560 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1561 tool_use_id: tool_use.id,
1562 tool_name: tool_use.name,
1563 is_error: true,
1564 output: None,
1565 }));
1566 };
1567
1568 let fs = self.project.read(cx).fs().clone();
1569 let tool_event_stream =
1570 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1571 tool_event_stream.update_fields(acp::ToolCallUpdateFields {
1572 status: Some(acp::ToolCallStatus::InProgress),
1573 ..Default::default()
1574 });
1575 let supports_images = self.model().is_some_and(|model| model.supports_images());
1576 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1577 log::debug!("Running tool {}", tool_use.name);
1578 Some(cx.foreground_executor().spawn(async move {
1579 let tool_result = tool_result.await.and_then(|output| {
1580 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1581 && !supports_images
1582 {
1583 return Err(anyhow!(
1584 "Attempted to read an image, but this model doesn't support it.",
1585 ));
1586 }
1587 Ok(output)
1588 });
1589
1590 match tool_result {
1591 Ok(output) => LanguageModelToolResult {
1592 tool_use_id: tool_use.id,
1593 tool_name: tool_use.name,
1594 is_error: false,
1595 content: output.llm_output,
1596 output: Some(output.raw_output),
1597 },
1598 Err(error) => LanguageModelToolResult {
1599 tool_use_id: tool_use.id,
1600 tool_name: tool_use.name,
1601 is_error: true,
1602 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1603 output: Some(error.to_string().into()),
1604 },
1605 }
1606 }))
1607 }
1608
1609 fn handle_tool_use_json_parse_error_event(
1610 &mut self,
1611 tool_use_id: LanguageModelToolUseId,
1612 tool_name: Arc<str>,
1613 raw_input: Arc<str>,
1614 json_parse_error: String,
1615 ) -> LanguageModelToolResult {
1616 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1617 LanguageModelToolResult {
1618 tool_use_id,
1619 tool_name,
1620 is_error: true,
1621 content: LanguageModelToolResultContent::Text(tool_output.into()),
1622 output: Some(serde_json::Value::String(raw_input.to_string())),
1623 }
1624 }
1625
1626 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1627 self.project
1628 .read(cx)
1629 .user_store()
1630 .update(cx, |user_store, cx| {
1631 user_store.update_model_request_usage(
1632 ModelRequestUsage(RequestUsage {
1633 amount: amount as i32,
1634 limit,
1635 }),
1636 cx,
1637 )
1638 });
1639 }
1640
1641 pub fn title(&self) -> SharedString {
1642 self.title.clone().unwrap_or("New Thread".into())
1643 }
1644
1645 pub fn summary(&mut self, cx: &mut Context<Self>) -> Task<Result<SharedString>> {
1646 if let Some(summary) = self.summary.as_ref() {
1647 return Task::ready(Ok(summary.clone()));
1648 }
1649 let Some(model) = self.summarization_model.clone() else {
1650 return Task::ready(Err(anyhow!("No summarization model available")));
1651 };
1652 let mut request = LanguageModelRequest {
1653 intent: Some(CompletionIntent::ThreadContextSummarization),
1654 temperature: AgentSettings::temperature_for_model(&model, cx),
1655 ..Default::default()
1656 };
1657
1658 for message in &self.messages {
1659 request.messages.extend(message.to_request());
1660 }
1661
1662 request.messages.push(LanguageModelRequestMessage {
1663 role: Role::User,
1664 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1665 cache: false,
1666 });
1667 cx.spawn(async move |this, cx| {
1668 let mut summary = String::new();
1669 let mut messages = model.stream_completion(request, cx).await?;
1670 while let Some(event) = messages.next().await {
1671 let event = event?;
1672 let text = match event {
1673 LanguageModelCompletionEvent::Text(text) => text,
1674 LanguageModelCompletionEvent::StatusUpdate(
1675 CompletionRequestStatus::UsageUpdated { amount, limit },
1676 ) => {
1677 this.update(cx, |thread, cx| {
1678 thread.update_model_request_usage(amount, limit, cx);
1679 })?;
1680 continue;
1681 }
1682 _ => continue,
1683 };
1684
1685 let mut lines = text.lines();
1686 summary.extend(lines.next());
1687 }
1688
1689 log::debug!("Setting summary: {}", summary);
1690 let summary = SharedString::from(summary);
1691
1692 this.update(cx, |this, cx| {
1693 this.summary = Some(summary.clone());
1694 cx.notify()
1695 })?;
1696
1697 Ok(summary)
1698 })
1699 }
1700
1701 fn generate_title(&mut self, cx: &mut Context<Self>) {
1702 let Some(model) = self.summarization_model.clone() else {
1703 return;
1704 };
1705
1706 log::debug!(
1707 "Generating title with model: {:?}",
1708 self.summarization_model.as_ref().map(|model| model.name())
1709 );
1710 let mut request = LanguageModelRequest {
1711 intent: Some(CompletionIntent::ThreadSummarization),
1712 temperature: AgentSettings::temperature_for_model(&model, cx),
1713 ..Default::default()
1714 };
1715
1716 for message in &self.messages {
1717 request.messages.extend(message.to_request());
1718 }
1719
1720 request.messages.push(LanguageModelRequestMessage {
1721 role: Role::User,
1722 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1723 cache: false,
1724 });
1725 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1726 let mut title = String::new();
1727
1728 let generate = async {
1729 let mut messages = model.stream_completion(request, cx).await?;
1730 while let Some(event) = messages.next().await {
1731 let event = event?;
1732 let text = match event {
1733 LanguageModelCompletionEvent::Text(text) => text,
1734 LanguageModelCompletionEvent::StatusUpdate(
1735 CompletionRequestStatus::UsageUpdated { amount, limit },
1736 ) => {
1737 this.update(cx, |thread, cx| {
1738 thread.update_model_request_usage(amount, limit, cx);
1739 })?;
1740 continue;
1741 }
1742 _ => continue,
1743 };
1744
1745 let mut lines = text.lines();
1746 title.extend(lines.next());
1747
1748 // Stop if the LLM generated multiple lines.
1749 if lines.next().is_some() {
1750 break;
1751 }
1752 }
1753 anyhow::Ok(())
1754 };
1755
1756 if generate.await.context("failed to generate title").is_ok() {
1757 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1758 }
1759 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1760 }));
1761 }
1762
1763 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1764 self.pending_title_generation = None;
1765 if Some(&title) != self.title.as_ref() {
1766 self.title = Some(title);
1767 cx.emit(TitleUpdated);
1768 cx.notify();
1769 }
1770 }
1771
1772 fn last_user_message(&self) -> Option<&UserMessage> {
1773 self.messages
1774 .iter()
1775 .rev()
1776 .find_map(|message| match message {
1777 Message::User(user_message) => Some(user_message),
1778 Message::Agent(_) => None,
1779 Message::Resume => None,
1780 })
1781 }
1782
1783 fn pending_message(&mut self) -> &mut AgentMessage {
1784 self.pending_message.get_or_insert_default()
1785 }
1786
1787 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1788 let Some(mut message) = self.pending_message.take() else {
1789 return;
1790 };
1791
1792 if message.content.is_empty() {
1793 return;
1794 }
1795
1796 for content in &message.content {
1797 let AgentMessageContent::ToolUse(tool_use) = content else {
1798 continue;
1799 };
1800
1801 if !message.tool_results.contains_key(&tool_use.id) {
1802 message.tool_results.insert(
1803 tool_use.id.clone(),
1804 LanguageModelToolResult {
1805 tool_use_id: tool_use.id.clone(),
1806 tool_name: tool_use.name.clone(),
1807 is_error: true,
1808 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1809 output: None,
1810 },
1811 );
1812 }
1813 }
1814
1815 self.messages.push(Message::Agent(message));
1816 self.updated_at = Utc::now();
1817 self.summary = None;
1818 cx.notify()
1819 }
1820
1821 pub(crate) fn build_completion_request(
1822 &self,
1823 completion_intent: CompletionIntent,
1824 cx: &App,
1825 ) -> Result<LanguageModelRequest> {
1826 let model = self.model().context("No language model configured")?;
1827 let tools = if let Some(turn) = self.running_turn.as_ref() {
1828 turn.tools
1829 .iter()
1830 .filter_map(|(tool_name, tool)| {
1831 log::trace!("Including tool: {}", tool_name);
1832 Some(LanguageModelRequestTool {
1833 name: tool_name.to_string(),
1834 description: tool.description().to_string(),
1835 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1836 })
1837 })
1838 .collect::<Vec<_>>()
1839 } else {
1840 Vec::new()
1841 };
1842
1843 log::debug!("Building completion request");
1844 log::debug!("Completion intent: {:?}", completion_intent);
1845 log::debug!("Completion mode: {:?}", self.completion_mode);
1846
1847 let messages = self.build_request_messages(cx);
1848 log::debug!("Request will include {} messages", messages.len());
1849 log::debug!("Request includes {} tools", tools.len());
1850
1851 let request = LanguageModelRequest {
1852 thread_id: Some(self.id.to_string()),
1853 prompt_id: Some(self.prompt_id.to_string()),
1854 intent: Some(completion_intent),
1855 mode: Some(self.completion_mode.into()),
1856 messages,
1857 tools,
1858 tool_choice: None,
1859 stop: Vec::new(),
1860 temperature: AgentSettings::temperature_for_model(model, cx),
1861 thinking_allowed: true,
1862 };
1863
1864 log::debug!("Completion request built successfully");
1865 Ok(request)
1866 }
1867
1868 fn enabled_tools(
1869 &self,
1870 profile: &AgentProfileSettings,
1871 model: &Arc<dyn LanguageModel>,
1872 cx: &App,
1873 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1874 fn truncate(tool_name: &SharedString) -> SharedString {
1875 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1876 let mut truncated = tool_name.to_string();
1877 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1878 truncated.into()
1879 } else {
1880 tool_name.clone()
1881 }
1882 }
1883
1884 let mut tools = self
1885 .tools
1886 .iter()
1887 .filter_map(|(tool_name, tool)| {
1888 if tool.supported_provider(&model.provider_id())
1889 && profile.is_tool_enabled(tool_name)
1890 {
1891 Some((truncate(tool_name), tool.clone()))
1892 } else {
1893 None
1894 }
1895 })
1896 .collect::<BTreeMap<_, _>>();
1897
1898 let mut context_server_tools = Vec::new();
1899 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1900 let mut duplicate_tool_names = HashSet::default();
1901 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1902 for (tool_name, tool) in server_tools {
1903 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1904 let tool_name = truncate(tool_name);
1905 if !seen_tools.insert(tool_name.clone()) {
1906 duplicate_tool_names.insert(tool_name.clone());
1907 }
1908 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1909 }
1910 }
1911 }
1912
1913 // When there are duplicate tool names, disambiguate by prefixing them
1914 // with the server ID. In the rare case there isn't enough space for the
1915 // disambiguated tool name, keep only the last tool with this name.
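        // For example, a duplicate "search" tool from server "github" becomes
        // "github_search" (with the server ID truncated if needed to fit).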
1916 for (server_id, tool_name, tool) in context_server_tools {
1917 if duplicate_tool_names.contains(&tool_name) {
1918 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1919 if available >= 2 {
1920 let mut disambiguated = server_id.0.to_string();
1921 disambiguated.truncate(available - 1);
1922 disambiguated.push('_');
1923 disambiguated.push_str(&tool_name);
1924 tools.insert(disambiguated.into(), tool.clone());
1925 } else {
1926 tools.insert(tool_name, tool.clone());
1927 }
1928 } else {
1929 tools.insert(tool_name, tool.clone());
1930 }
1931 }
1932
1933 tools
1934 }
1935
1936 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1937 self.running_turn.as_ref()?.tools.get(name).cloned()
1938 }
1939
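    /// Converts the thread into request messages: a rendered system prompt,
    /// followed by the committed messages (the last of which is marked
    /// cacheable) and any pending agent message.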
1940 fn build_request_messages(&self, cx: &App) -> Vec<LanguageModelRequestMessage> {
1941 log::trace!(
1942 "Building request messages from {} thread messages",
1943 self.messages.len()
1944 );
1945
1946 let system_prompt = SystemPromptTemplate {
1947 project: self.project_context.read(cx),
1948 available_tools: self.tools.keys().cloned().collect(),
1949 }
1950 .render(&self.templates)
1951 .context("failed to build system prompt")
1952 .expect("Invalid template");
1953 let mut messages = vec![LanguageModelRequestMessage {
1954 role: Role::System,
1955 content: vec![system_prompt.into()],
1956 cache: false,
1957 }];
1958 for message in &self.messages {
1959 messages.extend(message.to_request());
1960 }
1961
1962 if let Some(last_message) = messages.last_mut() {
1963 last_message.cache = true;
1964 }
1965
1966 if let Some(message) = self.pending_message.as_ref() {
1967 messages.extend(message.to_request());
1968 }
1969
1970 messages
1971 }
1972
1973 pub fn to_markdown(&self) -> String {
1974 let mut markdown = String::new();
1975 for (ix, message) in self.messages.iter().enumerate() {
1976 if ix > 0 {
1977 markdown.push('\n');
1978 }
1979 markdown.push_str(&message.to_markdown());
1980 }
1981
1982 if let Some(message) = self.pending_message.as_ref() {
1983 markdown.push('\n');
1984 markdown.push_str(&message.to_markdown());
1985 }
1986
1987 markdown
1988 }
1989
1990 fn advance_prompt_id(&mut self) {
1991 self.prompt_id = PromptId::new();
1992 }
1993
1994 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
1995 use LanguageModelCompletionError::*;
1996 use http_client::StatusCode;
1997
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   honoring any server-provided retry-after delay; plain HTTP 429s use exponential backoff instead.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2002 match error {
2003 HttpResponseError {
2004 status_code: StatusCode::TOO_MANY_REQUESTS,
2005 ..
2006 } => Some(RetryStrategy::ExponentialBackoff {
2007 initial_delay: BASE_RETRY_DELAY,
2008 max_attempts: MAX_RETRY_ATTEMPTS,
2009 }),
2010 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2011 Some(RetryStrategy::Fixed {
2012 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2013 max_attempts: MAX_RETRY_ATTEMPTS,
2014 })
2015 }
2016 UpstreamProviderError {
2017 status,
2018 retry_after,
2019 ..
2020 } => match *status {
2021 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2022 Some(RetryStrategy::Fixed {
2023 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2024 max_attempts: MAX_RETRY_ATTEMPTS,
2025 })
2026 }
2027 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2028 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2029 // Internal Server Error could be anything, retry up to 3 times.
2030 max_attempts: 3,
2031 }),
2032 status => {
                    // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
                    // but we encounter it frequently in practice. See https://http.dev/529
2035 if status.as_u16() == 529 {
2036 Some(RetryStrategy::Fixed {
2037 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2038 max_attempts: MAX_RETRY_ATTEMPTS,
2039 })
2040 } else {
2041 Some(RetryStrategy::Fixed {
2042 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2043 max_attempts: 2,
2044 })
2045 }
2046 }
2047 },
2048 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2049 delay: BASE_RETRY_DELAY,
2050 max_attempts: 3,
2051 }),
2052 ApiReadResponseError { .. }
2053 | HttpSend { .. }
2054 | DeserializeResponse { .. }
2055 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2056 delay: BASE_RETRY_DELAY,
2057 max_attempts: 3,
2058 }),
2059 // Retrying these errors definitely shouldn't help.
2060 HttpResponseError {
2061 status_code:
2062 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2063 ..
2064 }
2065 | AuthenticationError { .. }
2066 | PermissionError { .. }
2067 | NoApiKey { .. }
2068 | ApiEndpointNotFound { .. }
2069 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2071 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2072 delay: BASE_RETRY_DELAY,
2073 max_attempts: 1,
2074 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2076 HttpResponseError { status_code, .. }
2077 if status_code.is_client_error() || status_code.is_server_error() =>
2078 {
2079 Some(RetryStrategy::Fixed {
2080 delay: BASE_RETRY_DELAY,
2081 max_attempts: 3,
2082 })
2083 }
2084 Other(err)
2085 if err.is::<language_model::PaymentRequiredError>()
2086 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2087 {
2088 // Retrying won't help for Payment Required or Model Request Limit errors (where
2089 // the user must upgrade to usage-based billing to get more requests, or else wait
2090 // for a significant amount of time for the request limit to reset).
2091 None
2092 }
            // Conservatively retry any other errors a couple of times.
2094 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2095 delay: BASE_RETRY_DELAY,
2096 max_attempts: 2,
2097 }),
2098 }
2099 }
2100}
2101
2102struct RunningTurn {
2103 /// Holds the task that handles agent interaction until the end of the turn.
2104 /// Survives across multiple requests as the model performs tool calls and
    /// we run the tools and report their results.
2106 _task: Task<()>,
2107 /// The current event stream for the running turn. Used to report a final
2108 /// cancellation event if we cancel the turn.
2109 event_stream: ThreadEventStream,
2110 /// The tools that were enabled for this turn.
2111 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2112}
2113
2114impl RunningTurn {
2115 fn cancel(self) {
2116 log::debug!("Cancelling in progress turn");
2117 self.event_stream.send_canceled();
2118 }
2119}
2120
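/// Emitted when the token usage reported for the thread changes.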
2121pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2122
2123impl EventEmitter<TokenUsageUpdated> for Thread {}
2124
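/// Emitted when the thread's title is updated.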
2125pub struct TitleUpdated;
2126
2127impl EventEmitter<TitleUpdated> for Thread {}
2128
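/// A strongly-typed tool that the agent can invoke during a turn.
///
/// Implementors declare their input and output types, describe themselves via
/// a JSON schema, and run asynchronously; call [`AgentTool::erase`] to obtain
/// the dynamic [`AnyAgentTool`] form the thread works with.
///
/// A minimal sketch of an implementation (illustrative only: `EchoTool` and
/// its input type are hypothetical, and it assumes `String` converts into
/// `LanguageModelToolResultContent`):
///
/// ```ignore
/// #[derive(serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
/// struct EchoToolInput {
///     /// The text to echo back to the model.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
/// ```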
2129pub trait AgentTool
2130where
2131 Self: 'static + Sized,
2132{
2133 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2134 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2135
2136 fn name() -> &'static str;
2137
2138 fn description(&self) -> SharedString {
2139 let schema = schemars::schema_for!(Self::Input);
2140 SharedString::new(
2141 schema
2142 .get("description")
2143 .and_then(|description| description.as_str())
2144 .unwrap_or_default(),
2145 )
2146 }
2147
2148 fn kind() -> acp::ToolKind;
2149
    /// The initial tool title to display. Can be updated during the tool run.
    /// Receives `Err` containing the raw JSON value when the input could not
    /// be parsed into [`Self::Input`].
2151 fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString;
2152
2153 /// Returns the JSON schema that describes the tool's input.
2154 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Schema {
2155 crate::tool_schema::root_schema_for::<Self::Input>(format)
2156 }
2157
    /// Some tools only work with specific providers (e.g. for billing reasons).
    /// Lets a tool declare whether it is compatible with the given provider or
    /// should be filtered out of the request.
2160 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2161 true
2162 }
2163
2164 /// Runs the tool with the provided input.
2165 fn run(
2166 self: Arc<Self>,
2167 input: Self::Input,
2168 event_stream: ToolCallEventStream,
2169 cx: &mut App,
2170 ) -> Task<Result<Self::Output>>;
2171
2172 /// Emits events for a previous execution of the tool.
2173 fn replay(
2174 &self,
2175 _input: Self::Input,
2176 _output: Self::Output,
2177 _event_stream: ToolCallEventStream,
2178 _cx: &mut App,
2179 ) -> Result<()> {
2180 Ok(())
2181 }
2182
2183 fn erase(self) -> Arc<dyn AnyAgentTool> {
2184 Arc::new(Erased(Arc::new(self)))
2185 }
2186}
2187
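/// Wrapper produced by [`AgentTool::erase`] that adapts a typed tool to the
/// object-safe [`AnyAgentTool`] interface.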
2188pub struct Erased<T>(T);
2189
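/// The result of running a tool: the content handed back to the language
/// model, plus the raw JSON output (kept around e.g. for replaying the call).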
2190pub struct AgentToolOutput {
2191 pub llm_output: LanguageModelToolResultContent,
2192 pub raw_output: serde_json::Value,
2193}
2194
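/// Object-safe, type-erased counterpart of [`AgentTool`] that operates on raw
/// `serde_json::Value`s so heterogeneous tools can be stored together.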
2195pub trait AnyAgentTool {
2196 fn name(&self) -> SharedString;
2197 fn description(&self) -> SharedString;
2198 fn kind(&self) -> acp::ToolKind;
2199 fn initial_title(&self, input: serde_json::Value) -> SharedString;
2200 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2201 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2202 true
2203 }
2204 fn run(
2205 self: Arc<Self>,
2206 input: serde_json::Value,
2207 event_stream: ToolCallEventStream,
2208 cx: &mut App,
2209 ) -> Task<Result<AgentToolOutput>>;
2210 fn replay(
2211 &self,
2212 input: serde_json::Value,
2213 output: serde_json::Value,
2214 event_stream: ToolCallEventStream,
2215 cx: &mut App,
2216 ) -> Result<()>;
2217}
2218
2219impl<T> AnyAgentTool for Erased<Arc<T>>
2220where
2221 T: AgentTool,
2222{
2223 fn name(&self) -> SharedString {
2224 T::name().into()
2225 }
2226
2227 fn description(&self) -> SharedString {
2228 self.0.description()
2229 }
2230
    fn kind(&self) -> acp::ToolKind {
2232 T::kind()
2233 }
2234
2235 fn initial_title(&self, input: serde_json::Value) -> SharedString {
2236 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2237 self.0.initial_title(parsed_input)
2238 }
2239
2240 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2241 let mut json = serde_json::to_value(self.0.input_schema(format))?;
2242 adapt_schema_to_format(&mut json, format)?;
2243 Ok(json)
2244 }
2245
2246 fn supported_provider(&self, provider: &LanguageModelProviderId) -> bool {
2247 self.0.supported_provider(provider)
2248 }
2249
2250 fn run(
2251 self: Arc<Self>,
2252 input: serde_json::Value,
2253 event_stream: ToolCallEventStream,
2254 cx: &mut App,
2255 ) -> Task<Result<AgentToolOutput>> {
2256 cx.spawn(async move |cx| {
2257 let input = serde_json::from_value(input)?;
2258 let output = cx
2259 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2260 .await?;
2261 let raw_output = serde_json::to_value(&output)?;
2262 Ok(AgentToolOutput {
2263 llm_output: output.into(),
2264 raw_output,
2265 })
2266 })
2267 }
2268
2269 fn replay(
2270 &self,
2271 input: serde_json::Value,
2272 output: serde_json::Value,
2273 event_stream: ToolCallEventStream,
2274 cx: &mut App,
2275 ) -> Result<()> {
2276 let input = serde_json::from_value(input)?;
2277 let output = serde_json::from_value(output)?;
2278 self.0.replay(input, output, event_stream, cx)
2279 }
2280}
2281
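/// Sender half of a thread's event stream. The helpers below wrap payloads in
/// [`ThreadEvent`]s and ignore send failures, since the receiver may already
/// have been dropped (e.g. after cancellation).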
2282#[derive(Clone)]
2283struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2284
2285impl ThreadEventStream {
2286 fn send_user_message(&self, message: &UserMessage) {
2287 self.0
2288 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2289 .ok();
2290 }
2291
2292 fn send_text(&self, text: &str) {
2293 self.0
2294 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2295 .ok();
2296 }
2297
2298 fn send_thinking(&self, text: &str) {
2299 self.0
2300 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2301 .ok();
2302 }
2303
2304 fn send_tool_call(
2305 &self,
2306 id: &LanguageModelToolUseId,
2307 title: SharedString,
2308 kind: acp::ToolKind,
2309 input: serde_json::Value,
2310 ) {
2311 self.0
2312 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2313 id,
2314 title.to_string(),
2315 kind,
2316 input,
2317 ))))
2318 .ok();
2319 }
2320
2321 fn initial_tool_call(
2322 id: &LanguageModelToolUseId,
2323 title: String,
2324 kind: acp::ToolKind,
2325 input: serde_json::Value,
2326 ) -> acp::ToolCall {
2327 acp::ToolCall {
2328 id: acp::ToolCallId(id.to_string().into()),
2329 title,
2330 kind,
2331 status: acp::ToolCallStatus::Pending,
2332 content: vec![],
2333 locations: vec![],
2334 raw_input: Some(input),
2335 raw_output: None,
2336 }
2337 }
2338
2339 fn update_tool_call_fields(
2340 &self,
2341 tool_use_id: &LanguageModelToolUseId,
2342 fields: acp::ToolCallUpdateFields,
2343 ) {
2344 self.0
2345 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2346 acp::ToolCallUpdate {
2347 id: acp::ToolCallId(tool_use_id.to_string().into()),
2348 fields,
2349 }
2350 .into(),
2351 )))
2352 .ok();
2353 }
2354
2355 fn send_retry(&self, status: acp_thread::RetryStatus) {
2356 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2357 }
2358
2359 fn send_stop(&self, reason: acp::StopReason) {
2360 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2361 }
2362
2363 fn send_canceled(&self) {
2364 self.0
2365 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2366 .ok();
2367 }
2368
2369 fn send_error(&self, error: impl Into<anyhow::Error>) {
2370 self.0.unbounded_send(Err(error.into())).ok();
2371 }
2372}
2373
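/// Event stream handle scoped to a single tool call. Updates sent through it
/// are tagged with the originating `tool_use_id`; the optional [`Fs`] handle
/// is used to persist the "Always Allow" choice made in
/// [`ToolCallEventStream::authorize`].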
2374#[derive(Clone)]
2375pub struct ToolCallEventStream {
2376 tool_use_id: LanguageModelToolUseId,
2377 stream: ThreadEventStream,
2378 fs: Option<Arc<dyn Fs>>,
2379}
2380
2381impl ToolCallEventStream {
2382 #[cfg(test)]
2383 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2384 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2385
        let stream =
            ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2387
2388 (stream, ToolCallEventStreamReceiver(events_rx))
2389 }
2390
2391 fn new(
2392 tool_use_id: LanguageModelToolUseId,
2393 stream: ThreadEventStream,
2394 fs: Option<Arc<dyn Fs>>,
2395 ) -> Self {
2396 Self {
2397 tool_use_id,
2398 stream,
2399 fs,
2400 }
2401 }
2402
2403 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2404 self.stream
2405 .update_tool_call_fields(&self.tool_use_id, fields);
2406 }
2407
2408 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2409 self.stream
2410 .0
2411 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2412 acp_thread::ToolCallUpdateDiff {
2413 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2414 diff,
2415 }
2416 .into(),
2417 )))
2418 .ok();
2419 }
2420
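    /// Asks the user to authorize the tool action described by `title`.
    ///
    /// Resolves immediately if `always_allow_tool_actions` is enabled;
    /// otherwise a `ToolCallAuthorization` event is emitted with
    /// Always Allow / Allow / Deny options, and choosing "Always Allow"
    /// persists that setting when a filesystem handle is available.
    ///
    /// Typical use (sketch; assumes an async context where the returned task
    /// can be awaited):
    ///
    /// ```ignore
    /// event_stream.authorize("Delete the build directory?", cx).await?;
    /// // Only reached if the user granted permission.
    /// ```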
2421 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2422 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2423 return Task::ready(Ok(()));
2424 }
2425
2426 let (response_tx, response_rx) = oneshot::channel();
2427 self.stream
2428 .0
2429 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2430 ToolCallAuthorization {
2431 tool_call: acp::ToolCallUpdate {
2432 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2433 fields: acp::ToolCallUpdateFields {
2434 title: Some(title.into()),
2435 ..Default::default()
2436 },
2437 },
2438 options: vec![
2439 acp::PermissionOption {
2440 id: acp::PermissionOptionId("always_allow".into()),
2441 name: "Always Allow".into(),
2442 kind: acp::PermissionOptionKind::AllowAlways,
2443 },
2444 acp::PermissionOption {
2445 id: acp::PermissionOptionId("allow".into()),
2446 name: "Allow".into(),
2447 kind: acp::PermissionOptionKind::AllowOnce,
2448 },
2449 acp::PermissionOption {
2450 id: acp::PermissionOptionId("deny".into()),
2451 name: "Deny".into(),
2452 kind: acp::PermissionOptionKind::RejectOnce,
2453 },
2454 ],
2455 response: response_tx,
2456 },
2457 )))
2458 .ok();
2459 let fs = self.fs.clone();
2460 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2461 "always_allow" => {
2462 if let Some(fs) = fs.clone() {
2463 cx.update(|cx| {
2464 update_settings_file::<AgentSettings>(fs, cx, |settings, _| {
2465 settings.set_always_allow_tool_actions(true);
2466 });
2467 })?;
2468 }
2469
2470 Ok(())
2471 }
2472 "allow" => Ok(()),
2473 _ => Err(anyhow!("Permission to run tool denied by user")),
2474 })
2475 }
2476}
2477
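/// Test-only receiver for a tool call's event stream. The `expect_*` helpers
/// panic if the next event is not of the expected kind.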
2478#[cfg(test)]
2479pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2480
2481#[cfg(test)]
2482impl ToolCallEventStreamReceiver {
2483 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2484 let event = self.0.next().await;
2485 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2486 auth
2487 } else {
2488 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2489 }
2490 }
2491
2492 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2493 let event = self.0.next().await;
2494 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2495 update,
2496 )))) = event
2497 {
2498 update.fields
2499 } else {
2500 panic!("Expected update fields but got: {:?}", event);
2501 }
2502 }
2503
2504 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2505 let event = self.0.next().await;
2506 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2507 update,
2508 )))) = event
2509 {
2510 update.diff
2511 } else {
2512 panic!("Expected diff but got: {:?}", event);
2513 }
2514 }
2515
2516 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2517 let event = self.0.next().await;
2518 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2519 update,
2520 )))) = event
2521 {
2522 update.terminal
2523 } else {
2524 panic!("Expected terminal but got: {:?}", event);
2525 }
2526 }
2527}
2528
2529#[cfg(test)]
2530impl std::ops::Deref for ToolCallEventStreamReceiver {
2531 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2532
2533 fn deref(&self) -> &Self::Target {
2534 &self.0
2535 }
2536}
2537
2538#[cfg(test)]
2539impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2540 fn deref_mut(&mut self) -> &mut Self::Target {
2541 &mut self.0
2542 }
2543}
2544
2545impl From<&str> for UserMessageContent {
2546 fn from(text: &str) -> Self {
2547 Self::Text(text.into())
2548 }
2549}
2550
2551impl From<acp::ContentBlock> for UserMessageContent {
2552 fn from(value: acp::ContentBlock) -> Self {
2553 match value {
2554 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2555 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2556 acp::ContentBlock::Audio(_) => {
2557 // TODO
2558 Self::Text("[audio]".to_string())
2559 }
2560 acp::ContentBlock::ResourceLink(resource_link) => {
2561 match MentionUri::parse(&resource_link.uri) {
2562 Ok(uri) => Self::Mention {
2563 uri,
2564 content: String::new(),
2565 },
2566 Err(err) => {
2567 log::error!("Failed to parse mention link: {}", err);
2568 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2569 }
2570 }
2571 }
2572 acp::ContentBlock::Resource(resource) => match resource.resource {
2573 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2574 match MentionUri::parse(&resource.uri) {
2575 Ok(uri) => Self::Mention {
2576 uri,
2577 content: resource.text,
2578 },
2579 Err(err) => {
2580 log::error!("Failed to parse mention link: {}", err);
2581 Self::Text(
2582 MarkdownCodeBlock {
2583 tag: &resource.uri,
2584 text: &resource.text,
2585 }
2586 .to_string(),
2587 )
2588 }
2589 }
2590 }
2591 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2592 // TODO
2593 Self::Text("[blob]".to_string())
2594 }
2595 },
2596 }
2597 }
2598}
2599
2600impl From<UserMessageContent> for acp::ContentBlock {
2601 fn from(content: UserMessageContent) -> Self {
2602 match content {
2603 UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
2604 text,
2605 annotations: None,
2606 }),
2607 UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
2608 data: image.source.to_string(),
2609 mime_type: "image/png".to_string(),
2610 annotations: None,
2611 uri: None,
2612 }),
2613 UserMessageContent::Mention { uri, content } => {
2614 acp::ContentBlock::Resource(acp::EmbeddedResource {
2615 resource: acp::EmbeddedResourceResource::TextResourceContents(
2616 acp::TextResourceContents {
2617 mime_type: None,
2618 text: content,
2619 uri: uri.to_uri().to_string(),
2620 },
2621 ),
2622 annotations: None,
2623 })
2624 }
2625 }
2626 }
2627}
2628
2629fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2630 LanguageModelImage {
2631 source: image_content.data.into(),
2632 // TODO: make this optional?
2633 size: gpui::Size::new(0.into(), 0.into()),
2634 }
2635}