1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ReadFileTool, SystemPromptTemplate,
5 Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6};
7use acp_thread::{MentionUri, UserMessageId};
8use action_log::ActionLog;
9use agent::thread::{GitState, ProjectSnapshot, WorktreeSnapshot};
10use agent_client_protocol as acp;
11use agent_settings::{
12 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
13 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
14};
15use anyhow::{Context as _, Result, anyhow};
16use assistant_tool::adapt_schema_to_format;
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage};
19use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::{
23 FutureExt,
24 channel::{mpsc, oneshot},
25 future::Shared,
26 stream::FuturesUnordered,
27};
28use git::repository::DiffType;
29use gpui::{
30 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
31};
32use language_model::{
33 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
34 LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
35 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
36 LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
37 LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
38};
39use project::{
40 Project,
41 git_store::{GitStore, RepositoryState},
42};
43use prompt_store::ProjectContext;
44use schemars::{JsonSchema, Schema};
45use serde::{Deserialize, Serialize};
46use settings::{Settings, update_settings_file};
47use smol::stream::StreamExt;
48use std::fmt::Write;
49use std::{
50 collections::BTreeMap,
51 ops::RangeInclusive,
52 path::Path,
53 sync::Arc,
54 time::{Duration, Instant},
55};
56use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock};
57use uuid::Uuid;
58
59const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
60pub const MAX_TOOL_NAME_LENGTH: usize = 64;
61
62/// The ID of the user prompt that initiated a request.
63///
64/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
65#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
66pub struct PromptId(Arc<str>);
67
68impl PromptId {
69 pub fn new() -> Self {
70 Self(Uuid::new_v4().to_string().into())
71 }
72}
73
74impl std::fmt::Display for PromptId {
75 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
76 write!(f, "{}", self.0)
77 }
78}
79
80pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
81pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
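// With these defaults, exponential backoff waits 5s, 10s, 20s, and 40s across the four
// attempts, while fixed-delay retries wait `BASE_RETRY_DELAY` (or a server-provided
// `retry_after`) between attempts.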
82
83#[derive(Debug, Clone)]
84enum RetryStrategy {
85 ExponentialBackoff {
86 initial_delay: Duration,
87 max_attempts: u8,
88 },
89 Fixed {
90 delay: Duration,
91 max_attempts: u8,
92 },
93}
94
95#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
96pub enum Message {
97 User(UserMessage),
98 Agent(AgentMessage),
99 Resume,
100}
101
102impl Message {
103 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
104 match self {
105 Message::Agent(agent_message) => Some(agent_message),
106 _ => None,
107 }
108 }
109
110 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
111 match self {
112 Message::User(message) => vec![message.to_request()],
113 Message::Agent(message) => message.to_request(),
114 Message::Resume => vec![LanguageModelRequestMessage {
115 role: Role::User,
116 content: vec!["Continue where you left off".into()],
117 cache: false,
118 }],
119 }
120 }
121
122 pub fn to_markdown(&self) -> String {
123 match self {
124 Message::User(message) => message.to_markdown(),
125 Message::Agent(message) => message.to_markdown(),
126 Message::Resume => "[resume]\n".into(),
127 }
128 }
129
130 pub fn role(&self) -> Role {
131 match self {
132 Message::User(_) | Message::Resume => Role::User,
133 Message::Agent(_) => Role::Assistant,
134 }
135 }
136}
137
138#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
139pub struct UserMessage {
140 pub id: UserMessageId,
141 pub content: Vec<UserMessageContent>,
142}
143
144#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
145pub enum UserMessageContent {
146 Text(String),
147 Mention { uri: MentionUri, content: String },
148 Image(LanguageModelImage),
149}
150
151impl UserMessage {
152 pub fn to_markdown(&self) -> String {
153 let mut markdown = String::from("## User\n\n");
154
155 for content in &self.content {
156 match content {
157 UserMessageContent::Text(text) => {
158 markdown.push_str(text);
159 markdown.push('\n');
160 }
161 UserMessageContent::Image(_) => {
162 markdown.push_str("<image />\n");
163 }
164 UserMessageContent::Mention { uri, content } => {
165 if !content.is_empty() {
166 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
167 } else {
168 let _ = writeln!(&mut markdown, "{}", uri.as_link());
169 }
170 }
171 }
172 }
173
174 markdown
175 }
176
177 fn to_request(&self) -> LanguageModelRequestMessage {
178 let mut message = LanguageModelRequestMessage {
179 role: Role::User,
180 content: Vec::with_capacity(self.content.len()),
181 cache: false,
182 };
183
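        // Mention contents are grouped into tagged sections (files, directories, symbols,
        // selections, threads, fetched URLs, and rules) that are appended after the user's
        // text inside a single <context> block.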
184 const OPEN_CONTEXT: &str = "<context>\n\
185 The following items were attached by the user. \
186 They are up-to-date and don't need to be re-read.\n\n";
187
188 const OPEN_FILES_TAG: &str = "<files>";
189 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
190 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
191 const OPEN_SELECTIONS_TAG: &str = "<selections>";
192 const OPEN_THREADS_TAG: &str = "<threads>";
193 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
194 const OPEN_RULES_TAG: &str =
195 "<rules>\nThe user has specified the following rules that should be applied:\n";
196
197 let mut file_context = OPEN_FILES_TAG.to_string();
198 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
199 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
200 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
201 let mut thread_context = OPEN_THREADS_TAG.to_string();
202 let mut fetch_context = OPEN_FETCH_TAG.to_string();
203 let mut rules_context = OPEN_RULES_TAG.to_string();
204
205 for chunk in &self.content {
206 let chunk = match chunk {
207 UserMessageContent::Text(text) => {
208 language_model::MessageContent::Text(text.clone())
209 }
210 UserMessageContent::Image(value) => {
211 language_model::MessageContent::Image(value.clone())
212 }
213 UserMessageContent::Mention { uri, content } => {
214 match uri {
215 MentionUri::File { abs_path } => {
216 write!(
217 &mut file_context,
218 "\n{}",
219 MarkdownCodeBlock {
220 tag: &codeblock_tag(abs_path, None),
221 text: &content.to_string(),
222 }
223 )
224 .ok();
225 }
226 MentionUri::PastedImage => {
227 debug_panic!("pasted image URI should not be used in mention content")
228 }
229 MentionUri::Directory { .. } => {
230 write!(&mut directory_context, "\n{}\n", content).ok();
231 }
232 MentionUri::Symbol {
233 abs_path: path,
234 line_range,
235 ..
236 } => {
237 write!(
238 &mut symbol_context,
239 "\n{}",
240 MarkdownCodeBlock {
241 tag: &codeblock_tag(path, Some(line_range)),
242 text: content
243 }
244 )
245 .ok();
246 }
247 MentionUri::Selection {
248 abs_path: path,
249 line_range,
250 ..
251 } => {
252 write!(
253 &mut selection_context,
254 "\n{}",
255 MarkdownCodeBlock {
256 tag: &codeblock_tag(
257 path.as_deref().unwrap_or("Untitled".as_ref()),
258 Some(line_range)
259 ),
260 text: content
261 }
262 )
263 .ok();
264 }
265 MentionUri::Thread { .. } => {
266 write!(&mut thread_context, "\n{}\n", content).ok();
267 }
268 MentionUri::TextThread { .. } => {
269 write!(&mut thread_context, "\n{}\n", content).ok();
270 }
271 MentionUri::Rule { .. } => {
272 write!(
273 &mut rules_context,
274 "\n{}",
275 MarkdownCodeBlock {
276 tag: "",
277 text: content
278 }
279 )
280 .ok();
281 }
282 MentionUri::Fetch { url } => {
283 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
284 }
285 }
286
287 language_model::MessageContent::Text(uri.as_link().to_string())
288 }
289 };
290
291 message.content.push(chunk);
292 }
293
294 let len_before_context = message.content.len();
295
296 if file_context.len() > OPEN_FILES_TAG.len() {
297 file_context.push_str("</files>\n");
298 message
299 .content
300 .push(language_model::MessageContent::Text(file_context));
301 }
302
303 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
304 directory_context.push_str("</directories>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(directory_context));
308 }
309
310 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
311 symbol_context.push_str("</symbols>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(symbol_context));
315 }
316
317 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
318 selection_context.push_str("</selections>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(selection_context));
322 }
323
324 if thread_context.len() > OPEN_THREADS_TAG.len() {
325 thread_context.push_str("</threads>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(thread_context));
329 }
330
331 if fetch_context.len() > OPEN_FETCH_TAG.len() {
332 fetch_context.push_str("</fetched_urls>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(fetch_context));
336 }
337
338 if rules_context.len() > OPEN_RULES_TAG.len() {
            rules_context.push_str("</rules>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(rules_context));
343 }
344
345 if message.content.len() > len_before_context {
346 message.content.insert(
347 len_before_context,
348 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
349 );
350 message
351 .content
352 .push(language_model::MessageContent::Text("</context>".into()));
353 }
354
355 message
356 }
357}
358
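/// Builds a Markdown code block tag such as `rs src/main.rs:10-20` from a path and an
/// optional zero-based line range (rendered one-based).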
359fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
360 let mut result = String::new();
361
362 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
363 let _ = write!(result, "{} ", extension);
364 }
365
366 let _ = write!(result, "{}", full_path.display());
367
368 if let Some(range) = line_range {
369 if range.start() == range.end() {
370 let _ = write!(result, ":{}", range.start() + 1);
371 } else {
372 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
373 }
374 }
375
376 result
377}
378
379impl AgentMessage {
380 pub fn to_markdown(&self) -> String {
381 let mut markdown = String::from("## Assistant\n\n");
382
383 for content in &self.content {
384 match content {
385 AgentMessageContent::Text(text) => {
386 markdown.push_str(text);
387 markdown.push('\n');
388 }
389 AgentMessageContent::Thinking { text, .. } => {
390 markdown.push_str("<think>");
391 markdown.push_str(text);
392 markdown.push_str("</think>\n");
393 }
394 AgentMessageContent::RedactedThinking(_) => {
395 markdown.push_str("<redacted_thinking />\n")
396 }
397 AgentMessageContent::ToolUse(tool_use) => {
398 markdown.push_str(&format!(
399 "**Tool Use**: {} (ID: {})\n",
400 tool_use.name, tool_use.id
401 ));
402 markdown.push_str(&format!(
403 "{}\n",
404 MarkdownCodeBlock {
405 tag: "json",
406 text: &format!("{:#}", tool_use.input)
407 }
408 ));
409 }
410 }
411 }
412
413 for tool_result in self.tool_results.values() {
414 markdown.push_str(&format!(
415 "**Tool Result**: {} (ID: {})\n\n",
416 tool_result.tool_name, tool_result.tool_use_id
417 ));
418 if tool_result.is_error {
419 markdown.push_str("**ERROR:**\n");
420 }
421
422 match &tool_result.content {
423 LanguageModelToolResultContent::Text(text) => {
424 writeln!(markdown, "{text}\n").ok();
425 }
426 LanguageModelToolResultContent::Image(_) => {
427 writeln!(markdown, "<image />\n").ok();
428 }
429 }
430
431 if let Some(output) = tool_result.output.as_ref() {
432 writeln!(
433 markdown,
434 "**Debug Output**:\n\n```json\n{}\n```\n",
435 serde_json::to_string_pretty(output).unwrap()
436 )
437 .unwrap();
438 }
439 }
440
441 markdown
442 }
443
444 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
445 let mut assistant_message = LanguageModelRequestMessage {
446 role: Role::Assistant,
447 content: Vec::with_capacity(self.content.len()),
448 cache: false,
449 };
450 for chunk in &self.content {
451 match chunk {
452 AgentMessageContent::Text(text) => {
453 assistant_message
454 .content
455 .push(language_model::MessageContent::Text(text.clone()));
456 }
457 AgentMessageContent::Thinking { text, signature } => {
458 assistant_message
459 .content
460 .push(language_model::MessageContent::Thinking {
461 text: text.clone(),
462 signature: signature.clone(),
463 });
464 }
465 AgentMessageContent::RedactedThinking(value) => {
466 assistant_message.content.push(
467 language_model::MessageContent::RedactedThinking(value.clone()),
468 );
469 }
470 AgentMessageContent::ToolUse(tool_use) => {
471 if self.tool_results.contains_key(&tool_use.id) {
472 assistant_message
473 .content
474 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
475 }
476 }
477 };
478 }
479
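        // Tool results are sent back to the model in a follow-up user-role message;
        // tool uses without a recorded result were skipped above so the request stays
        // consistent.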
480 let mut user_message = LanguageModelRequestMessage {
481 role: Role::User,
482 content: Vec::new(),
483 cache: false,
484 };
485
486 for tool_result in self.tool_results.values() {
487 user_message
488 .content
489 .push(language_model::MessageContent::ToolResult(
490 tool_result.clone(),
491 ));
492 }
493
494 let mut messages = Vec::new();
495 if !assistant_message.content.is_empty() {
496 messages.push(assistant_message);
497 }
498 if !user_message.content.is_empty() {
499 messages.push(user_message);
500 }
501 messages
502 }
503}
504
505#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
506pub struct AgentMessage {
507 pub content: Vec<AgentMessageContent>,
508 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
509}
510
511#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
512pub enum AgentMessageContent {
513 Text(String),
514 Thinking {
515 text: String,
516 signature: Option<String>,
517 },
518 RedactedThinking(String),
519 ToolUse(LanguageModelToolUse),
520}
521
522#[derive(Debug)]
523pub enum ThreadEvent {
524 UserMessage(UserMessage),
525 AgentText(String),
526 AgentThinking(String),
527 ToolCall(acp::ToolCall),
528 ToolCallUpdate(acp_thread::ToolCallUpdate),
529 ToolCallAuthorization(ToolCallAuthorization),
530 Retry(acp_thread::RetryStatus),
531 Stop(acp::StopReason),
532}
533
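/// A pending request for the user to authorize a tool call, carrying the permission
/// options to present and a channel over which the chosen option is sent back.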
534#[derive(Debug)]
535pub struct ToolCallAuthorization {
536 pub tool_call: acp::ToolCallUpdate,
537 pub options: Vec<acp::PermissionOption>,
538 pub response: oneshot::Sender<acp::PermissionOptionId>,
539}
540
541#[derive(Debug, thiserror::Error)]
542enum CompletionError {
543 #[error("max tokens")]
544 MaxTokens,
545 #[error("refusal")]
546 Refusal,
547 #[error(transparent)]
548 Other(#[from] anyhow::Error),
549}
550
551pub struct Thread {
552 id: acp::SessionId,
553 prompt_id: PromptId,
554 updated_at: DateTime<Utc>,
555 title: Option<SharedString>,
556 pending_title_generation: Option<Task<()>>,
557 summary: Option<SharedString>,
558 messages: Vec<Message>,
559 completion_mode: CompletionMode,
    /// Holds the task that handles the agent interaction until the end of the turn.
    /// It survives across multiple requests while the model performs tool calls
    /// and we run those tools and report their results.
563 running_turn: Option<RunningTurn>,
564 pending_message: Option<AgentMessage>,
565 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
566 tool_use_limit_reached: bool,
567 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
568 #[allow(unused)]
569 cumulative_token_usage: TokenUsage,
570 #[allow(unused)]
571 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
572 context_server_registry: Entity<ContextServerRegistry>,
573 profile_id: AgentProfileId,
574 project_context: Entity<ProjectContext>,
575 templates: Arc<Templates>,
576 model: Option<Arc<dyn LanguageModel>>,
577 summarization_model: Option<Arc<dyn LanguageModel>>,
578 pub(crate) project: Entity<Project>,
579 pub(crate) action_log: Entity<ActionLog>,
580}
581
582impl Thread {
583 pub fn new(
584 project: Entity<Project>,
585 project_context: Entity<ProjectContext>,
586 context_server_registry: Entity<ContextServerRegistry>,
587 templates: Arc<Templates>,
588 model: Option<Arc<dyn LanguageModel>>,
589 cx: &mut Context<Self>,
590 ) -> Self {
591 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
592 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
593 Self {
594 id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
595 prompt_id: PromptId::new(),
596 updated_at: Utc::now(),
597 title: None,
598 pending_title_generation: None,
599 summary: None,
600 messages: Vec::new(),
601 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
602 running_turn: None,
603 pending_message: None,
604 tools: BTreeMap::default(),
605 tool_use_limit_reached: false,
606 request_token_usage: HashMap::default(),
607 cumulative_token_usage: TokenUsage::default(),
608 initial_project_snapshot: {
609 let project_snapshot = Self::project_snapshot(project.clone(), cx);
610 cx.foreground_executor()
611 .spawn(async move { Some(project_snapshot.await) })
612 .shared()
613 },
614 context_server_registry,
615 profile_id,
616 project_context,
617 templates,
618 model,
619 summarization_model: None,
620 project,
621 action_log,
622 }
623 }
624
625 pub fn id(&self) -> &acp::SessionId {
626 &self.id
627 }
628
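    /// Re-emits the stored conversation as a stream of thread events so a client can
    /// rebuild its view of the thread.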
629 pub fn replay(
630 &mut self,
631 cx: &mut Context<Self>,
632 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
633 let (tx, rx) = mpsc::unbounded();
634 let stream = ThreadEventStream(tx);
635 for message in &self.messages {
636 match message {
637 Message::User(user_message) => stream.send_user_message(user_message),
638 Message::Agent(assistant_message) => {
639 for content in &assistant_message.content {
640 match content {
641 AgentMessageContent::Text(text) => stream.send_text(text),
642 AgentMessageContent::Thinking { text, .. } => {
643 stream.send_thinking(text)
644 }
645 AgentMessageContent::RedactedThinking(_) => {}
646 AgentMessageContent::ToolUse(tool_use) => {
647 self.replay_tool_call(
648 tool_use,
649 assistant_message.tool_results.get(&tool_use.id),
650 &stream,
651 cx,
652 );
653 }
654 }
655 }
656 }
657 Message::Resume => {}
658 }
659 }
660 rx
661 }
662
663 fn replay_tool_call(
664 &self,
665 tool_use: &LanguageModelToolUse,
666 tool_result: Option<&LanguageModelToolResult>,
667 stream: &ThreadEventStream,
668 cx: &mut Context<Self>,
669 ) {
670 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
671 self.context_server_registry
672 .read(cx)
673 .servers()
674 .find_map(|(_, tools)| {
675 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
676 Some(tool.clone())
677 } else {
678 None
679 }
680 })
681 });
682
683 let Some(tool) = tool else {
684 stream
685 .0
686 .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
687 id: acp::ToolCallId(tool_use.id.to_string().into()),
688 title: tool_use.name.to_string(),
689 kind: acp::ToolKind::Other,
690 status: acp::ToolCallStatus::Failed,
691 content: Vec::new(),
692 locations: Vec::new(),
693 raw_input: Some(tool_use.input.clone()),
694 raw_output: None,
695 })))
696 .ok();
697 return;
698 };
699
700 let title = tool.initial_title(tool_use.input.clone());
701 let kind = tool.kind();
702 stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
703
704 let output = tool_result
705 .as_ref()
706 .and_then(|result| result.output.clone());
707 if let Some(output) = output.clone() {
708 let tool_event_stream = ToolCallEventStream::new(
709 tool_use.id.clone(),
710 stream.clone(),
711 Some(self.project.read(cx).fs().clone()),
712 );
713 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
714 .log_err();
715 }
716
717 stream.update_tool_call_fields(
718 &tool_use.id,
719 acp::ToolCallUpdateFields {
720 status: Some(acp::ToolCallStatus::Completed),
721 raw_output: output,
722 ..Default::default()
723 },
724 );
725 }
726
727 pub fn from_db(
728 id: acp::SessionId,
729 db_thread: DbThread,
730 project: Entity<Project>,
731 project_context: Entity<ProjectContext>,
732 context_server_registry: Entity<ContextServerRegistry>,
733 action_log: Entity<ActionLog>,
734 templates: Arc<Templates>,
735 cx: &mut Context<Self>,
736 ) -> Self {
737 let profile_id = db_thread
738 .profile
739 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
740 let model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
741 db_thread
742 .model
743 .and_then(|model| {
744 let model = SelectedModel {
745 provider: model.provider.clone().into(),
746 model: model.model.into(),
747 };
748 registry.select_model(&model, cx)
749 })
750 .or_else(|| registry.default_model())
751 .map(|model| model.model)
752 });
753
754 Self {
755 id,
756 prompt_id: PromptId::new(),
757 title: if db_thread.title.is_empty() {
758 None
759 } else {
760 Some(db_thread.title.clone())
761 },
762 pending_title_generation: None,
763 summary: db_thread.detailed_summary,
764 messages: db_thread.messages,
765 completion_mode: db_thread.completion_mode.unwrap_or_default(),
766 running_turn: None,
767 pending_message: None,
768 tools: BTreeMap::default(),
769 tool_use_limit_reached: false,
770 request_token_usage: db_thread.request_token_usage.clone(),
771 cumulative_token_usage: db_thread.cumulative_token_usage,
772 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
773 context_server_registry,
774 profile_id,
775 project_context,
776 templates,
777 model,
778 summarization_model: None,
779 project,
780 action_log,
781 updated_at: db_thread.updated_at,
782 }
783 }
784
785 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
786 let initial_project_snapshot = self.initial_project_snapshot.clone();
787 let mut thread = DbThread {
788 title: self.title(),
789 messages: self.messages.clone(),
790 updated_at: self.updated_at,
791 detailed_summary: self.summary.clone(),
792 initial_project_snapshot: None,
793 cumulative_token_usage: self.cumulative_token_usage,
794 request_token_usage: self.request_token_usage.clone(),
795 model: self.model.as_ref().map(|model| DbLanguageModel {
796 provider: model.provider_id().to_string(),
797 model: model.name().0.to_string(),
798 }),
799 completion_mode: Some(self.completion_mode),
800 profile: Some(self.profile_id.clone()),
801 };
802
803 cx.background_spawn(async move {
804 let initial_project_snapshot = initial_project_snapshot.await;
805 thread.initial_project_snapshot = initial_project_snapshot;
806 thread
807 })
808 }
809
810 /// Create a snapshot of the current project state including git information and unsaved buffers.
811 fn project_snapshot(
812 project: Entity<Project>,
813 cx: &mut Context<Self>,
814 ) -> Task<Arc<agent::thread::ProjectSnapshot>> {
815 let git_store = project.read(cx).git_store().clone();
816 let worktree_snapshots: Vec<_> = project
817 .read(cx)
818 .visible_worktrees(cx)
819 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
820 .collect();
821
822 cx.spawn(async move |_, cx| {
823 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
824
825 let mut unsaved_buffers = Vec::new();
826 cx.update(|app_cx| {
827 let buffer_store = project.read(app_cx).buffer_store();
828 for buffer_handle in buffer_store.read(app_cx).buffers() {
829 let buffer = buffer_handle.read(app_cx);
830 if buffer.is_dirty()
831 && let Some(file) = buffer.file()
832 {
833 let path = file.path().to_string_lossy().to_string();
834 unsaved_buffers.push(path);
835 }
836 }
837 })
838 .ok();
839
840 Arc::new(ProjectSnapshot {
841 worktree_snapshots,
842 unsaved_buffer_paths: unsaved_buffers,
843 timestamp: Utc::now(),
844 })
845 })
846 }
847
848 fn worktree_snapshot(
849 worktree: Entity<project::Worktree>,
850 git_store: Entity<GitStore>,
851 cx: &App,
852 ) -> Task<agent::thread::WorktreeSnapshot> {
853 cx.spawn(async move |cx| {
854 // Get worktree path and snapshot
855 let worktree_info = cx.update(|app_cx| {
856 let worktree = worktree.read(app_cx);
857 let path = worktree.abs_path().to_string_lossy().to_string();
858 let snapshot = worktree.snapshot();
859 (path, snapshot)
860 });
861
862 let Ok((worktree_path, _snapshot)) = worktree_info else {
863 return WorktreeSnapshot {
864 worktree_path: String::new(),
865 git_state: None,
866 };
867 };
868
869 let git_state = git_store
870 .update(cx, |git_store, cx| {
871 git_store
872 .repositories()
873 .values()
874 .find(|repo| {
875 repo.read(cx)
876 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
877 .is_some()
878 })
879 .cloned()
880 })
881 .ok()
882 .flatten()
883 .map(|repo| {
884 repo.update(cx, |repo, _| {
885 let current_branch =
886 repo.branch.as_ref().map(|branch| branch.name().to_owned());
887 repo.send_job(None, |state, _| async move {
888 let RepositoryState::Local { backend, .. } = state else {
889 return GitState {
890 remote_url: None,
891 head_sha: None,
892 current_branch,
893 diff: None,
894 };
895 };
896
897 let remote_url = backend.remote_url("origin");
898 let head_sha = backend.head_sha().await;
899 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
900
901 GitState {
902 remote_url,
903 head_sha,
904 current_branch,
905 diff,
906 }
907 })
908 })
909 });
910
911 let git_state = match git_state {
912 Some(git_state) => match git_state.ok() {
913 Some(git_state) => git_state.await.ok(),
914 None => None,
915 },
916 None => None,
917 };
918
919 WorktreeSnapshot {
920 worktree_path,
921 git_state,
922 }
923 })
924 }
925
926 pub fn project_context(&self) -> &Entity<ProjectContext> {
927 &self.project_context
928 }
929
930 pub fn project(&self) -> &Entity<Project> {
931 &self.project
932 }
933
934 pub fn action_log(&self) -> &Entity<ActionLog> {
935 &self.action_log
936 }
937
938 pub fn is_empty(&self) -> bool {
939 self.messages.is_empty() && self.title.is_none()
940 }
941
942 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
943 self.model.as_ref()
944 }
945
946 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
947 let old_usage = self.latest_token_usage();
948 self.model = Some(model);
949 let new_usage = self.latest_token_usage();
950 if old_usage != new_usage {
951 cx.emit(TokenUsageUpdated(new_usage));
952 }
953 cx.notify()
954 }
955
956 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
957 self.summarization_model.as_ref()
958 }
959
960 pub fn set_summarization_model(
961 &mut self,
962 model: Option<Arc<dyn LanguageModel>>,
963 cx: &mut Context<Self>,
964 ) {
965 self.summarization_model = model;
966 cx.notify()
967 }
968
969 pub fn completion_mode(&self) -> CompletionMode {
970 self.completion_mode
971 }
972
973 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
974 let old_usage = self.latest_token_usage();
975 self.completion_mode = mode;
976 let new_usage = self.latest_token_usage();
977 if old_usage != new_usage {
978 cx.emit(TokenUsageUpdated(new_usage));
979 }
980 cx.notify()
981 }
982
983 #[cfg(any(test, feature = "test-support"))]
984 pub fn last_message(&self) -> Option<Message> {
985 if let Some(message) = self.pending_message.clone() {
986 Some(Message::Agent(message))
987 } else {
988 self.messages.last().cloned()
989 }
990 }
991
992 pub fn add_default_tools(&mut self, cx: &mut Context<Self>) {
993 let language_registry = self.project.read(cx).languages().clone();
994 self.add_tool(CopyPathTool::new(self.project.clone()));
995 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
996 self.add_tool(DeletePathTool::new(
997 self.project.clone(),
998 self.action_log.clone(),
999 ));
1000 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1001 self.add_tool(EditFileTool::new(cx.weak_entity(), language_registry));
1002 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1003 self.add_tool(FindPathTool::new(self.project.clone()));
1004 self.add_tool(GrepTool::new(self.project.clone()));
1005 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1006 self.add_tool(MovePathTool::new(self.project.clone()));
1007 self.add_tool(NowTool);
1008 self.add_tool(OpenTool::new(self.project.clone()));
1009 self.add_tool(ReadFileTool::new(
1010 self.project.clone(),
1011 self.action_log.clone(),
1012 ));
1013 self.add_tool(TerminalTool::new(self.project.clone(), cx));
1014 self.add_tool(ThinkingTool);
1015 self.add_tool(WebSearchTool);
1016 }
1017
1018 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1019 self.tools.insert(T::name().into(), tool.erase());
1020 }
1021
1022 pub fn remove_tool(&mut self, name: &str) -> bool {
1023 self.tools.remove(name).is_some()
1024 }
1025
1026 pub fn profile(&self) -> &AgentProfileId {
1027 &self.profile_id
1028 }
1029
1030 pub fn set_profile(&mut self, profile_id: AgentProfileId) {
1031 self.profile_id = profile_id;
1032 }
1033
1034 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1035 if let Some(running_turn) = self.running_turn.take() {
1036 running_turn.cancel();
1037 }
1038 self.flush_pending_message(cx);
1039 }
1040
1041 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1042 let Some(last_user_message) = self.last_user_message() else {
1043 return;
1044 };
1045
1046 self.request_token_usage
1047 .insert(last_user_message.id.clone(), update);
1048 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1049 cx.notify();
1050 }
1051
1052 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1053 self.cancel(cx);
1054 let Some(position) = self.messages.iter().position(
1055 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1056 ) else {
1057 return Err(anyhow!("Message not found"));
1058 };
1059
1060 for message in self.messages.drain(position..) {
1061 match message {
1062 Message::User(message) => {
1063 self.request_token_usage.remove(&message.id);
1064 }
1065 Message::Agent(_) | Message::Resume => {}
1066 }
1067 }
1068 self.summary = None;
1069 cx.notify();
1070 Ok(())
1071 }
1072
1073 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1074 let last_user_message = self.last_user_message()?;
1075 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1076 let model = self.model.clone()?;
1077
1078 Some(acp_thread::TokenUsage {
1079 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1080 used_tokens: tokens.total_tokens(),
1081 })
1082 }
1083
1084 pub fn resume(
1085 &mut self,
1086 cx: &mut Context<Self>,
1087 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1088 self.messages.push(Message::Resume);
1089 cx.notify();
1090
1091 log::debug!("Total messages in thread: {}", self.messages.len());
1092 self.run_turn(cx)
1093 }
1094
    /// Sending a message results in the model streaming a response, which may include tool calls.
    /// When it calls tools, the model stops and waits for the outstanding tool calls to complete and for their results to be sent back.
    /// The returned channel reports every stop that occurs before the model errors or ends its turn.
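    ///
    /// A minimal usage sketch (caller-side names here are hypothetical, not part of this file):
    ///
    /// ```ignore
    /// let mut events = thread.update(cx, |thread, cx| {
    ///     thread.send(
    ///         message_id,
    ///         vec![UserMessageContent::Text("Hello!".into())],
    ///         cx,
    ///     )
    /// })?;
    /// while let Some(event) = events.next().await {
    ///     match event? {
    ///         ThreadEvent::AgentText(text) => print!("{text}"),
    ///         ThreadEvent::Stop(_) => break,
    ///         _ => {}
    ///     }
    /// }
    /// ```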
1098 pub fn send<T>(
1099 &mut self,
1100 id: UserMessageId,
1101 content: impl IntoIterator<Item = T>,
1102 cx: &mut Context<Self>,
1103 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1104 where
1105 T: Into<UserMessageContent>,
1106 {
1107 let model = self.model().context("No language model configured")?;
1108
1109 log::info!("Thread::send called with model: {}", model.name().0);
1110 self.advance_prompt_id();
1111
1112 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1113 log::debug!("Thread::send content: {:?}", content);
1114
1115 self.messages
1116 .push(Message::User(UserMessage { id, content }));
1117 cx.notify();
1118
1119 log::debug!("Total messages in thread: {}", self.messages.len());
1120 self.run_turn(cx)
1121 }
1122
1123 fn run_turn(
1124 &mut self,
1125 cx: &mut Context<Self>,
1126 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1127 self.cancel(cx);
1128
1129 let model = self.model.clone().context("No language model configured")?;
1130 let profile = AgentSettings::get_global(cx)
1131 .profiles
1132 .get(&self.profile_id)
1133 .context("Profile not found")?;
1134 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1135 let event_stream = ThreadEventStream(events_tx);
1136 let message_ix = self.messages.len().saturating_sub(1);
1137 self.tool_use_limit_reached = false;
1138 self.summary = None;
1139 self.running_turn = Some(RunningTurn {
1140 event_stream: event_stream.clone(),
1141 tools: self.enabled_tools(profile, &model, cx),
1142 _task: cx.spawn(async move |this, cx| {
1143 log::debug!("Starting agent turn execution");
1144
1145 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1146 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1147
1148 match turn_result {
1149 Ok(()) => {
1150 log::debug!("Turn execution completed");
1151 event_stream.send_stop(acp::StopReason::EndTurn);
1152 }
1153 Err(error) => {
1154 log::error!("Turn execution failed: {:?}", error);
1155 match error.downcast::<CompletionError>() {
1156 Ok(CompletionError::Refusal) => {
1157 event_stream.send_stop(acp::StopReason::Refusal);
1158 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1159 }
1160 Ok(CompletionError::MaxTokens) => {
1161 event_stream.send_stop(acp::StopReason::MaxTokens);
1162 }
1163 Ok(CompletionError::Other(error)) | Err(error) => {
1164 event_stream.send_error(error);
1165 }
1166 }
1167 }
1168 }
1169
1170 _ = this.update(cx, |this, _| this.running_turn.take());
1171 }),
1172 });
1173 Ok(events_rx)
1174 }
1175
1176 async fn run_turn_internal(
1177 this: &WeakEntity<Self>,
1178 model: Arc<dyn LanguageModel>,
1179 event_stream: &ThreadEventStream,
1180 cx: &mut AsyncApp,
1181 ) -> Result<()> {
1182 let mut attempt = 0;
1183 let mut intent = CompletionIntent::UserPrompt;
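        // Each iteration issues one completion request. The loop continues as long as the
        // model requests tool calls (their results are sent back with the `ToolResults`
        // intent) or a retryable error occurs; it exits when the turn ends, retries are
        // exhausted, or the tool-use limit is reached.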
1184 loop {
1185 let request =
1186 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1187
1188 telemetry::event!(
1189 "Agent Thread Completion",
1190 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1191 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1192 model = model.telemetry_id(),
1193 model_provider = model.provider_id().to_string(),
1194 attempt
1195 );
1196
1197 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1198 let mut events = model
1199 .stream_completion(request, cx)
1200 .await
1201 .map_err(|error| anyhow!(error))?;
1202 let mut tool_results = FuturesUnordered::new();
1203 let mut error = None;
1204 while let Some(event) = events.next().await {
1205 log::trace!("Received completion event: {:?}", event);
1206 match event {
1207 Ok(event) => {
1208 tool_results.extend(this.update(cx, |this, cx| {
1209 this.handle_completion_event(event, event_stream, cx)
1210 })??);
1211 }
1212 Err(err) => {
1213 error = Some(err);
1214 break;
1215 }
1216 }
1217 }
1218
1219 let end_turn = tool_results.is_empty();
1220 while let Some(tool_result) = tool_results.next().await {
1221 log::debug!("Tool finished {:?}", tool_result);
1222
1223 event_stream.update_tool_call_fields(
1224 &tool_result.tool_use_id,
1225 acp::ToolCallUpdateFields {
1226 status: Some(if tool_result.is_error {
1227 acp::ToolCallStatus::Failed
1228 } else {
1229 acp::ToolCallStatus::Completed
1230 }),
1231 raw_output: tool_result.output.clone(),
1232 ..Default::default()
1233 },
1234 );
1235 this.update(cx, |this, _cx| {
1236 this.pending_message()
1237 .tool_results
1238 .insert(tool_result.tool_use_id.clone(), tool_result);
1239 })?;
1240 }
1241
1242 this.update(cx, |this, cx| {
1243 this.flush_pending_message(cx);
1244 if this.title.is_none() && this.pending_title_generation.is_none() {
1245 this.generate_title(cx);
1246 }
1247 })?;
1248
1249 if let Some(error) = error {
1250 attempt += 1;
1251 let retry =
1252 this.update(cx, |this, _| this.handle_completion_error(error, attempt))??;
1253 let timer = cx.background_executor().timer(retry.duration);
1254 event_stream.send_retry(retry);
1255 timer.await;
1256 this.update(cx, |this, _cx| {
1257 if let Some(Message::Agent(message)) = this.messages.last() {
1258 if message.tool_results.is_empty() {
1259 intent = CompletionIntent::UserPrompt;
1260 this.messages.push(Message::Resume);
1261 }
1262 }
1263 })?;
1264 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1265 return Err(language_model::ToolUseLimitReachedError.into());
1266 } else if end_turn {
1267 return Ok(());
1268 } else {
1269 intent = CompletionIntent::ToolResults;
1270 attempt = 0;
1271 }
1272 }
1273 }
1274
1275 fn handle_completion_error(
1276 &mut self,
1277 error: LanguageModelCompletionError,
1278 attempt: u8,
1279 ) -> Result<acp_thread::RetryStatus> {
1280 if self.completion_mode == CompletionMode::Normal {
1281 return Err(anyhow!(error));
1282 }
1283
1284 let Some(strategy) = Self::retry_strategy_for(&error) else {
1285 return Err(anyhow!(error));
1286 };
1287
1288 let max_attempts = match &strategy {
1289 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1290 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1291 };
1292
1293 if attempt > max_attempts {
1294 return Err(anyhow!(error));
1295 }
1296
1297 let delay = match &strategy {
1298 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1299 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1300 Duration::from_secs(delay_secs)
1301 }
1302 RetryStrategy::Fixed { delay, .. } => *delay,
1303 };
1304 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1305
1306 Ok(acp_thread::RetryStatus {
1307 last_error: error.to_string().into(),
1308 attempt: attempt as usize,
1309 max_attempts: max_attempts as usize,
1310 started_at: Instant::now(),
1311 duration: delay,
1312 })
1313 }
1314
1315 /// A helper method that's called on every streamed completion event.
1316 /// Returns an optional tool result task, which the main agentic loop will
1317 /// send back to the model when it resolves.
1318 fn handle_completion_event(
1319 &mut self,
1320 event: LanguageModelCompletionEvent,
1321 event_stream: &ThreadEventStream,
1322 cx: &mut Context<Self>,
1323 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1324 log::trace!("Handling streamed completion event: {:?}", event);
1325 use LanguageModelCompletionEvent::*;
1326
1327 match event {
1328 StartMessage { .. } => {
1329 self.flush_pending_message(cx);
1330 self.pending_message = Some(AgentMessage::default());
1331 }
1332 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1333 Thinking { text, signature } => {
1334 self.handle_thinking_event(text, signature, event_stream, cx)
1335 }
1336 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1337 ToolUse(tool_use) => {
1338 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1339 }
1340 ToolUseJsonParseError {
1341 id,
1342 tool_name,
1343 raw_input,
1344 json_parse_error,
1345 } => {
1346 return Ok(Some(Task::ready(
1347 self.handle_tool_use_json_parse_error_event(
1348 id,
1349 tool_name,
1350 raw_input,
1351 json_parse_error,
1352 ),
1353 )));
1354 }
1355 UsageUpdate(usage) => {
1356 telemetry::event!(
1357 "Agent Thread Completion Usage Updated",
1358 thread_id = self.id.to_string(),
1359 prompt_id = self.prompt_id.to_string(),
1360 model = self.model.as_ref().map(|m| m.telemetry_id()),
1361 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1362 input_tokens = usage.input_tokens,
1363 output_tokens = usage.output_tokens,
1364 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1365 cache_read_input_tokens = usage.cache_read_input_tokens,
1366 );
1367 self.update_token_usage(usage, cx);
1368 }
1369 StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => {
1370 self.update_model_request_usage(amount, limit, cx);
1371 }
1372 StatusUpdate(
1373 CompletionRequestStatus::Started
1374 | CompletionRequestStatus::Queued { .. }
1375 | CompletionRequestStatus::Failed { .. },
1376 ) => {}
1377 StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => {
1378 self.tool_use_limit_reached = true;
1379 }
1380 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1381 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1382 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1383 }
1384
1385 Ok(None)
1386 }
1387
1388 fn handle_text_event(
1389 &mut self,
1390 new_text: String,
1391 event_stream: &ThreadEventStream,
1392 cx: &mut Context<Self>,
1393 ) {
1394 event_stream.send_text(&new_text);
1395
1396 let last_message = self.pending_message();
1397 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1398 text.push_str(&new_text);
1399 } else {
1400 last_message
1401 .content
1402 .push(AgentMessageContent::Text(new_text));
1403 }
1404
1405 cx.notify();
1406 }
1407
1408 fn handle_thinking_event(
1409 &mut self,
1410 new_text: String,
1411 new_signature: Option<String>,
1412 event_stream: &ThreadEventStream,
1413 cx: &mut Context<Self>,
1414 ) {
1415 event_stream.send_thinking(&new_text);
1416
1417 let last_message = self.pending_message();
1418 if let Some(AgentMessageContent::Thinking { text, signature }) =
1419 last_message.content.last_mut()
1420 {
1421 text.push_str(&new_text);
1422 *signature = new_signature.or(signature.take());
1423 } else {
1424 last_message.content.push(AgentMessageContent::Thinking {
1425 text: new_text,
1426 signature: new_signature,
1427 });
1428 }
1429
1430 cx.notify();
1431 }
1432
1433 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1434 let last_message = self.pending_message();
1435 last_message
1436 .content
1437 .push(AgentMessageContent::RedactedThinking(data));
1438 cx.notify();
1439 }
1440
1441 fn handle_tool_use_event(
1442 &mut self,
1443 tool_use: LanguageModelToolUse,
1444 event_stream: &ThreadEventStream,
1445 cx: &mut Context<Self>,
1446 ) -> Option<Task<LanguageModelToolResult>> {
1447 cx.notify();
1448
1449 let tool = self.tool(tool_use.name.as_ref());
1450 let mut title = SharedString::from(&tool_use.name);
1451 let mut kind = acp::ToolKind::Other;
1452 if let Some(tool) = tool.as_ref() {
1453 title = tool.initial_title(tool_use.input.clone());
1454 kind = tool.kind();
1455 }
1456
1457 // Ensure the last message ends in the current tool use
1458 let last_message = self.pending_message();
1459 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1460 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1461 if last_tool_use.id == tool_use.id {
1462 *last_tool_use = tool_use.clone();
1463 false
1464 } else {
1465 true
1466 }
1467 } else {
1468 true
1469 }
1470 });
1471
1472 if push_new_tool_use {
1473 event_stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
1474 last_message
1475 .content
1476 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1477 } else {
1478 event_stream.update_tool_call_fields(
1479 &tool_use.id,
1480 acp::ToolCallUpdateFields {
1481 title: Some(title.into()),
1482 kind: Some(kind),
1483 raw_input: Some(tool_use.input.clone()),
1484 ..Default::default()
1485 },
1486 );
1487 }
1488
1489 if !tool_use.is_input_complete {
1490 return None;
1491 }
1492
1493 let Some(tool) = tool else {
1494 let content = format!("No tool named {} exists", tool_use.name);
1495 return Some(Task::ready(LanguageModelToolResult {
1496 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1497 tool_use_id: tool_use.id,
1498 tool_name: tool_use.name,
1499 is_error: true,
1500 output: None,
1501 }));
1502 };
1503
1504 let fs = self.project.read(cx).fs().clone();
1505 let tool_event_stream =
1506 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1507 tool_event_stream.update_fields(acp::ToolCallUpdateFields {
1508 status: Some(acp::ToolCallStatus::InProgress),
1509 ..Default::default()
1510 });
1511 let supports_images = self.model().is_some_and(|model| model.supports_images());
1512 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1513 log::debug!("Running tool {}", tool_use.name);
1514 Some(cx.foreground_executor().spawn(async move {
1515 let tool_result = tool_result.await.and_then(|output| {
1516 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1517 && !supports_images
1518 {
1519 return Err(anyhow!(
1520 "Attempted to read an image, but this model doesn't support it.",
1521 ));
1522 }
1523 Ok(output)
1524 });
1525
1526 match tool_result {
1527 Ok(output) => LanguageModelToolResult {
1528 tool_use_id: tool_use.id,
1529 tool_name: tool_use.name,
1530 is_error: false,
1531 content: output.llm_output,
1532 output: Some(output.raw_output),
1533 },
1534 Err(error) => LanguageModelToolResult {
1535 tool_use_id: tool_use.id,
1536 tool_name: tool_use.name,
1537 is_error: true,
1538 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1539 output: None,
1540 },
1541 }
1542 }))
1543 }
1544
1545 fn handle_tool_use_json_parse_error_event(
1546 &mut self,
1547 tool_use_id: LanguageModelToolUseId,
1548 tool_name: Arc<str>,
1549 raw_input: Arc<str>,
1550 json_parse_error: String,
1551 ) -> LanguageModelToolResult {
1552 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1553 LanguageModelToolResult {
1554 tool_use_id,
1555 tool_name,
1556 is_error: true,
1557 content: LanguageModelToolResultContent::Text(tool_output.into()),
1558 output: Some(serde_json::Value::String(raw_input.to_string())),
1559 }
1560 }
1561
1562 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1563 self.project
1564 .read(cx)
1565 .user_store()
1566 .update(cx, |user_store, cx| {
1567 user_store.update_model_request_usage(
1568 ModelRequestUsage(RequestUsage {
1569 amount: amount as i32,
1570 limit,
1571 }),
1572 cx,
1573 )
1574 });
1575 }
1576
1577 pub fn title(&self) -> SharedString {
1578 self.title.clone().unwrap_or("New Thread".into())
1579 }
1580
1581 pub fn summary(&mut self, cx: &mut Context<Self>) -> Task<Result<SharedString>> {
1582 if let Some(summary) = self.summary.as_ref() {
1583 return Task::ready(Ok(summary.clone()));
1584 }
1585 let Some(model) = self.summarization_model.clone() else {
1586 return Task::ready(Err(anyhow!("No summarization model available")));
1587 };
1588 let mut request = LanguageModelRequest {
1589 intent: Some(CompletionIntent::ThreadContextSummarization),
1590 temperature: AgentSettings::temperature_for_model(&model, cx),
1591 ..Default::default()
1592 };
1593
1594 for message in &self.messages {
1595 request.messages.extend(message.to_request());
1596 }
1597
1598 request.messages.push(LanguageModelRequestMessage {
1599 role: Role::User,
1600 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1601 cache: false,
1602 });
1603 cx.spawn(async move |this, cx| {
1604 let mut summary = String::new();
1605 let mut messages = model.stream_completion(request, cx).await?;
1606 while let Some(event) = messages.next().await {
1607 let event = event?;
1608 let text = match event {
1609 LanguageModelCompletionEvent::Text(text) => text,
1610 LanguageModelCompletionEvent::StatusUpdate(
1611 CompletionRequestStatus::UsageUpdated { amount, limit },
1612 ) => {
1613 this.update(cx, |thread, cx| {
1614 thread.update_model_request_usage(amount, limit, cx);
1615 })?;
1616 continue;
1617 }
1618 _ => continue,
1619 };
1620
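                // Keep only the first line of each streamed chunk.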
1621 let mut lines = text.lines();
1622 summary.extend(lines.next());
1623 }
1624
1625 log::debug!("Setting summary: {}", summary);
1626 let summary = SharedString::from(summary);
1627
1628 this.update(cx, |this, cx| {
1629 this.summary = Some(summary.clone());
1630 cx.notify()
1631 })?;
1632
1633 Ok(summary)
1634 })
1635 }
1636
1637 fn generate_title(&mut self, cx: &mut Context<Self>) {
1638 let Some(model) = self.summarization_model.clone() else {
1639 return;
1640 };
1641
1642 log::debug!(
1643 "Generating title with model: {:?}",
1644 self.summarization_model.as_ref().map(|model| model.name())
1645 );
1646 let mut request = LanguageModelRequest {
1647 intent: Some(CompletionIntent::ThreadSummarization),
1648 temperature: AgentSettings::temperature_for_model(&model, cx),
1649 ..Default::default()
1650 };
1651
1652 for message in &self.messages {
1653 request.messages.extend(message.to_request());
1654 }
1655
1656 request.messages.push(LanguageModelRequestMessage {
1657 role: Role::User,
1658 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1659 cache: false,
1660 });
1661 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1662 let mut title = String::new();
1663
1664 let generate = async {
1665 let mut messages = model.stream_completion(request, cx).await?;
1666 while let Some(event) = messages.next().await {
1667 let event = event?;
1668 let text = match event {
1669 LanguageModelCompletionEvent::Text(text) => text,
1670 LanguageModelCompletionEvent::StatusUpdate(
1671 CompletionRequestStatus::UsageUpdated { amount, limit },
1672 ) => {
1673 this.update(cx, |thread, cx| {
1674 thread.update_model_request_usage(amount, limit, cx);
1675 })?;
1676 continue;
1677 }
1678 _ => continue,
1679 };
1680
1681 let mut lines = text.lines();
1682 title.extend(lines.next());
1683
1684 // Stop if the LLM generated multiple lines.
1685 if lines.next().is_some() {
1686 break;
1687 }
1688 }
1689 anyhow::Ok(())
1690 };
1691
1692 if generate.await.context("failed to generate title").is_ok() {
1693 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1694 }
1695 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1696 }));
1697 }
1698
1699 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1700 self.pending_title_generation = None;
1701 if Some(&title) != self.title.as_ref() {
1702 self.title = Some(title);
1703 cx.emit(TitleUpdated);
1704 cx.notify();
1705 }
1706 }
1707
1708 fn last_user_message(&self) -> Option<&UserMessage> {
1709 self.messages
1710 .iter()
1711 .rev()
1712 .find_map(|message| match message {
1713 Message::User(user_message) => Some(user_message),
1714 Message::Agent(_) => None,
1715 Message::Resume => None,
1716 })
1717 }
1718
1719 fn pending_message(&mut self) -> &mut AgentMessage {
1720 self.pending_message.get_or_insert_default()
1721 }
1722
1723 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1724 let Some(mut message) = self.pending_message.take() else {
1725 return;
1726 };
1727
1728 if message.content.is_empty() {
1729 return;
1730 }
1731
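        // Give every tool use that never produced a result (e.g. because the turn was
        // canceled) an error result, so the conversation remains valid for the model.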
1732 for content in &message.content {
1733 let AgentMessageContent::ToolUse(tool_use) = content else {
1734 continue;
1735 };
1736
1737 if !message.tool_results.contains_key(&tool_use.id) {
1738 message.tool_results.insert(
1739 tool_use.id.clone(),
1740 LanguageModelToolResult {
1741 tool_use_id: tool_use.id.clone(),
1742 tool_name: tool_use.name.clone(),
1743 is_error: true,
1744 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1745 output: None,
1746 },
1747 );
1748 }
1749 }
1750
1751 self.messages.push(Message::Agent(message));
1752 self.updated_at = Utc::now();
1753 self.summary = None;
1754 cx.notify()
1755 }
1756
1757 pub(crate) fn build_completion_request(
1758 &self,
1759 completion_intent: CompletionIntent,
1760 cx: &App,
1761 ) -> Result<LanguageModelRequest> {
1762 let model = self.model().context("No language model configured")?;
1763 let tools = if let Some(turn) = self.running_turn.as_ref() {
1764 turn.tools
1765 .iter()
1766 .filter_map(|(tool_name, tool)| {
1767 log::trace!("Including tool: {}", tool_name);
1768 Some(LanguageModelRequestTool {
1769 name: tool_name.to_string(),
1770 description: tool.description().to_string(),
1771 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1772 })
1773 })
1774 .collect::<Vec<_>>()
1775 } else {
1776 Vec::new()
1777 };
1778
1779 log::debug!("Building completion request");
1780 log::debug!("Completion intent: {:?}", completion_intent);
1781 log::debug!("Completion mode: {:?}", self.completion_mode);
1782
1783 let messages = self.build_request_messages(cx);
1784 log::debug!("Request will include {} messages", messages.len());
1785 log::debug!("Request includes {} tools", tools.len());
1786
1787 let request = LanguageModelRequest {
1788 thread_id: Some(self.id.to_string()),
1789 prompt_id: Some(self.prompt_id.to_string()),
1790 intent: Some(completion_intent),
1791 mode: Some(self.completion_mode.into()),
1792 messages,
1793 tools,
1794 tool_choice: None,
1795 stop: Vec::new(),
1796 temperature: AgentSettings::temperature_for_model(model, cx),
1797 thinking_allowed: true,
1798 };
1799
1800 log::debug!("Completion request built successfully");
1801 Ok(request)
1802 }
1803
1804 fn enabled_tools(
1805 &self,
1806 profile: &AgentProfileSettings,
1807 model: &Arc<dyn LanguageModel>,
1808 cx: &App,
1809 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
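        // Model providers commonly cap tool name lengths, so names are truncated to
        // `MAX_TOOL_NAME_LENGTH` before being sent.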
1810 fn truncate(tool_name: &SharedString) -> SharedString {
1811 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1812 let mut truncated = tool_name.to_string();
1813 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1814 truncated.into()
1815 } else {
1816 tool_name.clone()
1817 }
1818 }
1819
1820 let mut tools = self
1821 .tools
1822 .iter()
1823 .filter_map(|(tool_name, tool)| {
1824 if tool.supported_provider(&model.provider_id())
1825 && profile.is_tool_enabled(tool_name)
1826 {
1827 Some((truncate(tool_name), tool.clone()))
1828 } else {
1829 None
1830 }
1831 })
1832 .collect::<BTreeMap<_, _>>();
1833
1834 let mut context_server_tools = Vec::new();
1835 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1836 let mut duplicate_tool_names = HashSet::default();
1837 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1838 for (tool_name, tool) in server_tools {
1839 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1840 let tool_name = truncate(tool_name);
1841 if !seen_tools.insert(tool_name.clone()) {
1842 duplicate_tool_names.insert(tool_name.clone());
1843 }
1844 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1845 }
1846 }
1847 }
1848
1849 // When there are duplicate tool names, disambiguate by prefixing them
1850 // with the server ID. In the rare case there isn't enough space for the
1851 // disambiguated tool name, keep only the last tool with this name.
1852 for (server_id, tool_name, tool) in context_server_tools {
1853 if duplicate_tool_names.contains(&tool_name) {
1854 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1855 if available >= 2 {
1856 let mut disambiguated = server_id.0.to_string();
1857 disambiguated.truncate(available - 1);
1858 disambiguated.push('_');
1859 disambiguated.push_str(&tool_name);
1860 tools.insert(disambiguated.into(), tool.clone());
1861 } else {
1862 tools.insert(tool_name, tool.clone());
1863 }
1864 } else {
1865 tools.insert(tool_name, tool.clone());
1866 }
1867 }
1868
1869 tools
1870 }
1871
1872 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1873 self.running_turn.as_ref()?.tools.get(name).cloned()
1874 }
1875
1876 fn build_request_messages(&self, cx: &App) -> Vec<LanguageModelRequestMessage> {
1877 log::trace!(
1878 "Building request messages from {} thread messages",
1879 self.messages.len()
1880 );
1881
1882 let system_prompt = SystemPromptTemplate {
1883 project: self.project_context.read(cx),
1884 available_tools: self.tools.keys().cloned().collect(),
1885 }
1886 .render(&self.templates)
1887 .context("failed to build system prompt")
1888 .expect("Invalid template");
1889 let mut messages = vec![LanguageModelRequestMessage {
1890 role: Role::System,
1891 content: vec![system_prompt.into()],
1892 cache: false,
1893 }];
1894 for message in &self.messages {
1895 messages.extend(message.to_request());
1896 }
1897
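        // Mark the most recent persisted message as the cache point; the pending message
        // (still streaming) is appended after it and is not cached.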
1898 if let Some(last_message) = messages.last_mut() {
1899 last_message.cache = true;
1900 }
1901
1902 if let Some(message) = self.pending_message.as_ref() {
1903 messages.extend(message.to_request());
1904 }
1905
1906 messages
1907 }
1908
1909 pub fn to_markdown(&self) -> String {
1910 let mut markdown = String::new();
1911 for (ix, message) in self.messages.iter().enumerate() {
1912 if ix > 0 {
1913 markdown.push('\n');
1914 }
1915 markdown.push_str(&message.to_markdown());
1916 }
1917
1918 if let Some(message) = self.pending_message.as_ref() {
1919 markdown.push('\n');
1920 markdown.push_str(&message.to_markdown());
1921 }
1922
1923 markdown
1924 }
1925
1926 fn advance_prompt_id(&mut self) {
1927 self.prompt_id = PromptId::new();
1928 }
1929
1930 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
1931 use LanguageModelCompletionError::*;
1932 use http_client::StatusCode;
1933
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   either with exponential backoff or honoring the server's retry-after hint when one is provided.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry a smaller
        //   number of times (between 1 and 3, depending on the error).
1938 match error {
1939 HttpResponseError {
1940 status_code: StatusCode::TOO_MANY_REQUESTS,
1941 ..
1942 } => Some(RetryStrategy::ExponentialBackoff {
1943 initial_delay: BASE_RETRY_DELAY,
1944 max_attempts: MAX_RETRY_ATTEMPTS,
1945 }),
1946 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
1947 Some(RetryStrategy::Fixed {
1948 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1949 max_attempts: MAX_RETRY_ATTEMPTS,
1950 })
1951 }
1952 UpstreamProviderError {
1953 status,
1954 retry_after,
1955 ..
1956 } => match *status {
1957 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
1958 Some(RetryStrategy::Fixed {
1959 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1960 max_attempts: MAX_RETRY_ATTEMPTS,
1961 })
1962 }
1963 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
1964 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1965 // Internal Server Error could be anything, retry up to 3 times.
1966 max_attempts: 3,
1967 }),
1968 status => {
                    // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
                    // but we see it frequently in practice. See https://http.dev/529
1971 if status.as_u16() == 529 {
1972 Some(RetryStrategy::Fixed {
1973 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1974 max_attempts: MAX_RETRY_ATTEMPTS,
1975 })
1976 } else {
1977 Some(RetryStrategy::Fixed {
1978 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1979 max_attempts: 2,
1980 })
1981 }
1982 }
1983 },
1984 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
1985 delay: BASE_RETRY_DELAY,
1986 max_attempts: 3,
1987 }),
1988 ApiReadResponseError { .. }
1989 | HttpSend { .. }
1990 | DeserializeResponse { .. }
1991 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
1992 delay: BASE_RETRY_DELAY,
1993 max_attempts: 3,
1994 }),
1995 // Retrying these errors definitely shouldn't help.
1996 HttpResponseError {
1997 status_code:
1998 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
1999 ..
2000 }
2001 | AuthenticationError { .. }
2002 | PermissionError { .. }
2003 | NoApiKey { .. }
2004 | ApiEndpointNotFound { .. }
2005 | PromptTooLarge { .. } => None,
            // These errors might be transient, so retry them once.
2007 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2008 delay: BASE_RETRY_DELAY,
2009 max_attempts: 1,
2010 }),
            // Retry all other 4xx and 5xx errors up to 3 times.
2012 HttpResponseError { status_code, .. }
2013 if status_code.is_client_error() || status_code.is_server_error() =>
2014 {
2015 Some(RetryStrategy::Fixed {
2016 delay: BASE_RETRY_DELAY,
2017 max_attempts: 3,
2018 })
2019 }
2020 Other(err)
2021 if err.is::<language_model::PaymentRequiredError>()
2022 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2023 {
2024 // Retrying won't help for Payment Required or Model Request Limit errors (where
2025 // the user must upgrade to usage-based billing to get more requests, or else wait
2026 // for a significant amount of time for the request limit to reset).
2027 None
2028 }
            // Conservatively retry any remaining errors a couple of times, in case they're transient.
2030 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2031 delay: BASE_RETRY_DELAY,
2032 max_attempts: 2,
2033 }),
2034 }
2035 }
2036}
2037
2038struct RunningTurn {
2039 /// Holds the task that handles agent interaction until the end of the turn.
2040 /// Survives across multiple requests as the model performs tool calls and
    /// we run the tools and report their results.
2042 _task: Task<()>,
2043 /// The current event stream for the running turn. Used to report a final
2044 /// cancellation event if we cancel the turn.
2045 event_stream: ThreadEventStream,
2046 /// The tools that were enabled for this turn.
2047 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2048}
2049
2050impl RunningTurn {
2051 fn cancel(self) {
2052 log::debug!("Cancelling in progress turn");
2053 self.event_stream.send_canceled();
2054 }
2055}
2056
2057pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2058
2059impl EventEmitter<TokenUsageUpdated> for Thread {}
2060
2061pub struct TitleUpdated;
2062
2063impl EventEmitter<TitleUpdated> for Thread {}
2064
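/// A strongly-typed tool that the agent can call during a turn.
///
/// Implementors declare a serializable `Input`/`Output` pair and a `run`
/// method; `erase` then wraps the tool so it can be registered alongside other
/// tools as an [`AnyAgentTool`]. A minimal sketch of an implementation (the
/// tool name, input shape, and the `String` output type are illustrative
/// assumptions, not an existing tool):
///
/// ```ignore
/// use std::sync::Arc;
///
/// #[derive(serde::Serialize, serde::Deserialize, schemars::JsonSchema)]
/// struct EchoToolInput {
///     /// The text to echo back to the model.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
///
/// // Registering the erased tool (assuming a name-keyed tool map):
/// // tools.insert(EchoTool::name().into(), EchoTool.erase());
/// ```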
2065pub trait AgentTool
2066where
2067 Self: 'static + Sized,
2068{
2069 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2070 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2071
2072 fn name() -> &'static str;
2073
2074 fn description(&self) -> SharedString {
2075 let schema = schemars::schema_for!(Self::Input);
2076 SharedString::new(
2077 schema
2078 .get("description")
2079 .and_then(|description| description.as_str())
2080 .unwrap_or_default(),
2081 )
2082 }
2083
2084 fn kind() -> acp::ToolKind;
2085
2086 /// The initial tool title to display. Can be updated during the tool run.
2087 fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString;
2088
2089 /// Returns the JSON schema that describes the tool's input.
2090 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Schema {
2091 crate::tool_schema::root_schema_for::<Self::Input>(format)
2092 }
2093
    /// Some tools rely on a specific provider (e.g. for billing or other reasons).
    /// This lets a tool declare whether it is compatible with the given provider
    /// or should be filtered out of the enabled set.
2096 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2097 true
2098 }
2099
2100 /// Runs the tool with the provided input.
2101 fn run(
2102 self: Arc<Self>,
2103 input: Self::Input,
2104 event_stream: ToolCallEventStream,
2105 cx: &mut App,
2106 ) -> Task<Result<Self::Output>>;
2107
2108 /// Emits events for a previous execution of the tool.
2109 fn replay(
2110 &self,
2111 _input: Self::Input,
2112 _output: Self::Output,
2113 _event_stream: ToolCallEventStream,
2114 _cx: &mut App,
2115 ) -> Result<()> {
2116 Ok(())
2117 }
2118
2119 fn erase(self) -> Arc<dyn AnyAgentTool> {
2120 Arc::new(Erased(Arc::new(self)))
2121 }
2122}
2123
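/// Wrapper used to type-erase a concrete [`AgentTool`] so that heterogeneous
/// tools can be stored and invoked uniformly as `Arc<dyn AnyAgentTool>`.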
2124pub struct Erased<T>(T);
2125
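/// The result of a tool run: the content handed back to the language model,
/// plus the raw JSON value kept around for persistence and replay.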
2126pub struct AgentToolOutput {
2127 pub llm_output: LanguageModelToolResultContent,
2128 pub raw_output: serde_json::Value,
2129}
2130
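/// Object-safe counterpart of [`AgentTool`] that operates on untyped
/// `serde_json::Value`s, allowing tools with different input and output types
/// to share a single collection.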
2131pub trait AnyAgentTool {
2132 fn name(&self) -> SharedString;
2133 fn description(&self) -> SharedString;
2134 fn kind(&self) -> acp::ToolKind;
2135 fn initial_title(&self, input: serde_json::Value) -> SharedString;
2136 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2137 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2138 true
2139 }
2140 fn run(
2141 self: Arc<Self>,
2142 input: serde_json::Value,
2143 event_stream: ToolCallEventStream,
2144 cx: &mut App,
2145 ) -> Task<Result<AgentToolOutput>>;
2146 fn replay(
2147 &self,
2148 input: serde_json::Value,
2149 output: serde_json::Value,
2150 event_stream: ToolCallEventStream,
2151 cx: &mut App,
2152 ) -> Result<()>;
2153}
2154
2155impl<T> AnyAgentTool for Erased<Arc<T>>
2156where
2157 T: AgentTool,
2158{
2159 fn name(&self) -> SharedString {
2160 T::name().into()
2161 }
2162
2163 fn description(&self) -> SharedString {
2164 self.0.description()
2165 }
2166
2167 fn kind(&self) -> agent_client_protocol::ToolKind {
2168 T::kind()
2169 }
2170
2171 fn initial_title(&self, input: serde_json::Value) -> SharedString {
2172 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2173 self.0.initial_title(parsed_input)
2174 }
2175
2176 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2177 let mut json = serde_json::to_value(self.0.input_schema(format))?;
2178 adapt_schema_to_format(&mut json, format)?;
2179 Ok(json)
2180 }
2181
2182 fn supported_provider(&self, provider: &LanguageModelProviderId) -> bool {
2183 self.0.supported_provider(provider)
2184 }
2185
2186 fn run(
2187 self: Arc<Self>,
2188 input: serde_json::Value,
2189 event_stream: ToolCallEventStream,
2190 cx: &mut App,
2191 ) -> Task<Result<AgentToolOutput>> {
2192 cx.spawn(async move |cx| {
2193 let input = serde_json::from_value(input)?;
2194 let output = cx
2195 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2196 .await?;
2197 let raw_output = serde_json::to_value(&output)?;
2198 Ok(AgentToolOutput {
2199 llm_output: output.into(),
2200 raw_output,
2201 })
2202 })
2203 }
2204
2205 fn replay(
2206 &self,
2207 input: serde_json::Value,
2208 output: serde_json::Value,
2209 event_stream: ToolCallEventStream,
2210 cx: &mut App,
2211 ) -> Result<()> {
2212 let input = serde_json::from_value(input)?;
2213 let output = serde_json::from_value(output)?;
2214 self.0.replay(input, output, event_stream, cx)
2215 }
2216}
2217
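/// Sender half of a turn's event stream. Every send is best-effort: if the
/// receiving side has been dropped, events are silently discarded.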
2218#[derive(Clone)]
2219struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2220
2221impl ThreadEventStream {
2222 fn send_user_message(&self, message: &UserMessage) {
2223 self.0
2224 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2225 .ok();
2226 }
2227
2228 fn send_text(&self, text: &str) {
2229 self.0
2230 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2231 .ok();
2232 }
2233
2234 fn send_thinking(&self, text: &str) {
2235 self.0
2236 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2237 .ok();
2238 }
2239
2240 fn send_tool_call(
2241 &self,
2242 id: &LanguageModelToolUseId,
2243 title: SharedString,
2244 kind: acp::ToolKind,
2245 input: serde_json::Value,
2246 ) {
2247 self.0
2248 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2249 id,
2250 title.to_string(),
2251 kind,
2252 input,
2253 ))))
2254 .ok();
2255 }
2256
2257 fn initial_tool_call(
2258 id: &LanguageModelToolUseId,
2259 title: String,
2260 kind: acp::ToolKind,
2261 input: serde_json::Value,
2262 ) -> acp::ToolCall {
2263 acp::ToolCall {
2264 id: acp::ToolCallId(id.to_string().into()),
2265 title,
2266 kind,
2267 status: acp::ToolCallStatus::Pending,
2268 content: vec![],
2269 locations: vec![],
2270 raw_input: Some(input),
2271 raw_output: None,
2272 }
2273 }
2274
2275 fn update_tool_call_fields(
2276 &self,
2277 tool_use_id: &LanguageModelToolUseId,
2278 fields: acp::ToolCallUpdateFields,
2279 ) {
2280 self.0
2281 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2282 acp::ToolCallUpdate {
2283 id: acp::ToolCallId(tool_use_id.to_string().into()),
2284 fields,
2285 }
2286 .into(),
2287 )))
2288 .ok();
2289 }
2290
2291 fn send_retry(&self, status: acp_thread::RetryStatus) {
2292 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2293 }
2294
2295 fn send_stop(&self, reason: acp::StopReason) {
2296 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2297 }
2298
2299 fn send_canceled(&self) {
2300 self.0
2301 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2302 .ok();
2303 }
2304
2305 fn send_error(&self, error: impl Into<anyhow::Error>) {
2306 self.0.unbounded_send(Err(error.into())).ok();
2307 }
2308}
2309
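/// Handle given to a running tool for streaming updates about a single tool
/// call: field updates, diffs, terminals, and permission requests, all tagged
/// with the originating tool use id.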
2310#[derive(Clone)]
2311pub struct ToolCallEventStream {
2312 tool_use_id: LanguageModelToolUseId,
2313 stream: ThreadEventStream,
2314 fs: Option<Arc<dyn Fs>>,
2315}
2316
2317impl ToolCallEventStream {
2318 #[cfg(test)]
2319 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2320 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2321
2322 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2323
2324 (stream, ToolCallEventStreamReceiver(events_rx))
2325 }
2326
2327 fn new(
2328 tool_use_id: LanguageModelToolUseId,
2329 stream: ThreadEventStream,
2330 fs: Option<Arc<dyn Fs>>,
2331 ) -> Self {
2332 Self {
2333 tool_use_id,
2334 stream,
2335 fs,
2336 }
2337 }
2338
2339 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2340 self.stream
2341 .update_tool_call_fields(&self.tool_use_id, fields);
2342 }
2343
2344 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2345 self.stream
2346 .0
2347 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2348 acp_thread::ToolCallUpdateDiff {
2349 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2350 diff,
2351 }
2352 .into(),
2353 )))
2354 .ok();
2355 }
2356
2357 pub fn update_terminal(&self, terminal: Entity<acp_thread::Terminal>) {
2358 self.stream
2359 .0
2360 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2361 acp_thread::ToolCallUpdateTerminal {
2362 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2363 terminal,
2364 }
2365 .into(),
2366 )))
2367 .ok();
2368 }
2369
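    /// Asks the user for permission before the tool performs a sensitive action.
    ///
    /// Resolves immediately when `always_allow_tool_actions` is enabled;
    /// otherwise it emits a [`ThreadEvent::ToolCallAuthorization`] and waits for
    /// the user's choice, persisting "Always Allow" back to the settings file.
    /// The returned task resolves to an error if the user denies the request.
    ///
    /// A usage sketch from inside a tool's `run` method (the `input.path` field
    /// is an illustrative assumption):
    ///
    /// ```ignore
    /// let authorize = event_stream.authorize(format!("Delete {}", input.path), cx);
    /// cx.spawn(async move |_cx| {
    ///     authorize.await?; // fails if the user chose "Deny"
    ///     // ...perform the destructive action...
    ///     anyhow::Ok(())
    /// })
    /// ```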
2370 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2371 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2372 return Task::ready(Ok(()));
2373 }
2374
2375 let (response_tx, response_rx) = oneshot::channel();
2376 self.stream
2377 .0
2378 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2379 ToolCallAuthorization {
2380 tool_call: acp::ToolCallUpdate {
2381 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2382 fields: acp::ToolCallUpdateFields {
2383 title: Some(title.into()),
2384 ..Default::default()
2385 },
2386 },
2387 options: vec![
2388 acp::PermissionOption {
2389 id: acp::PermissionOptionId("always_allow".into()),
2390 name: "Always Allow".into(),
2391 kind: acp::PermissionOptionKind::AllowAlways,
2392 },
2393 acp::PermissionOption {
2394 id: acp::PermissionOptionId("allow".into()),
2395 name: "Allow".into(),
2396 kind: acp::PermissionOptionKind::AllowOnce,
2397 },
2398 acp::PermissionOption {
2399 id: acp::PermissionOptionId("deny".into()),
2400 name: "Deny".into(),
2401 kind: acp::PermissionOptionKind::RejectOnce,
2402 },
2403 ],
2404 response: response_tx,
2405 },
2406 )))
2407 .ok();
2408 let fs = self.fs.clone();
2409 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2410 "always_allow" => {
2411 if let Some(fs) = fs.clone() {
2412 cx.update(|cx| {
2413 update_settings_file::<AgentSettings>(fs, cx, |settings, _| {
2414 settings.set_always_allow_tool_actions(true);
2415 });
2416 })?;
2417 }
2418
2419 Ok(())
2420 }
2421 "allow" => Ok(()),
2422 _ => Err(anyhow!("Permission to run tool denied by user")),
2423 })
2424 }
2425}
2426
2427#[cfg(test)]
2428pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2429
2430#[cfg(test)]
2431impl ToolCallEventStreamReceiver {
2432 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2433 let event = self.0.next().await;
2434 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2435 auth
2436 } else {
2437 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2438 }
2439 }
2440
2441 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2442 let event = self.0.next().await;
2443 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2444 update,
2445 )))) = event
2446 {
2447 update.terminal
2448 } else {
2449 panic!("Expected terminal but got: {:?}", event);
2450 }
2451 }
2452}
2453
2454#[cfg(test)]
2455impl std::ops::Deref for ToolCallEventStreamReceiver {
2456 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2457
2458 fn deref(&self) -> &Self::Target {
2459 &self.0
2460 }
2461}
2462
2463#[cfg(test)]
2464impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2465 fn deref_mut(&mut self) -> &mut Self::Target {
2466 &mut self.0
2467 }
2468}
2469
2470impl From<&str> for UserMessageContent {
2471 fn from(text: &str) -> Self {
2472 Self::Text(text.into())
2473 }
2474}
2475
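/// Converts incoming ACP content blocks into the thread's internal user message
/// representation. Audio and binary blobs are not yet supported and are replaced
/// with placeholder text.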
2476impl From<acp::ContentBlock> for UserMessageContent {
2477 fn from(value: acp::ContentBlock) -> Self {
2478 match value {
2479 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2480 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2481 acp::ContentBlock::Audio(_) => {
2482 // TODO
2483 Self::Text("[audio]".to_string())
2484 }
2485 acp::ContentBlock::ResourceLink(resource_link) => {
2486 match MentionUri::parse(&resource_link.uri) {
2487 Ok(uri) => Self::Mention {
2488 uri,
2489 content: String::new(),
2490 },
2491 Err(err) => {
2492 log::error!("Failed to parse mention link: {}", err);
2493 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2494 }
2495 }
2496 }
2497 acp::ContentBlock::Resource(resource) => match resource.resource {
2498 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2499 match MentionUri::parse(&resource.uri) {
2500 Ok(uri) => Self::Mention {
2501 uri,
2502 content: resource.text,
2503 },
2504 Err(err) => {
2505 log::error!("Failed to parse mention link: {}", err);
2506 Self::Text(
2507 MarkdownCodeBlock {
2508 tag: &resource.uri,
2509 text: &resource.text,
2510 }
2511 .to_string(),
2512 )
2513 }
2514 }
2515 }
2516 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2517 // TODO
2518 Self::Text("[blob]".to_string())
2519 }
2520 },
2521 }
2522 }
2523}
2524
2525impl From<UserMessageContent> for acp::ContentBlock {
2526 fn from(content: UserMessageContent) -> Self {
2527 match content {
2528 UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
2529 text,
2530 annotations: None,
2531 }),
2532 UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
2533 data: image.source.to_string(),
2534 mime_type: "image/png".to_string(),
2535 annotations: None,
2536 uri: None,
2537 }),
2538 UserMessageContent::Mention { uri, content } => {
2539 acp::ContentBlock::Resource(acp::EmbeddedResource {
2540 resource: acp::EmbeddedResourceResource::TextResourceContents(
2541 acp::TextResourceContents {
2542 mime_type: None,
2543 text: content,
2544 uri: uri.to_uri().to_string(),
2545 },
2546 ),
2547 annotations: None,
2548 })
2549 }
2550 }
2551 }
2552}
2553
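/// Converts an ACP image block into a [`LanguageModelImage`]. The rendered size
/// is unknown at this point, so it is currently reported as zero (see TODO).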
2554fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2555 LanguageModelImage {
2556 source: image_content.data.into(),
2557 // TODO: make this optional?
2558 size: gpui::Size::new(0.into(), 0.into()),
2559 }
2560}