1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ReadFileTool, SystemPromptTemplate,
5 Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6};
7use acp_thread::{MentionUri, UserMessageId};
8use action_log::ActionLog;
9use agent::thread::{GitState, ProjectSnapshot, WorktreeSnapshot};
10use agent_client_protocol as acp;
11use agent_settings::{
12 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
13 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
14};
15use anyhow::{Context as _, Result, anyhow};
16use assistant_tool::adapt_schema_to_format;
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage};
19use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::{
23 FutureExt,
24 channel::{mpsc, oneshot},
25 future::Shared,
26 stream::FuturesUnordered,
27};
28use git::repository::DiffType;
29use gpui::{
30 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
31};
32use language_model::{
33 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
34 LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
35 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
36 LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
37 LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
38};
39use project::{
40 Project,
41 git_store::{GitStore, RepositoryState},
42};
43use prompt_store::ProjectContext;
44use schemars::{JsonSchema, Schema};
45use serde::{Deserialize, Serialize};
46use settings::{Settings, update_settings_file};
47use smol::stream::StreamExt;
48use std::fmt::Write;
49use std::{
50 collections::BTreeMap,
51 ops::RangeInclusive,
52 path::Path,
53 sync::Arc,
54 time::{Duration, Instant},
55};
56use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock};
57use uuid::Uuid;
58
59const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
60pub const MAX_TOOL_NAME_LENGTH: usize = 64;
61
62/// The ID of the user prompt that initiated a request.
63///
64/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
65#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
66pub struct PromptId(Arc<str>);
67
68impl PromptId {
69 pub fn new() -> Self {
70 Self(Uuid::new_v4().to_string().into())
71 }
72}
73
74impl std::fmt::Display for PromptId {
75 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
76 write!(f, "{}", self.0)
77 }
78}
79
80pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
81pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
82
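// With BASE_RETRY_DELAY = 5s and MAX_RETRY_ATTEMPTS = 4, the exponential
// strategy (see `handle_completion_error`) waits roughly 5s, 10s, 20s, and 40s
// before successive retries, while `Fixed` reuses the same delay each time.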
83#[derive(Debug, Clone)]
84enum RetryStrategy {
85 ExponentialBackoff {
86 initial_delay: Duration,
87 max_attempts: u8,
88 },
89 Fixed {
90 delay: Duration,
91 max_attempts: u8,
92 },
93}
94
95#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
96pub enum Message {
97 User(UserMessage),
98 Agent(AgentMessage),
99 Resume,
100}
101
102impl Message {
103 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
104 match self {
105 Message::Agent(agent_message) => Some(agent_message),
106 _ => None,
107 }
108 }
109
110 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
111 match self {
112 Message::User(message) => vec![message.to_request()],
113 Message::Agent(message) => message.to_request(),
114 Message::Resume => vec![LanguageModelRequestMessage {
115 role: Role::User,
116 content: vec!["Continue where you left off".into()],
117 cache: false,
118 }],
119 }
120 }
121
122 pub fn to_markdown(&self) -> String {
123 match self {
124 Message::User(message) => message.to_markdown(),
125 Message::Agent(message) => message.to_markdown(),
126 Message::Resume => "[resume]\n".into(),
127 }
128 }
129
130 pub fn role(&self) -> Role {
131 match self {
132 Message::User(_) | Message::Resume => Role::User,
133 Message::Agent(_) => Role::Assistant,
134 }
135 }
136}
137
138#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
139pub struct UserMessage {
140 pub id: UserMessageId,
141 pub content: Vec<UserMessageContent>,
142}
143
144#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
145pub enum UserMessageContent {
146 Text(String),
147 Mention { uri: MentionUri, content: String },
148 Image(LanguageModelImage),
149}
150
151impl UserMessage {
152 pub fn to_markdown(&self) -> String {
153 let mut markdown = String::from("## User\n\n");
154
155 for content in &self.content {
156 match content {
157 UserMessageContent::Text(text) => {
158 markdown.push_str(text);
159 markdown.push('\n');
160 }
161 UserMessageContent::Image(_) => {
162 markdown.push_str("<image />\n");
163 }
164 UserMessageContent::Mention { uri, content } => {
165 if !content.is_empty() {
166 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
167 } else {
168 let _ = writeln!(&mut markdown, "{}", uri.as_link());
169 }
170 }
171 }
172 }
173
174 markdown
175 }
176
177 fn to_request(&self) -> LanguageModelRequestMessage {
178 let mut message = LanguageModelRequestMessage {
179 role: Role::User,
180 content: Vec::with_capacity(self.content.len()),
181 cache: false,
182 };
183
184 const OPEN_CONTEXT: &str = "<context>\n\
185 The following items were attached by the user. \
186 They are up-to-date and don't need to be re-read.\n\n";
187
188 const OPEN_FILES_TAG: &str = "<files>";
189 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
190 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
191 const OPEN_SELECTIONS_TAG: &str = "<selections>";
192 const OPEN_THREADS_TAG: &str = "<threads>";
193 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
        const OPEN_RULES_TAG: &str =
            "<user_rules>\nThe user has specified the following rules that should be applied:\n";
196
197 let mut file_context = OPEN_FILES_TAG.to_string();
198 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
199 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
200 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
201 let mut thread_context = OPEN_THREADS_TAG.to_string();
202 let mut fetch_context = OPEN_FETCH_TAG.to_string();
203 let mut rules_context = OPEN_RULES_TAG.to_string();
204
205 for chunk in &self.content {
206 let chunk = match chunk {
207 UserMessageContent::Text(text) => {
208 language_model::MessageContent::Text(text.clone())
209 }
210 UserMessageContent::Image(value) => {
211 language_model::MessageContent::Image(value.clone())
212 }
213 UserMessageContent::Mention { uri, content } => {
214 match uri {
215 MentionUri::File { abs_path } => {
216 write!(
217 &mut file_context,
218 "\n{}",
219 MarkdownCodeBlock {
220 tag: &codeblock_tag(abs_path, None),
221 text: &content.to_string(),
222 }
223 )
224 .ok();
225 }
226 MentionUri::PastedImage => {
227 debug_panic!("pasted image URI should not be used in mention content")
228 }
229 MentionUri::Directory { .. } => {
230 write!(&mut directory_context, "\n{}\n", content).ok();
231 }
232 MentionUri::Symbol {
233 abs_path: path,
234 line_range,
235 ..
236 } => {
237 write!(
238 &mut symbol_context,
239 "\n{}",
240 MarkdownCodeBlock {
241 tag: &codeblock_tag(path, Some(line_range)),
242 text: content
243 }
244 )
245 .ok();
246 }
247 MentionUri::Selection {
248 abs_path: path,
249 line_range,
250 ..
251 } => {
252 write!(
253 &mut selection_context,
254 "\n{}",
255 MarkdownCodeBlock {
256 tag: &codeblock_tag(
257 path.as_deref().unwrap_or("Untitled".as_ref()),
258 Some(line_range)
259 ),
260 text: content
261 }
262 )
263 .ok();
264 }
265 MentionUri::Thread { .. } => {
266 write!(&mut thread_context, "\n{}\n", content).ok();
267 }
268 MentionUri::TextThread { .. } => {
269 write!(&mut thread_context, "\n{}\n", content).ok();
270 }
271 MentionUri::Rule { .. } => {
272 write!(
273 &mut rules_context,
274 "\n{}",
275 MarkdownCodeBlock {
276 tag: "",
277 text: content
278 }
279 )
280 .ok();
281 }
282 MentionUri::Fetch { url } => {
283 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
284 }
285 }
286
287 language_model::MessageContent::Text(uri.as_link().to_string())
288 }
289 };
290
291 message.content.push(chunk);
292 }
293
294 let len_before_context = message.content.len();
295
296 if file_context.len() > OPEN_FILES_TAG.len() {
297 file_context.push_str("</files>\n");
298 message
299 .content
300 .push(language_model::MessageContent::Text(file_context));
301 }
302
303 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
304 directory_context.push_str("</directories>\n");
305 message
306 .content
307 .push(language_model::MessageContent::Text(directory_context));
308 }
309
310 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
311 symbol_context.push_str("</symbols>\n");
312 message
313 .content
314 .push(language_model::MessageContent::Text(symbol_context));
315 }
316
317 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
318 selection_context.push_str("</selections>\n");
319 message
320 .content
321 .push(language_model::MessageContent::Text(selection_context));
322 }
323
324 if thread_context.len() > OPEN_THREADS_TAG.len() {
325 thread_context.push_str("</threads>\n");
326 message
327 .content
328 .push(language_model::MessageContent::Text(thread_context));
329 }
330
331 if fetch_context.len() > OPEN_FETCH_TAG.len() {
332 fetch_context.push_str("</fetched_urls>\n");
333 message
334 .content
335 .push(language_model::MessageContent::Text(fetch_context));
336 }
337
338 if rules_context.len() > OPEN_RULES_TAG.len() {
339 rules_context.push_str("</user_rules>\n");
340 message
341 .content
342 .push(language_model::MessageContent::Text(rules_context));
343 }
344
345 if message.content.len() > len_before_context {
346 message.content.insert(
347 len_before_context,
348 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
349 );
350 message
351 .content
352 .push(language_model::MessageContent::Text("</context>".into()));
353 }
354
355 message
356 }
357}
358
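/// Builds the info string for a Markdown code block: the file extension (if
/// any), the full path, and an optional 1-based line range, yielding something
/// like `rs src/main.rs:10-12` (the path here is purely illustrative).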
359fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
360 let mut result = String::new();
361
362 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
363 let _ = write!(result, "{} ", extension);
364 }
365
366 let _ = write!(result, "{}", full_path.display());
367
368 if let Some(range) = line_range {
369 if range.start() == range.end() {
370 let _ = write!(result, ":{}", range.start() + 1);
371 } else {
372 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
373 }
374 }
375
376 result
377}
378
379impl AgentMessage {
380 pub fn to_markdown(&self) -> String {
381 let mut markdown = String::from("## Assistant\n\n");
382
383 for content in &self.content {
384 match content {
385 AgentMessageContent::Text(text) => {
386 markdown.push_str(text);
387 markdown.push('\n');
388 }
389 AgentMessageContent::Thinking { text, .. } => {
390 markdown.push_str("<think>");
391 markdown.push_str(text);
392 markdown.push_str("</think>\n");
393 }
394 AgentMessageContent::RedactedThinking(_) => {
395 markdown.push_str("<redacted_thinking />\n")
396 }
397 AgentMessageContent::ToolUse(tool_use) => {
398 markdown.push_str(&format!(
399 "**Tool Use**: {} (ID: {})\n",
400 tool_use.name, tool_use.id
401 ));
402 markdown.push_str(&format!(
403 "{}\n",
404 MarkdownCodeBlock {
405 tag: "json",
406 text: &format!("{:#}", tool_use.input)
407 }
408 ));
409 }
410 }
411 }
412
413 for tool_result in self.tool_results.values() {
414 markdown.push_str(&format!(
415 "**Tool Result**: {} (ID: {})\n\n",
416 tool_result.tool_name, tool_result.tool_use_id
417 ));
418 if tool_result.is_error {
419 markdown.push_str("**ERROR:**\n");
420 }
421
422 match &tool_result.content {
423 LanguageModelToolResultContent::Text(text) => {
424 writeln!(markdown, "{text}\n").ok();
425 }
426 LanguageModelToolResultContent::Image(_) => {
427 writeln!(markdown, "<image />\n").ok();
428 }
429 }
430
431 if let Some(output) = tool_result.output.as_ref() {
432 writeln!(
433 markdown,
434 "**Debug Output**:\n\n```json\n{}\n```\n",
435 serde_json::to_string_pretty(output).unwrap()
436 )
437 .unwrap();
438 }
439 }
440
441 markdown
442 }
443
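    /// Converts this agent message into request messages: an `Assistant`
    /// message carrying the text/thinking/tool-use content (a tool use is only
    /// included when a matching result exists), followed by a `User` message
    /// carrying the tool results, mirroring the tool-call/tool-result pairing
    /// providers expect.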
444 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
445 let mut assistant_message = LanguageModelRequestMessage {
446 role: Role::Assistant,
447 content: Vec::with_capacity(self.content.len()),
448 cache: false,
449 };
450 for chunk in &self.content {
451 match chunk {
452 AgentMessageContent::Text(text) => {
453 assistant_message
454 .content
455 .push(language_model::MessageContent::Text(text.clone()));
456 }
457 AgentMessageContent::Thinking { text, signature } => {
458 assistant_message
459 .content
460 .push(language_model::MessageContent::Thinking {
461 text: text.clone(),
462 signature: signature.clone(),
463 });
464 }
465 AgentMessageContent::RedactedThinking(value) => {
466 assistant_message.content.push(
467 language_model::MessageContent::RedactedThinking(value.clone()),
468 );
469 }
470 AgentMessageContent::ToolUse(tool_use) => {
471 if self.tool_results.contains_key(&tool_use.id) {
472 assistant_message
473 .content
474 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
475 }
476 }
477 };
478 }
479
480 let mut user_message = LanguageModelRequestMessage {
481 role: Role::User,
482 content: Vec::new(),
483 cache: false,
484 };
485
486 for tool_result in self.tool_results.values() {
487 user_message
488 .content
489 .push(language_model::MessageContent::ToolResult(
490 tool_result.clone(),
491 ));
492 }
493
494 let mut messages = Vec::new();
495 if !assistant_message.content.is_empty() {
496 messages.push(assistant_message);
497 }
498 if !user_message.content.is_empty() {
499 messages.push(user_message);
500 }
501 messages
502 }
503}
504
505#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
506pub struct AgentMessage {
507 pub content: Vec<AgentMessageContent>,
508 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
509}
510
511#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
512pub enum AgentMessageContent {
513 Text(String),
514 Thinking {
515 text: String,
516 signature: Option<String>,
517 },
518 RedactedThinking(String),
519 ToolUse(LanguageModelToolUse),
520}
521
522#[derive(Debug)]
523pub enum ThreadEvent {
524 UserMessage(UserMessage),
525 AgentText(String),
526 AgentThinking(String),
527 ToolCall(acp::ToolCall),
528 ToolCallUpdate(acp_thread::ToolCallUpdate),
529 ToolCallAuthorization(ToolCallAuthorization),
530 Retry(acp_thread::RetryStatus),
531 Stop(acp::StopReason),
532}
533
534#[derive(Debug)]
535pub struct ToolCallAuthorization {
536 pub tool_call: acp::ToolCallUpdate,
537 pub options: Vec<acp::PermissionOption>,
538 pub response: oneshot::Sender<acp::PermissionOptionId>,
539}
540
541#[derive(Debug, thiserror::Error)]
542enum CompletionError {
543 #[error("max tokens")]
544 MaxTokens,
545 #[error("refusal")]
546 Refusal,
547 #[error(transparent)]
548 Other(#[from] anyhow::Error),
549}
550
551pub struct Thread {
552 id: acp::SessionId,
553 prompt_id: PromptId,
554 updated_at: DateTime<Utc>,
555 title: Option<SharedString>,
556 pending_title_generation: Option<Task<()>>,
557 summary: Option<SharedString>,
558 messages: Vec<Message>,
559 completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests as the model performs tool calls and
    /// we run the tools and report their results back.
563 running_turn: Option<RunningTurn>,
564 pending_message: Option<AgentMessage>,
565 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
566 tool_use_limit_reached: bool,
567 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
568 #[allow(unused)]
569 cumulative_token_usage: TokenUsage,
570 #[allow(unused)]
571 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
572 context_server_registry: Entity<ContextServerRegistry>,
573 profile_id: AgentProfileId,
574 project_context: Entity<ProjectContext>,
575 templates: Arc<Templates>,
576 model: Option<Arc<dyn LanguageModel>>,
577 summarization_model: Option<Arc<dyn LanguageModel>>,
578 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
579 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
580 pub(crate) project: Entity<Project>,
581 pub(crate) action_log: Entity<ActionLog>,
582}
583
584impl Thread {
585 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
586 let image = model.map_or(true, |model| model.supports_images());
587 acp::PromptCapabilities {
588 image,
589 audio: false,
590 embedded_context: true,
591 supports_custom_commands: false,
592 }
593 }
594
595 pub fn new(
596 project: Entity<Project>,
597 project_context: Entity<ProjectContext>,
598 context_server_registry: Entity<ContextServerRegistry>,
599 templates: Arc<Templates>,
600 model: Option<Arc<dyn LanguageModel>>,
601 cx: &mut Context<Self>,
602 ) -> Self {
603 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
604 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
605 let (prompt_capabilities_tx, prompt_capabilities_rx) =
606 watch::channel(Self::prompt_capabilities(model.as_deref()));
607 Self {
608 id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
609 prompt_id: PromptId::new(),
610 updated_at: Utc::now(),
611 title: None,
612 pending_title_generation: None,
613 summary: None,
614 messages: Vec::new(),
615 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
616 running_turn: None,
617 pending_message: None,
618 tools: BTreeMap::default(),
619 tool_use_limit_reached: false,
620 request_token_usage: HashMap::default(),
621 cumulative_token_usage: TokenUsage::default(),
622 initial_project_snapshot: {
623 let project_snapshot = Self::project_snapshot(project.clone(), cx);
624 cx.foreground_executor()
625 .spawn(async move { Some(project_snapshot.await) })
626 .shared()
627 },
628 context_server_registry,
629 profile_id,
630 project_context,
631 templates,
632 model,
633 summarization_model: None,
634 prompt_capabilities_tx,
635 prompt_capabilities_rx,
636 project,
637 action_log,
638 }
639 }
640
641 pub fn id(&self) -> &acp::SessionId {
642 &self.id
643 }
644
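    /// Re-emits the stored messages as a stream of `ThreadEvent`s without
    /// contacting the model, so a consumer can rebuild the visible
    /// conversation, including prior tool calls and their results.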
645 pub fn replay(
646 &mut self,
647 cx: &mut Context<Self>,
648 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
649 let (tx, rx) = mpsc::unbounded();
650 let stream = ThreadEventStream(tx);
651 for message in &self.messages {
652 match message {
653 Message::User(user_message) => stream.send_user_message(user_message),
654 Message::Agent(assistant_message) => {
655 for content in &assistant_message.content {
656 match content {
657 AgentMessageContent::Text(text) => stream.send_text(text),
658 AgentMessageContent::Thinking { text, .. } => {
659 stream.send_thinking(text)
660 }
661 AgentMessageContent::RedactedThinking(_) => {}
662 AgentMessageContent::ToolUse(tool_use) => {
663 self.replay_tool_call(
664 tool_use,
665 assistant_message.tool_results.get(&tool_use.id),
666 &stream,
667 cx,
668 );
669 }
670 }
671 }
672 }
673 Message::Resume => {}
674 }
675 }
676 rx
677 }
678
679 fn replay_tool_call(
680 &self,
681 tool_use: &LanguageModelToolUse,
682 tool_result: Option<&LanguageModelToolResult>,
683 stream: &ThreadEventStream,
684 cx: &mut Context<Self>,
685 ) {
686 let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
687 self.context_server_registry
688 .read(cx)
689 .servers()
690 .find_map(|(_, tools)| {
691 if let Some(tool) = tools.get(tool_use.name.as_ref()) {
692 Some(tool.clone())
693 } else {
694 None
695 }
696 })
697 });
698
699 let Some(tool) = tool else {
700 stream
701 .0
702 .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
703 id: acp::ToolCallId(tool_use.id.to_string().into()),
704 title: tool_use.name.to_string(),
705 kind: acp::ToolKind::Other,
706 status: acp::ToolCallStatus::Failed,
707 content: Vec::new(),
708 locations: Vec::new(),
709 raw_input: Some(tool_use.input.clone()),
710 raw_output: None,
711 })))
712 .ok();
713 return;
714 };
715
716 let title = tool.initial_title(tool_use.input.clone());
717 let kind = tool.kind();
718 stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
719
720 let output = tool_result
721 .as_ref()
722 .and_then(|result| result.output.clone());
723 if let Some(output) = output.clone() {
724 let tool_event_stream = ToolCallEventStream::new(
725 tool_use.id.clone(),
726 stream.clone(),
727 Some(self.project.read(cx).fs().clone()),
728 );
729 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
730 .log_err();
731 }
732
733 stream.update_tool_call_fields(
734 &tool_use.id,
735 acp::ToolCallUpdateFields {
736 status: Some(
737 tool_result
738 .as_ref()
739 .map_or(acp::ToolCallStatus::Failed, |result| {
740 if result.is_error {
741 acp::ToolCallStatus::Failed
742 } else {
743 acp::ToolCallStatus::Completed
744 }
745 }),
746 ),
747 raw_output: output,
748 ..Default::default()
749 },
750 );
751 }
752
753 pub fn from_db(
754 id: acp::SessionId,
755 db_thread: DbThread,
756 project: Entity<Project>,
757 project_context: Entity<ProjectContext>,
758 context_server_registry: Entity<ContextServerRegistry>,
759 action_log: Entity<ActionLog>,
760 templates: Arc<Templates>,
761 cx: &mut Context<Self>,
762 ) -> Self {
763 let profile_id = db_thread
764 .profile
765 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
766 let model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
767 db_thread
768 .model
769 .and_then(|model| {
770 let model = SelectedModel {
771 provider: model.provider.clone().into(),
772 model: model.model.into(),
773 };
774 registry.select_model(&model, cx)
775 })
776 .or_else(|| registry.default_model())
777 .map(|model| model.model)
778 });
779 let (prompt_capabilities_tx, prompt_capabilities_rx) =
780 watch::channel(Self::prompt_capabilities(model.as_deref()));
781
782 Self {
783 id,
784 prompt_id: PromptId::new(),
785 title: if db_thread.title.is_empty() {
786 None
787 } else {
788 Some(db_thread.title.clone())
789 },
790 pending_title_generation: None,
791 summary: db_thread.detailed_summary,
792 messages: db_thread.messages,
793 completion_mode: db_thread.completion_mode.unwrap_or_default(),
794 running_turn: None,
795 pending_message: None,
796 tools: BTreeMap::default(),
797 tool_use_limit_reached: false,
798 request_token_usage: db_thread.request_token_usage.clone(),
799 cumulative_token_usage: db_thread.cumulative_token_usage,
800 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
801 context_server_registry,
802 profile_id,
803 project_context,
804 templates,
805 model,
806 summarization_model: None,
807 project,
808 action_log,
809 updated_at: db_thread.updated_at,
810 prompt_capabilities_tx,
811 prompt_capabilities_rx,
812 }
813 }
814
815 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
816 let initial_project_snapshot = self.initial_project_snapshot.clone();
817 let mut thread = DbThread {
818 title: self.title(),
819 messages: self.messages.clone(),
820 updated_at: self.updated_at,
821 detailed_summary: self.summary.clone(),
822 initial_project_snapshot: None,
823 cumulative_token_usage: self.cumulative_token_usage,
824 request_token_usage: self.request_token_usage.clone(),
825 model: self.model.as_ref().map(|model| DbLanguageModel {
826 provider: model.provider_id().to_string(),
827 model: model.name().0.to_string(),
828 }),
829 completion_mode: Some(self.completion_mode),
830 profile: Some(self.profile_id.clone()),
831 };
832
833 cx.background_spawn(async move {
834 let initial_project_snapshot = initial_project_snapshot.await;
835 thread.initial_project_snapshot = initial_project_snapshot;
836 thread
837 })
838 }
839
840 /// Create a snapshot of the current project state including git information and unsaved buffers.
841 fn project_snapshot(
842 project: Entity<Project>,
843 cx: &mut Context<Self>,
844 ) -> Task<Arc<agent::thread::ProjectSnapshot>> {
845 let git_store = project.read(cx).git_store().clone();
846 let worktree_snapshots: Vec<_> = project
847 .read(cx)
848 .visible_worktrees(cx)
849 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
850 .collect();
851
852 cx.spawn(async move |_, cx| {
853 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
854
855 let mut unsaved_buffers = Vec::new();
856 cx.update(|app_cx| {
857 let buffer_store = project.read(app_cx).buffer_store();
858 for buffer_handle in buffer_store.read(app_cx).buffers() {
859 let buffer = buffer_handle.read(app_cx);
860 if buffer.is_dirty()
861 && let Some(file) = buffer.file()
862 {
863 let path = file.path().to_string_lossy().to_string();
864 unsaved_buffers.push(path);
865 }
866 }
867 })
868 .ok();
869
870 Arc::new(ProjectSnapshot {
871 worktree_snapshots,
872 unsaved_buffer_paths: unsaved_buffers,
873 timestamp: Utc::now(),
874 })
875 })
876 }
877
878 fn worktree_snapshot(
879 worktree: Entity<project::Worktree>,
880 git_store: Entity<GitStore>,
881 cx: &App,
882 ) -> Task<agent::thread::WorktreeSnapshot> {
883 cx.spawn(async move |cx| {
884 // Get worktree path and snapshot
885 let worktree_info = cx.update(|app_cx| {
886 let worktree = worktree.read(app_cx);
887 let path = worktree.abs_path().to_string_lossy().to_string();
888 let snapshot = worktree.snapshot();
889 (path, snapshot)
890 });
891
892 let Ok((worktree_path, _snapshot)) = worktree_info else {
893 return WorktreeSnapshot {
894 worktree_path: String::new(),
895 git_state: None,
896 };
897 };
898
899 let git_state = git_store
900 .update(cx, |git_store, cx| {
901 git_store
902 .repositories()
903 .values()
904 .find(|repo| {
905 repo.read(cx)
906 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
907 .is_some()
908 })
909 .cloned()
910 })
911 .ok()
912 .flatten()
913 .map(|repo| {
914 repo.update(cx, |repo, _| {
915 let current_branch =
916 repo.branch.as_ref().map(|branch| branch.name().to_owned());
917 repo.send_job(None, |state, _| async move {
918 let RepositoryState::Local { backend, .. } = state else {
919 return GitState {
920 remote_url: None,
921 head_sha: None,
922 current_branch,
923 diff: None,
924 };
925 };
926
927 let remote_url = backend.remote_url("origin");
928 let head_sha = backend.head_sha().await;
929 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
930
931 GitState {
932 remote_url,
933 head_sha,
934 current_branch,
935 diff,
936 }
937 })
938 })
939 });
940
941 let git_state = match git_state {
942 Some(git_state) => match git_state.ok() {
943 Some(git_state) => git_state.await.ok(),
944 None => None,
945 },
946 None => None,
947 };
948
949 WorktreeSnapshot {
950 worktree_path,
951 git_state,
952 }
953 })
954 }
955
956 pub fn project_context(&self) -> &Entity<ProjectContext> {
957 &self.project_context
958 }
959
960 pub fn project(&self) -> &Entity<Project> {
961 &self.project
962 }
963
964 pub fn action_log(&self) -> &Entity<ActionLog> {
965 &self.action_log
966 }
967
968 pub fn is_empty(&self) -> bool {
969 self.messages.is_empty() && self.title.is_none()
970 }
971
972 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
973 self.model.as_ref()
974 }
975
976 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
977 let old_usage = self.latest_token_usage();
978 self.model = Some(model);
979 let new_caps = Self::prompt_capabilities(self.model.as_deref());
980 let new_usage = self.latest_token_usage();
981 if old_usage != new_usage {
982 cx.emit(TokenUsageUpdated(new_usage));
983 }
984 self.prompt_capabilities_tx.send(new_caps).log_err();
985 cx.notify()
986 }
987
988 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
989 self.summarization_model.as_ref()
990 }
991
992 pub fn set_summarization_model(
993 &mut self,
994 model: Option<Arc<dyn LanguageModel>>,
995 cx: &mut Context<Self>,
996 ) {
997 self.summarization_model = model;
998 cx.notify()
999 }
1000
1001 pub fn completion_mode(&self) -> CompletionMode {
1002 self.completion_mode
1003 }
1004
1005 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
1006 let old_usage = self.latest_token_usage();
1007 self.completion_mode = mode;
1008 let new_usage = self.latest_token_usage();
1009 if old_usage != new_usage {
1010 cx.emit(TokenUsageUpdated(new_usage));
1011 }
1012 cx.notify()
1013 }
1014
1015 #[cfg(any(test, feature = "test-support"))]
1016 pub fn last_message(&self) -> Option<Message> {
1017 if let Some(message) = self.pending_message.clone() {
1018 Some(Message::Agent(message))
1019 } else {
1020 self.messages.last().cloned()
1021 }
1022 }
1023
1024 pub fn add_default_tools(&mut self, cx: &mut Context<Self>) {
1025 let language_registry = self.project.read(cx).languages().clone();
1026 self.add_tool(CopyPathTool::new(self.project.clone()));
1027 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1028 self.add_tool(DeletePathTool::new(
1029 self.project.clone(),
1030 self.action_log.clone(),
1031 ));
1032 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1033 self.add_tool(EditFileTool::new(cx.weak_entity(), language_registry));
1034 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1035 self.add_tool(FindPathTool::new(self.project.clone()));
1036 self.add_tool(GrepTool::new(self.project.clone()));
1037 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1038 self.add_tool(MovePathTool::new(self.project.clone()));
1039 self.add_tool(NowTool);
1040 self.add_tool(OpenTool::new(self.project.clone()));
1041 self.add_tool(ReadFileTool::new(
1042 self.project.clone(),
1043 self.action_log.clone(),
1044 ));
1045 self.add_tool(TerminalTool::new(self.project.clone(), cx));
1046 self.add_tool(ThinkingTool);
1047 self.add_tool(WebSearchTool);
1048 }
1049
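    /// Registers a tool under its `AgentTool::name()`, replacing any tool
    /// previously registered under the same name (e.g. `thread.add_tool(NowTool)`).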
1050 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1051 self.tools.insert(T::name().into(), tool.erase());
1052 }
1053
1054 pub fn remove_tool(&mut self, name: &str) -> bool {
1055 self.tools.remove(name).is_some()
1056 }
1057
1058 pub fn profile(&self) -> &AgentProfileId {
1059 &self.profile_id
1060 }
1061
1062 pub fn set_profile(&mut self, profile_id: AgentProfileId) {
1063 self.profile_id = profile_id;
1064 }
1065
1066 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1067 if let Some(running_turn) = self.running_turn.take() {
1068 running_turn.cancel();
1069 }
1070 self.flush_pending_message(cx);
1071 }
1072
1073 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1074 let Some(last_user_message) = self.last_user_message() else {
1075 return;
1076 };
1077
1078 self.request_token_usage
1079 .insert(last_user_message.id.clone(), update);
1080 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1081 cx.notify();
1082 }
1083
1084 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1085 self.cancel(cx);
1086 let Some(position) = self.messages.iter().position(
1087 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1088 ) else {
1089 return Err(anyhow!("Message not found"));
1090 };
1091
1092 for message in self.messages.drain(position..) {
1093 match message {
1094 Message::User(message) => {
1095 self.request_token_usage.remove(&message.id);
1096 }
1097 Message::Agent(_) | Message::Resume => {}
1098 }
1099 }
1100 self.summary = None;
1101 cx.notify();
1102 Ok(())
1103 }
1104
1105 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1106 let last_user_message = self.last_user_message()?;
1107 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1108 let model = self.model.clone()?;
1109
1110 Some(acp_thread::TokenUsage {
1111 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1112 used_tokens: tokens.total_tokens(),
1113 })
1114 }
1115
1116 pub fn resume(
1117 &mut self,
1118 cx: &mut Context<Self>,
1119 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1120 self.messages.push(Message::Resume);
1121 cx.notify();
1122
1123 log::debug!("Total messages in thread: {}", self.messages.len());
1124 self.run_turn(cx)
1125 }
1126
    /// Sending a message results in the model streaming a response, which may include tool calls.
    /// After calling tools, the model stops and waits for the outstanding tool calls to complete and their results to be sent back.
    /// The returned channel reports every stop that occurs before the model errors or ends its turn.
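    /// Callers typically drain the receiver until it yields a `ThreadEvent::Stop(..)`
    /// or an error.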
1130 pub fn send<T>(
1131 &mut self,
1132 id: UserMessageId,
1133 content: impl IntoIterator<Item = T>,
1134 cx: &mut Context<Self>,
1135 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1136 where
1137 T: Into<UserMessageContent>,
1138 {
1139 let model = self.model().context("No language model configured")?;
1140
1141 log::info!("Thread::send called with model: {}", model.name().0);
1142 self.advance_prompt_id();
1143
1144 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1145 log::debug!("Thread::send content: {:?}", content);
1146
1147 self.messages
1148 .push(Message::User(UserMessage { id, content }));
1149 cx.notify();
1150
1151 log::debug!("Total messages in thread: {}", self.messages.len());
1152 self.run_turn(cx)
1153 }
1154
1155 fn run_turn(
1156 &mut self,
1157 cx: &mut Context<Self>,
1158 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1159 self.cancel(cx);
1160
1161 let model = self.model.clone().context("No language model configured")?;
1162 let profile = AgentSettings::get_global(cx)
1163 .profiles
1164 .get(&self.profile_id)
1165 .context("Profile not found")?;
1166 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1167 let event_stream = ThreadEventStream(events_tx);
1168 let message_ix = self.messages.len().saturating_sub(1);
1169 self.tool_use_limit_reached = false;
1170 self.summary = None;
1171 self.running_turn = Some(RunningTurn {
1172 event_stream: event_stream.clone(),
1173 tools: self.enabled_tools(profile, &model, cx),
1174 _task: cx.spawn(async move |this, cx| {
1175 log::debug!("Starting agent turn execution");
1176
1177 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1178 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1179
1180 match turn_result {
1181 Ok(()) => {
1182 log::debug!("Turn execution completed");
1183 event_stream.send_stop(acp::StopReason::EndTurn);
1184 }
1185 Err(error) => {
1186 log::error!("Turn execution failed: {:?}", error);
1187 match error.downcast::<CompletionError>() {
1188 Ok(CompletionError::Refusal) => {
1189 event_stream.send_stop(acp::StopReason::Refusal);
1190 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1191 }
1192 Ok(CompletionError::MaxTokens) => {
1193 event_stream.send_stop(acp::StopReason::MaxTokens);
1194 }
1195 Ok(CompletionError::Other(error)) | Err(error) => {
1196 event_stream.send_error(error);
1197 }
1198 }
1199 }
1200 }
1201
1202 _ = this.update(cx, |this, _| this.running_turn.take());
1203 }),
1204 });
1205 Ok(events_rx)
1206 }
1207
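    /// Drives a single turn: builds a completion request, streams the model's
    /// events, runs any requested tools, and loops with
    /// `CompletionIntent::ToolResults` until the model ends its turn, refuses,
    /// hits a limit, or fails with a non-retryable error.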
1208 async fn run_turn_internal(
1209 this: &WeakEntity<Self>,
1210 model: Arc<dyn LanguageModel>,
1211 event_stream: &ThreadEventStream,
1212 cx: &mut AsyncApp,
1213 ) -> Result<()> {
1214 let mut attempt = 0;
1215 let mut intent = CompletionIntent::UserPrompt;
1216 loop {
1217 let request =
1218 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1219
1220 telemetry::event!(
1221 "Agent Thread Completion",
1222 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1223 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1224 model = model.telemetry_id(),
1225 model_provider = model.provider_id().to_string(),
1226 attempt
1227 );
1228
1229 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1230 let mut events = model
1231 .stream_completion(request, cx)
1232 .await
1233 .map_err(|error| anyhow!(error))?;
1234 let mut tool_results = FuturesUnordered::new();
1235 let mut error = None;
1236 while let Some(event) = events.next().await {
1237 log::trace!("Received completion event: {:?}", event);
1238 match event {
1239 Ok(event) => {
1240 tool_results.extend(this.update(cx, |this, cx| {
1241 this.handle_completion_event(event, event_stream, cx)
1242 })??);
1243 }
1244 Err(err) => {
1245 error = Some(err);
1246 break;
1247 }
1248 }
1249 }
1250
1251 let end_turn = tool_results.is_empty();
1252 while let Some(tool_result) = tool_results.next().await {
1253 log::debug!("Tool finished {:?}", tool_result);
1254
1255 event_stream.update_tool_call_fields(
1256 &tool_result.tool_use_id,
1257 acp::ToolCallUpdateFields {
1258 status: Some(if tool_result.is_error {
1259 acp::ToolCallStatus::Failed
1260 } else {
1261 acp::ToolCallStatus::Completed
1262 }),
1263 raw_output: tool_result.output.clone(),
1264 ..Default::default()
1265 },
1266 );
1267 this.update(cx, |this, _cx| {
1268 this.pending_message()
1269 .tool_results
1270 .insert(tool_result.tool_use_id.clone(), tool_result);
1271 })?;
1272 }
1273
1274 this.update(cx, |this, cx| {
1275 this.flush_pending_message(cx);
1276 if this.title.is_none() && this.pending_title_generation.is_none() {
1277 this.generate_title(cx);
1278 }
1279 })?;
1280
1281 if let Some(error) = error {
1282 attempt += 1;
1283 let retry =
1284 this.update(cx, |this, _| this.handle_completion_error(error, attempt))??;
1285 let timer = cx.background_executor().timer(retry.duration);
1286 event_stream.send_retry(retry);
1287 timer.await;
1288 this.update(cx, |this, _cx| {
1289 if let Some(Message::Agent(message)) = this.messages.last() {
1290 if message.tool_results.is_empty() {
1291 intent = CompletionIntent::UserPrompt;
1292 this.messages.push(Message::Resume);
1293 }
1294 }
1295 })?;
1296 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1297 return Err(language_model::ToolUseLimitReachedError.into());
1298 } else if end_turn {
1299 return Ok(());
1300 } else {
1301 intent = CompletionIntent::ToolResults;
1302 attempt = 0;
1303 }
1304 }
1305 }
1306
1307 fn handle_completion_error(
1308 &mut self,
1309 error: LanguageModelCompletionError,
1310 attempt: u8,
1311 ) -> Result<acp_thread::RetryStatus> {
1312 if self.completion_mode == CompletionMode::Normal {
1313 return Err(anyhow!(error));
1314 }
1315
1316 let Some(strategy) = Self::retry_strategy_for(&error) else {
1317 return Err(anyhow!(error));
1318 };
1319
1320 let max_attempts = match &strategy {
1321 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1322 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1323 };
1324
1325 if attempt > max_attempts {
1326 return Err(anyhow!(error));
1327 }
1328
1329 let delay = match &strategy {
1330 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1331 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1332 Duration::from_secs(delay_secs)
1333 }
1334 RetryStrategy::Fixed { delay, .. } => *delay,
1335 };
1336 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1337
1338 Ok(acp_thread::RetryStatus {
1339 last_error: error.to_string().into(),
1340 attempt: attempt as usize,
1341 max_attempts: max_attempts as usize,
1342 started_at: Instant::now(),
1343 duration: delay,
1344 })
1345 }
1346
1347 /// A helper method that's called on every streamed completion event.
1348 /// Returns an optional tool result task, which the main agentic loop will
1349 /// send back to the model when it resolves.
1350 fn handle_completion_event(
1351 &mut self,
1352 event: LanguageModelCompletionEvent,
1353 event_stream: &ThreadEventStream,
1354 cx: &mut Context<Self>,
1355 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1356 log::trace!("Handling streamed completion event: {:?}", event);
1357 use LanguageModelCompletionEvent::*;
1358
1359 match event {
1360 StartMessage { .. } => {
1361 self.flush_pending_message(cx);
1362 self.pending_message = Some(AgentMessage::default());
1363 }
1364 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1365 Thinking { text, signature } => {
1366 self.handle_thinking_event(text, signature, event_stream, cx)
1367 }
1368 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1369 ToolUse(tool_use) => {
1370 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1371 }
1372 ToolUseJsonParseError {
1373 id,
1374 tool_name,
1375 raw_input,
1376 json_parse_error,
1377 } => {
1378 return Ok(Some(Task::ready(
1379 self.handle_tool_use_json_parse_error_event(
1380 id,
1381 tool_name,
1382 raw_input,
1383 json_parse_error,
1384 ),
1385 )));
1386 }
1387 UsageUpdate(usage) => {
1388 telemetry::event!(
1389 "Agent Thread Completion Usage Updated",
1390 thread_id = self.id.to_string(),
1391 prompt_id = self.prompt_id.to_string(),
1392 model = self.model.as_ref().map(|m| m.telemetry_id()),
1393 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1394 input_tokens = usage.input_tokens,
1395 output_tokens = usage.output_tokens,
1396 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1397 cache_read_input_tokens = usage.cache_read_input_tokens,
1398 );
1399 self.update_token_usage(usage, cx);
1400 }
1401 StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => {
1402 self.update_model_request_usage(amount, limit, cx);
1403 }
1404 StatusUpdate(
1405 CompletionRequestStatus::Started
1406 | CompletionRequestStatus::Queued { .. }
1407 | CompletionRequestStatus::Failed { .. },
1408 ) => {}
1409 StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => {
1410 self.tool_use_limit_reached = true;
1411 }
1412 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1413 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1414 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1415 }
1416
1417 Ok(None)
1418 }
1419
1420 fn handle_text_event(
1421 &mut self,
1422 new_text: String,
1423 event_stream: &ThreadEventStream,
1424 cx: &mut Context<Self>,
1425 ) {
1426 event_stream.send_text(&new_text);
1427
1428 let last_message = self.pending_message();
1429 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1430 text.push_str(&new_text);
1431 } else {
1432 last_message
1433 .content
1434 .push(AgentMessageContent::Text(new_text));
1435 }
1436
1437 cx.notify();
1438 }
1439
1440 fn handle_thinking_event(
1441 &mut self,
1442 new_text: String,
1443 new_signature: Option<String>,
1444 event_stream: &ThreadEventStream,
1445 cx: &mut Context<Self>,
1446 ) {
1447 event_stream.send_thinking(&new_text);
1448
1449 let last_message = self.pending_message();
1450 if let Some(AgentMessageContent::Thinking { text, signature }) =
1451 last_message.content.last_mut()
1452 {
1453 text.push_str(&new_text);
1454 *signature = new_signature.or(signature.take());
1455 } else {
1456 last_message.content.push(AgentMessageContent::Thinking {
1457 text: new_text,
1458 signature: new_signature,
1459 });
1460 }
1461
1462 cx.notify();
1463 }
1464
1465 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1466 let last_message = self.pending_message();
1467 last_message
1468 .content
1469 .push(AgentMessageContent::RedactedThinking(data));
1470 cx.notify();
1471 }
1472
1473 fn handle_tool_use_event(
1474 &mut self,
1475 tool_use: LanguageModelToolUse,
1476 event_stream: &ThreadEventStream,
1477 cx: &mut Context<Self>,
1478 ) -> Option<Task<LanguageModelToolResult>> {
1479 cx.notify();
1480
1481 let tool = self.tool(tool_use.name.as_ref());
1482 let mut title = SharedString::from(&tool_use.name);
1483 let mut kind = acp::ToolKind::Other;
1484 if let Some(tool) = tool.as_ref() {
1485 title = tool.initial_title(tool_use.input.clone());
1486 kind = tool.kind();
1487 }
1488
        // Ensure the last message ends with the current tool use
1490 let last_message = self.pending_message();
1491 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1492 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1493 if last_tool_use.id == tool_use.id {
1494 *last_tool_use = tool_use.clone();
1495 false
1496 } else {
1497 true
1498 }
1499 } else {
1500 true
1501 }
1502 });
1503
1504 if push_new_tool_use {
1505 event_stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
1506 last_message
1507 .content
1508 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1509 } else {
1510 event_stream.update_tool_call_fields(
1511 &tool_use.id,
1512 acp::ToolCallUpdateFields {
1513 title: Some(title.into()),
1514 kind: Some(kind),
1515 raw_input: Some(tool_use.input.clone()),
1516 ..Default::default()
1517 },
1518 );
1519 }
1520
1521 if !tool_use.is_input_complete {
1522 return None;
1523 }
1524
1525 let Some(tool) = tool else {
1526 let content = format!("No tool named {} exists", tool_use.name);
1527 return Some(Task::ready(LanguageModelToolResult {
1528 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1529 tool_use_id: tool_use.id,
1530 tool_name: tool_use.name,
1531 is_error: true,
1532 output: None,
1533 }));
1534 };
1535
1536 let fs = self.project.read(cx).fs().clone();
1537 let tool_event_stream =
1538 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1539 tool_event_stream.update_fields(acp::ToolCallUpdateFields {
1540 status: Some(acp::ToolCallStatus::InProgress),
1541 ..Default::default()
1542 });
1543 let supports_images = self.model().is_some_and(|model| model.supports_images());
1544 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1545 log::debug!("Running tool {}", tool_use.name);
1546 Some(cx.foreground_executor().spawn(async move {
1547 let tool_result = tool_result.await.and_then(|output| {
1548 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1549 && !supports_images
1550 {
1551 return Err(anyhow!(
1552 "Attempted to read an image, but this model doesn't support it.",
1553 ));
1554 }
1555 Ok(output)
1556 });
1557
1558 match tool_result {
1559 Ok(output) => LanguageModelToolResult {
1560 tool_use_id: tool_use.id,
1561 tool_name: tool_use.name,
1562 is_error: false,
1563 content: output.llm_output,
1564 output: Some(output.raw_output),
1565 },
1566 Err(error) => LanguageModelToolResult {
1567 tool_use_id: tool_use.id,
1568 tool_name: tool_use.name,
1569 is_error: true,
1570 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1571 output: Some(error.to_string().into()),
1572 },
1573 }
1574 }))
1575 }
1576
1577 fn handle_tool_use_json_parse_error_event(
1578 &mut self,
1579 tool_use_id: LanguageModelToolUseId,
1580 tool_name: Arc<str>,
1581 raw_input: Arc<str>,
1582 json_parse_error: String,
1583 ) -> LanguageModelToolResult {
1584 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1585 LanguageModelToolResult {
1586 tool_use_id,
1587 tool_name,
1588 is_error: true,
1589 content: LanguageModelToolResultContent::Text(tool_output.into()),
1590 output: Some(serde_json::Value::String(raw_input.to_string())),
1591 }
1592 }
1593
1594 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1595 self.project
1596 .read(cx)
1597 .user_store()
1598 .update(cx, |user_store, cx| {
1599 user_store.update_model_request_usage(
1600 ModelRequestUsage(RequestUsage {
1601 amount: amount as i32,
1602 limit,
1603 }),
1604 cx,
1605 )
1606 });
1607 }
1608
1609 pub fn title(&self) -> SharedString {
1610 self.title.clone().unwrap_or("New Thread".into())
1611 }
1612
1613 pub fn summary(&mut self, cx: &mut Context<Self>) -> Task<Result<SharedString>> {
1614 if let Some(summary) = self.summary.as_ref() {
1615 return Task::ready(Ok(summary.clone()));
1616 }
1617 let Some(model) = self.summarization_model.clone() else {
1618 return Task::ready(Err(anyhow!("No summarization model available")));
1619 };
1620 let mut request = LanguageModelRequest {
1621 intent: Some(CompletionIntent::ThreadContextSummarization),
1622 temperature: AgentSettings::temperature_for_model(&model, cx),
1623 ..Default::default()
1624 };
1625
1626 for message in &self.messages {
1627 request.messages.extend(message.to_request());
1628 }
1629
1630 request.messages.push(LanguageModelRequestMessage {
1631 role: Role::User,
1632 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1633 cache: false,
1634 });
1635 cx.spawn(async move |this, cx| {
1636 let mut summary = String::new();
1637 let mut messages = model.stream_completion(request, cx).await?;
1638 while let Some(event) = messages.next().await {
1639 let event = event?;
1640 let text = match event {
1641 LanguageModelCompletionEvent::Text(text) => text,
1642 LanguageModelCompletionEvent::StatusUpdate(
1643 CompletionRequestStatus::UsageUpdated { amount, limit },
1644 ) => {
1645 this.update(cx, |thread, cx| {
1646 thread.update_model_request_usage(amount, limit, cx);
1647 })?;
1648 continue;
1649 }
1650 _ => continue,
1651 };
1652
1653 let mut lines = text.lines();
1654 summary.extend(lines.next());
1655 }
1656
1657 log::debug!("Setting summary: {}", summary);
1658 let summary = SharedString::from(summary);
1659
1660 this.update(cx, |this, cx| {
1661 this.summary = Some(summary.clone());
1662 cx.notify()
1663 })?;
1664
1665 Ok(summary)
1666 })
1667 }
1668
1669 fn generate_title(&mut self, cx: &mut Context<Self>) {
1670 let Some(model) = self.summarization_model.clone() else {
1671 return;
1672 };
1673
1674 log::debug!(
1675 "Generating title with model: {:?}",
1676 self.summarization_model.as_ref().map(|model| model.name())
1677 );
1678 let mut request = LanguageModelRequest {
1679 intent: Some(CompletionIntent::ThreadSummarization),
1680 temperature: AgentSettings::temperature_for_model(&model, cx),
1681 ..Default::default()
1682 };
1683
1684 for message in &self.messages {
1685 request.messages.extend(message.to_request());
1686 }
1687
1688 request.messages.push(LanguageModelRequestMessage {
1689 role: Role::User,
1690 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1691 cache: false,
1692 });
1693 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1694 let mut title = String::new();
1695
1696 let generate = async {
1697 let mut messages = model.stream_completion(request, cx).await?;
1698 while let Some(event) = messages.next().await {
1699 let event = event?;
1700 let text = match event {
1701 LanguageModelCompletionEvent::Text(text) => text,
1702 LanguageModelCompletionEvent::StatusUpdate(
1703 CompletionRequestStatus::UsageUpdated { amount, limit },
1704 ) => {
1705 this.update(cx, |thread, cx| {
1706 thread.update_model_request_usage(amount, limit, cx);
1707 })?;
1708 continue;
1709 }
1710 _ => continue,
1711 };
1712
1713 let mut lines = text.lines();
1714 title.extend(lines.next());
1715
1716 // Stop if the LLM generated multiple lines.
1717 if lines.next().is_some() {
1718 break;
1719 }
1720 }
1721 anyhow::Ok(())
1722 };
1723
1724 if generate.await.context("failed to generate title").is_ok() {
1725 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1726 }
1727 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1728 }));
1729 }
1730
1731 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1732 self.pending_title_generation = None;
1733 if Some(&title) != self.title.as_ref() {
1734 self.title = Some(title);
1735 cx.emit(TitleUpdated);
1736 cx.notify();
1737 }
1738 }
1739
1740 fn last_user_message(&self) -> Option<&UserMessage> {
1741 self.messages
1742 .iter()
1743 .rev()
1744 .find_map(|message| match message {
1745 Message::User(user_message) => Some(user_message),
1746 Message::Agent(_) => None,
1747 Message::Resume => None,
1748 })
1749 }
1750
1751 fn pending_message(&mut self) -> &mut AgentMessage {
1752 self.pending_message.get_or_insert_default()
1753 }
1754
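    /// Commits the in-progress agent message to `messages`, synthesizing an
    /// error result (`TOOL_CANCELED_MESSAGE`) for any tool use that never
    /// received one, e.g. because the turn was cancelled.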
1755 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1756 let Some(mut message) = self.pending_message.take() else {
1757 return;
1758 };
1759
1760 if message.content.is_empty() {
1761 return;
1762 }
1763
1764 for content in &message.content {
1765 let AgentMessageContent::ToolUse(tool_use) = content else {
1766 continue;
1767 };
1768
1769 if !message.tool_results.contains_key(&tool_use.id) {
1770 message.tool_results.insert(
1771 tool_use.id.clone(),
1772 LanguageModelToolResult {
1773 tool_use_id: tool_use.id.clone(),
1774 tool_name: tool_use.name.clone(),
1775 is_error: true,
1776 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1777 output: None,
1778 },
1779 );
1780 }
1781 }
1782
1783 self.messages.push(Message::Agent(message));
1784 self.updated_at = Utc::now();
1785 self.summary = None;
1786 cx.notify()
1787 }
1788
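    /// Assembles the `LanguageModelRequest` for this turn: the rendered system
    /// prompt plus every message so far, along with the input schemas of the
    /// tools enabled on the running turn (no tools when no turn is running).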
1789 pub(crate) fn build_completion_request(
1790 &self,
1791 completion_intent: CompletionIntent,
1792 cx: &App,
1793 ) -> Result<LanguageModelRequest> {
1794 let model = self.model().context("No language model configured")?;
1795 let tools = if let Some(turn) = self.running_turn.as_ref() {
1796 turn.tools
1797 .iter()
1798 .filter_map(|(tool_name, tool)| {
1799 log::trace!("Including tool: {}", tool_name);
1800 Some(LanguageModelRequestTool {
1801 name: tool_name.to_string(),
1802 description: tool.description().to_string(),
1803 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1804 })
1805 })
1806 .collect::<Vec<_>>()
1807 } else {
1808 Vec::new()
1809 };
1810
1811 log::debug!("Building completion request");
1812 log::debug!("Completion intent: {:?}", completion_intent);
1813 log::debug!("Completion mode: {:?}", self.completion_mode);
1814
1815 let messages = self.build_request_messages(cx);
1816 log::debug!("Request will include {} messages", messages.len());
1817 log::debug!("Request includes {} tools", tools.len());
1818
1819 let request = LanguageModelRequest {
1820 thread_id: Some(self.id.to_string()),
1821 prompt_id: Some(self.prompt_id.to_string()),
1822 intent: Some(completion_intent),
1823 mode: Some(self.completion_mode.into()),
1824 messages,
1825 tools,
1826 tool_choice: None,
1827 stop: Vec::new(),
1828 temperature: AgentSettings::temperature_for_model(model, cx),
1829 thinking_allowed: true,
1830 };
1831
1832 log::debug!("Completion request built successfully");
1833 Ok(request)
1834 }
1835
1836 fn enabled_tools(
1837 &self,
1838 profile: &AgentProfileSettings,
1839 model: &Arc<dyn LanguageModel>,
1840 cx: &App,
1841 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1842 fn truncate(tool_name: &SharedString) -> SharedString {
1843 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1844 let mut truncated = tool_name.to_string();
1845 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1846 truncated.into()
1847 } else {
1848 tool_name.clone()
1849 }
1850 }
1851
1852 let mut tools = self
1853 .tools
1854 .iter()
1855 .filter_map(|(tool_name, tool)| {
1856 if tool.supported_provider(&model.provider_id())
1857 && profile.is_tool_enabled(tool_name)
1858 {
1859 Some((truncate(tool_name), tool.clone()))
1860 } else {
1861 None
1862 }
1863 })
1864 .collect::<BTreeMap<_, _>>();
1865
1866 let mut context_server_tools = Vec::new();
1867 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1868 let mut duplicate_tool_names = HashSet::default();
1869 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1870 for (tool_name, tool) in server_tools {
1871 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1872 let tool_name = truncate(tool_name);
1873 if !seen_tools.insert(tool_name.clone()) {
1874 duplicate_tool_names.insert(tool_name.clone());
1875 }
1876 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1877 }
1878 }
1879 }
1880
1881 // When there are duplicate tool names, disambiguate by prefixing them
1882 // with the server ID. In the rare case there isn't enough space for the
1883 // disambiguated tool name, keep only the last tool with this name.
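        // For example, if two servers both expose a `search` tool, the one with
        // server ID `github` would contribute it as `github_search` (names here
        // are illustrative), truncating the server ID as needed to stay within
        // MAX_TOOL_NAME_LENGTH.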
1884 for (server_id, tool_name, tool) in context_server_tools {
1885 if duplicate_tool_names.contains(&tool_name) {
1886 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1887 if available >= 2 {
1888 let mut disambiguated = server_id.0.to_string();
1889 disambiguated.truncate(available - 1);
1890 disambiguated.push('_');
1891 disambiguated.push_str(&tool_name);
1892 tools.insert(disambiguated.into(), tool.clone());
1893 } else {
1894 tools.insert(tool_name, tool.clone());
1895 }
1896 } else {
1897 tools.insert(tool_name, tool.clone());
1898 }
1899 }
1900
1901 tools
1902 }
1903
1904 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1905 self.running_turn.as_ref()?.tools.get(name).cloned()
1906 }
1907
1908 fn build_request_messages(&self, cx: &App) -> Vec<LanguageModelRequestMessage> {
1909 log::trace!(
1910 "Building request messages from {} thread messages",
1911 self.messages.len()
1912 );
1913
1914 let system_prompt = SystemPromptTemplate {
1915 project: self.project_context.read(cx),
1916 available_tools: self.tools.keys().cloned().collect(),
1917 }
1918 .render(&self.templates)
1919 .context("failed to build system prompt")
1920 .expect("Invalid template");
1921 let mut messages = vec![LanguageModelRequestMessage {
1922 role: Role::System,
1923 content: vec![system_prompt.into()],
1924 cache: false,
1925 }];
1926 for message in &self.messages {
1927 messages.extend(message.to_request());
1928 }
1929
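        // Mark the last committed message as a cache anchor; the `cache` flag is
        // presumably how providers with prompt caching reuse everything up to this point.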
1930 if let Some(last_message) = messages.last_mut() {
1931 last_message.cache = true;
1932 }
1933
1934 if let Some(message) = self.pending_message.as_ref() {
1935 messages.extend(message.to_request());
1936 }
1937
1938 messages
1939 }
1940
1941 pub fn to_markdown(&self) -> String {
1942 let mut markdown = String::new();
1943 for (ix, message) in self.messages.iter().enumerate() {
1944 if ix > 0 {
1945 markdown.push('\n');
1946 }
1947 markdown.push_str(&message.to_markdown());
1948 }
1949
1950 if let Some(message) = self.pending_message.as_ref() {
1951 markdown.push('\n');
1952 markdown.push_str(&message.to_markdown());
1953 }
1954
1955 markdown
1956 }
1957
1958 fn advance_prompt_id(&mut self) {
1959 self.prompt_id = PromptId::new();
1960 }
1961
1962 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
1963 use LanguageModelCompletionError::*;
1964 use http_client::StatusCode;
1965
1966 // General strategy here:
1967 // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
1968 // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times, honoring any retry-after hint from the provider or backing off exponentially.
1969 // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
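        // For example, an HTTP 429 from the provider is retried up to
        // MAX_RETRY_ATTEMPTS (4) times starting from BASE_RETRY_DELAY (5s),
        // while an invalid API key is never retried.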
1970 match error {
1971 HttpResponseError {
1972 status_code: StatusCode::TOO_MANY_REQUESTS,
1973 ..
1974 } => Some(RetryStrategy::ExponentialBackoff {
1975 initial_delay: BASE_RETRY_DELAY,
1976 max_attempts: MAX_RETRY_ATTEMPTS,
1977 }),
1978 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
1979 Some(RetryStrategy::Fixed {
1980 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1981 max_attempts: MAX_RETRY_ATTEMPTS,
1982 })
1983 }
1984 UpstreamProviderError {
1985 status,
1986 retry_after,
1987 ..
1988 } => match *status {
1989 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
1990 Some(RetryStrategy::Fixed {
1991 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1992 max_attempts: MAX_RETRY_ATTEMPTS,
1993 })
1994 }
1995 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
1996 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
1997 // Internal Server Error could be anything, retry up to 3 times.
1998 max_attempts: 3,
1999 }),
2000 status => {
2001 // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
2002 // but we frequently get them in practice. See https://http.dev/529
2003 if status.as_u16() == 529 {
2004 Some(RetryStrategy::Fixed {
2005 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2006 max_attempts: MAX_RETRY_ATTEMPTS,
2007 })
2008 } else {
2009 Some(RetryStrategy::Fixed {
2010 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2011 max_attempts: 2,
2012 })
2013 }
2014 }
2015 },
2016 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2017 delay: BASE_RETRY_DELAY,
2018 max_attempts: 3,
2019 }),
2020 ApiReadResponseError { .. }
2021 | HttpSend { .. }
2022 | DeserializeResponse { .. }
2023 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2024 delay: BASE_RETRY_DELAY,
2025 max_attempts: 3,
2026 }),
2027 // Retrying these errors definitely shouldn't help.
2028 HttpResponseError {
2029 status_code:
2030 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2031 ..
2032 }
2033 | AuthenticationError { .. }
2034 | PermissionError { .. }
2035 | NoApiKey { .. }
2036 | ApiEndpointNotFound { .. }
2037 | PromptTooLarge { .. } => None,
2038 // These errors might be transient, so retry them once.
2039 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2040 delay: BASE_RETRY_DELAY,
2041 max_attempts: 1,
2042 }),
2043 // Retry all other 4xx and 5xx errors up to 3 times.
2044 HttpResponseError { status_code, .. }
2045 if status_code.is_client_error() || status_code.is_server_error() =>
2046 {
2047 Some(RetryStrategy::Fixed {
2048 delay: BASE_RETRY_DELAY,
2049 max_attempts: 3,
2050 })
2051 }
2052 Other(err)
2053 if err.is::<language_model::PaymentRequiredError>()
2054 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2055 {
2056 // Retrying won't help for Payment Required or Model Request Limit errors (where
2057 // the user must upgrade to usage-based billing to get more requests, or else wait
2058 // for a significant amount of time for the request limit to reset).
2059 None
2060 }
2061 // Conservatively retry any other errors, but only a couple of times.
2062 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2063 delay: BASE_RETRY_DELAY,
2064 max_attempts: 2,
2065 }),
2066 }
2067 }
2068}
2069
2070struct RunningTurn {
2071 /// Holds the task that handles agent interaction until the end of the turn.
2072 /// Survives across multiple requests as the model performs tool calls and
2073 /// we run tools and report their results.
2074 _task: Task<()>,
2075 /// The current event stream for the running turn. Used to report a final
2076 /// cancellation event if we cancel the turn.
2077 event_stream: ThreadEventStream,
2078 /// The tools that were enabled for this turn.
2079 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2080}
2081
2082impl RunningTurn {
2083 fn cancel(self) {
2084 log::debug!("Cancelling in progress turn");
2085 self.event_stream.send_canceled();
2086 }
2087}
2088
2089pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2090
2091impl EventEmitter<TokenUsageUpdated> for Thread {}
2092
2093pub struct TitleUpdated;
2094
2095impl EventEmitter<TitleUpdated> for Thread {}
2096
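/// A strongly-typed tool that the agent can call during a turn.
///
/// A minimal sketch of an implementation (the `EchoTool` type below is
/// illustrative only and not part of this crate; the sketch assumes
/// `String: Into<LanguageModelToolResultContent>`):
///
/// ```ignore
/// use std::sync::Arc;
///
/// use gpui::{App, SharedString, Task};
/// use schemars::JsonSchema;
/// use serde::{Deserialize, Serialize};
///
/// #[derive(Deserialize, Serialize, JsonSchema)]
/// struct EchoToolInput {
///     /// The text to echo back verbatim.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoToolInput;
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
///
/// // Type-erase the tool so it can live in the thread's tool map:
/// let tool: Arc<dyn AnyAgentTool> = EchoTool.erase();
/// ```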
2097pub trait AgentTool
2098where
2099 Self: 'static + Sized,
2100{
2101 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2102 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2103
2104 fn name() -> &'static str;
2105
2106 fn description(&self) -> SharedString {
2107 let schema = schemars::schema_for!(Self::Input);
2108 SharedString::new(
2109 schema
2110 .get("description")
2111 .and_then(|description| description.as_str())
2112 .unwrap_or_default(),
2113 )
2114 }
2115
2116 fn kind() -> acp::ToolKind;
2117
2118 /// The initial tool title to display. Can be updated during the tool run.
2119 fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString;
2120
2121 /// Returns the JSON schema that describes the tool's input.
2122 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Schema {
2123 crate::tool_schema::root_schema_for::<Self::Input>(format)
2124 }
2125
2126 /// Some tools only work with certain providers (e.g. because of how billing is handled).
2127 /// This lets a tool declare whether it is compatible with the given provider or should be filtered out.
2128 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2129 true
2130 }
2131
2132 /// Runs the tool with the provided input.
2133 fn run(
2134 self: Arc<Self>,
2135 input: Self::Input,
2136 event_stream: ToolCallEventStream,
2137 cx: &mut App,
2138 ) -> Task<Result<Self::Output>>;
2139
2140 /// Emits events for a previous execution of the tool.
2141 fn replay(
2142 &self,
2143 _input: Self::Input,
2144 _output: Self::Output,
2145 _event_stream: ToolCallEventStream,
2146 _cx: &mut App,
2147 ) -> Result<()> {
2148 Ok(())
2149 }
2150
2151 fn erase(self) -> Arc<dyn AnyAgentTool> {
2152 Arc::new(Erased(Arc::new(self)))
2153 }
2154}
2155
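/// Wrapper used by [`AgentTool::erase`] to expose a typed tool through the
/// dynamic [`AnyAgentTool`] interface.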
2156pub struct Erased<T>(T);
2157
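/// The result of running a tool: `llm_output` is the content sent back to the
/// model, while `raw_output` is the tool's full output serialized as JSON.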
2158pub struct AgentToolOutput {
2159 pub llm_output: LanguageModelToolResultContent,
2160 pub raw_output: serde_json::Value,
2161}
2162
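/// Object-safe, type-erased counterpart of [`AgentTool`], operating on
/// `serde_json::Value` inputs and outputs so tools of different types can be
/// stored together in the thread's tool maps.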
2163pub trait AnyAgentTool {
2164 fn name(&self) -> SharedString;
2165 fn description(&self) -> SharedString;
2166 fn kind(&self) -> acp::ToolKind;
2167 fn initial_title(&self, input: serde_json::Value) -> SharedString;
2168 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2169 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2170 true
2171 }
2172 fn run(
2173 self: Arc<Self>,
2174 input: serde_json::Value,
2175 event_stream: ToolCallEventStream,
2176 cx: &mut App,
2177 ) -> Task<Result<AgentToolOutput>>;
2178 fn replay(
2179 &self,
2180 input: serde_json::Value,
2181 output: serde_json::Value,
2182 event_stream: ToolCallEventStream,
2183 cx: &mut App,
2184 ) -> Result<()>;
2185}
2186
2187impl<T> AnyAgentTool for Erased<Arc<T>>
2188where
2189 T: AgentTool,
2190{
2191 fn name(&self) -> SharedString {
2192 T::name().into()
2193 }
2194
2195 fn description(&self) -> SharedString {
2196 self.0.description()
2197 }
2198
2199 fn kind(&self) -> agent_client_protocol::ToolKind {
2200 T::kind()
2201 }
2202
2203 fn initial_title(&self, input: serde_json::Value) -> SharedString {
2204 let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
2205 self.0.initial_title(parsed_input)
2206 }
2207
2208 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2209 let mut json = serde_json::to_value(self.0.input_schema(format))?;
2210 adapt_schema_to_format(&mut json, format)?;
2211 Ok(json)
2212 }
2213
2214 fn supported_provider(&self, provider: &LanguageModelProviderId) -> bool {
2215 self.0.supported_provider(provider)
2216 }
2217
2218 fn run(
2219 self: Arc<Self>,
2220 input: serde_json::Value,
2221 event_stream: ToolCallEventStream,
2222 cx: &mut App,
2223 ) -> Task<Result<AgentToolOutput>> {
2224 cx.spawn(async move |cx| {
2225 let input = serde_json::from_value(input)?;
2226 let output = cx
2227 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2228 .await?;
2229 let raw_output = serde_json::to_value(&output)?;
2230 Ok(AgentToolOutput {
2231 llm_output: output.into(),
2232 raw_output,
2233 })
2234 })
2235 }
2236
2237 fn replay(
2238 &self,
2239 input: serde_json::Value,
2240 output: serde_json::Value,
2241 event_stream: ToolCallEventStream,
2242 cx: &mut App,
2243 ) -> Result<()> {
2244 let input = serde_json::from_value(input)?;
2245 let output = serde_json::from_value(output)?;
2246 self.0.replay(input, output, event_stream, cx)
2247 }
2248}
2249
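/// Sender half of the per-turn event stream. Each helper wraps a
/// `ThreadEvent` and ignores send failures, which just mean the receiver was
/// dropped.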
2250#[derive(Clone)]
2251struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2252
2253impl ThreadEventStream {
2254 fn send_user_message(&self, message: &UserMessage) {
2255 self.0
2256 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2257 .ok();
2258 }
2259
2260 fn send_text(&self, text: &str) {
2261 self.0
2262 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2263 .ok();
2264 }
2265
2266 fn send_thinking(&self, text: &str) {
2267 self.0
2268 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2269 .ok();
2270 }
2271
2272 fn send_tool_call(
2273 &self,
2274 id: &LanguageModelToolUseId,
2275 title: SharedString,
2276 kind: acp::ToolKind,
2277 input: serde_json::Value,
2278 ) {
2279 self.0
2280 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2281 id,
2282 title.to_string(),
2283 kind,
2284 input,
2285 ))))
2286 .ok();
2287 }
2288
2289 fn initial_tool_call(
2290 id: &LanguageModelToolUseId,
2291 title: String,
2292 kind: acp::ToolKind,
2293 input: serde_json::Value,
2294 ) -> acp::ToolCall {
2295 acp::ToolCall {
2296 id: acp::ToolCallId(id.to_string().into()),
2297 title,
2298 kind,
2299 status: acp::ToolCallStatus::Pending,
2300 content: vec![],
2301 locations: vec![],
2302 raw_input: Some(input),
2303 raw_output: None,
2304 }
2305 }
2306
2307 fn update_tool_call_fields(
2308 &self,
2309 tool_use_id: &LanguageModelToolUseId,
2310 fields: acp::ToolCallUpdateFields,
2311 ) {
2312 self.0
2313 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2314 acp::ToolCallUpdate {
2315 id: acp::ToolCallId(tool_use_id.to_string().into()),
2316 fields,
2317 }
2318 .into(),
2319 )))
2320 .ok();
2321 }
2322
2323 fn send_retry(&self, status: acp_thread::RetryStatus) {
2324 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2325 }
2326
2327 fn send_stop(&self, reason: acp::StopReason) {
2328 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2329 }
2330
2331 fn send_canceled(&self) {
2332 self.0
2333 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2334 .ok();
2335 }
2336
2337 fn send_error(&self, error: impl Into<anyhow::Error>) {
2338 self.0.unbounded_send(Err(error.into())).ok();
2339 }
2340}
2341
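/// A view of the thread event stream scoped to a single tool call; every
/// update it emits is tagged with the originating tool use ID.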
2342#[derive(Clone)]
2343pub struct ToolCallEventStream {
2344 tool_use_id: LanguageModelToolUseId,
2345 stream: ThreadEventStream,
2346 fs: Option<Arc<dyn Fs>>,
2347}
2348
2349impl ToolCallEventStream {
2350 #[cfg(test)]
2351 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2352 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2353
2354 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2355
2356 (stream, ToolCallEventStreamReceiver(events_rx))
2357 }
2358
2359 fn new(
2360 tool_use_id: LanguageModelToolUseId,
2361 stream: ThreadEventStream,
2362 fs: Option<Arc<dyn Fs>>,
2363 ) -> Self {
2364 Self {
2365 tool_use_id,
2366 stream,
2367 fs,
2368 }
2369 }
2370
2371 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2372 self.stream
2373 .update_tool_call_fields(&self.tool_use_id, fields);
2374 }
2375
2376 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2377 self.stream
2378 .0
2379 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2380 acp_thread::ToolCallUpdateDiff {
2381 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2382 diff,
2383 }
2384 .into(),
2385 )))
2386 .ok();
2387 }
2388
2389 pub fn update_terminal(&self, terminal: Entity<acp_thread::Terminal>) {
2390 self.stream
2391 .0
2392 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2393 acp_thread::ToolCallUpdateTerminal {
2394 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2395 terminal,
2396 }
2397 .into(),
2398 )))
2399 .ok();
2400 }
2401
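    /// Requests user authorization for this tool call, offering Always Allow /
    /// Allow / Deny options. Resolves with `Ok(())` once the user allows the
    /// call (persisting "always allow" to the agent settings), or with an error
    /// if permission is denied. Skipped entirely when
    /// `always_allow_tool_actions` is already enabled.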
2402 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2403 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2404 return Task::ready(Ok(()));
2405 }
2406
2407 let (response_tx, response_rx) = oneshot::channel();
2408 self.stream
2409 .0
2410 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2411 ToolCallAuthorization {
2412 tool_call: acp::ToolCallUpdate {
2413 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2414 fields: acp::ToolCallUpdateFields {
2415 title: Some(title.into()),
2416 ..Default::default()
2417 },
2418 },
2419 options: vec![
2420 acp::PermissionOption {
2421 id: acp::PermissionOptionId("always_allow".into()),
2422 name: "Always Allow".into(),
2423 kind: acp::PermissionOptionKind::AllowAlways,
2424 },
2425 acp::PermissionOption {
2426 id: acp::PermissionOptionId("allow".into()),
2427 name: "Allow".into(),
2428 kind: acp::PermissionOptionKind::AllowOnce,
2429 },
2430 acp::PermissionOption {
2431 id: acp::PermissionOptionId("deny".into()),
2432 name: "Deny".into(),
2433 kind: acp::PermissionOptionKind::RejectOnce,
2434 },
2435 ],
2436 response: response_tx,
2437 },
2438 )))
2439 .ok();
2440 let fs = self.fs.clone();
2441 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2442 "always_allow" => {
2443 if let Some(fs) = fs.clone() {
2444 cx.update(|cx| {
2445 update_settings_file::<AgentSettings>(fs, cx, |settings, _| {
2446 settings.set_always_allow_tool_actions(true);
2447 });
2448 })?;
2449 }
2450
2451 Ok(())
2452 }
2453 "allow" => Ok(()),
2454 _ => Err(anyhow!("Permission to run tool denied by user")),
2455 })
2456 }
2457}
2458
2459#[cfg(test)]
2460pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2461
2462#[cfg(test)]
2463impl ToolCallEventStreamReceiver {
2464 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2465 let event = self.0.next().await;
2466 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2467 auth
2468 } else {
2469 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2470 }
2471 }
2472
2473 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2474 let event = self.0.next().await;
2475 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2476 update,
2477 )))) = event
2478 {
2479 update.fields
2480 } else {
2481 panic!("Expected update fields but got: {:?}", event);
2482 }
2483 }
2484
2485 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2486 let event = self.0.next().await;
2487 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2488 update,
2489 )))) = event
2490 {
2491 update.diff
2492 } else {
2493 panic!("Expected diff but got: {:?}", event);
2494 }
2495 }
2496
2497 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2498 let event = self.0.next().await;
2499 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2500 update,
2501 )))) = event
2502 {
2503 update.terminal
2504 } else {
2505 panic!("Expected terminal but got: {:?}", event);
2506 }
2507 }
2508}
2509
2510#[cfg(test)]
2511impl std::ops::Deref for ToolCallEventStreamReceiver {
2512 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2513
2514 fn deref(&self) -> &Self::Target {
2515 &self.0
2516 }
2517}
2518
2519#[cfg(test)]
2520impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2521 fn deref_mut(&mut self) -> &mut Self::Target {
2522 &mut self.0
2523 }
2524}
2525
2526impl From<&str> for UserMessageContent {
2527 fn from(text: &str) -> Self {
2528 Self::Text(text.into())
2529 }
2530}
2531
2532impl From<acp::ContentBlock> for UserMessageContent {
2533 fn from(value: acp::ContentBlock) -> Self {
2534 match value {
2535 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2536 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2537 acp::ContentBlock::Audio(_) => {
2538 // TODO
2539 Self::Text("[audio]".to_string())
2540 }
2541 acp::ContentBlock::ResourceLink(resource_link) => {
2542 match MentionUri::parse(&resource_link.uri) {
2543 Ok(uri) => Self::Mention {
2544 uri,
2545 content: String::new(),
2546 },
2547 Err(err) => {
2548 log::error!("Failed to parse mention link: {}", err);
2549 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2550 }
2551 }
2552 }
2553 acp::ContentBlock::Resource(resource) => match resource.resource {
2554 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2555 match MentionUri::parse(&resource.uri) {
2556 Ok(uri) => Self::Mention {
2557 uri,
2558 content: resource.text,
2559 },
2560 Err(err) => {
2561 log::error!("Failed to parse mention link: {}", err);
2562 Self::Text(
2563 MarkdownCodeBlock {
2564 tag: &resource.uri,
2565 text: &resource.text,
2566 }
2567 .to_string(),
2568 )
2569 }
2570 }
2571 }
2572 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2573 // TODO
2574 Self::Text("[blob]".to_string())
2575 }
2576 },
2577 }
2578 }
2579}
2580
2581impl From<UserMessageContent> for acp::ContentBlock {
2582 fn from(content: UserMessageContent) -> Self {
2583 match content {
2584 UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
2585 text,
2586 annotations: None,
2587 }),
2588 UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
2589 data: image.source.to_string(),
2590 mime_type: "image/png".to_string(),
2591 annotations: None,
2592 uri: None,
2593 }),
2594 UserMessageContent::Mention { uri, content } => {
2595 acp::ContentBlock::Resource(acp::EmbeddedResource {
2596 resource: acp::EmbeddedResourceResource::TextResourceContents(
2597 acp::TextResourceContents {
2598 mime_type: None,
2599 text: content,
2600 uri: uri.to_uri().to_string(),
2601 },
2602 ),
2603 annotations: None,
2604 })
2605 }
2606 }
2607 }
2608}
2609
2610fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2611 LanguageModelImage {
2612 source: image_content.data.into(),
2613 // TODO: make this optional?
2614 size: gpui::Size::new(0.into(), 0.into()),
2615 }
2616}