1use crate::{
2 ContextServerRegistry, CopyPathTool, CreateDirectoryTool, DbLanguageModel, DbThread,
3 DeletePathTool, DiagnosticsTool, EditFileTool, FetchTool, FindPathTool, GitState, GrepTool,
4 ListDirectoryTool, MovePathTool, NowTool, OpenTool, ProjectSnapshot, ReadFileTool,
5 SystemPromptTemplate, Template, Templates, TerminalTool, ThinkingTool, WebSearchTool,
6 WorktreeSnapshot,
7};
8use acp_thread::{MentionUri, UserMessageId};
9use action_log::ActionLog;
10
11use agent_client_protocol as acp;
12use agent_settings::{
13 AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
14 SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
15};
16use anyhow::{Context as _, Result, anyhow};
17use chrono::{DateTime, Utc};
18use client::{ModelRequestUsage, RequestUsage, UserStore};
19use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, Plan, UsageLimit};
20use collections::{HashMap, HashSet, IndexMap};
21use fs::Fs;
22use futures::stream;
23use futures::{
24 FutureExt,
25 channel::{mpsc, oneshot},
26 future::Shared,
27 stream::FuturesUnordered,
28};
29use git::repository::DiffType;
30use gpui::{
31 App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
32};
33use language_model::{
34 LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
35 LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
36 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
37 LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
38 LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage, ZED_CLOUD_PROVIDER_ID,
39};
40use project::{
41 Project,
42 git_store::{GitStore, RepositoryState},
43};
44use prompt_store::ProjectContext;
45use schemars::{JsonSchema, Schema};
46use serde::{Deserialize, Serialize};
47use settings::{Settings, update_settings_file};
48use smol::stream::StreamExt;
49use std::{
50 collections::BTreeMap,
51 ops::RangeInclusive,
52 path::Path,
53 rc::Rc,
54 sync::Arc,
55 time::{Duration, Instant},
56};
57use std::{fmt::Write, path::PathBuf};
58use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock};
59use uuid::Uuid;
60
61const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user";
62pub const MAX_TOOL_NAME_LENGTH: usize = 64;
63
64/// The ID of the user prompt that initiated a request.
65///
66/// This equates to the user physically submitting a message to the model (e.g., by pressing the Enter key).
67#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
68pub struct PromptId(Arc<str>);
69
70impl PromptId {
71 pub fn new() -> Self {
72 Self(Uuid::new_v4().to_string().into())
73 }
74}
75
76impl std::fmt::Display for PromptId {
77 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
78 write!(f, "{}", self.0)
79 }
80}
81
82pub(crate) const MAX_RETRY_ATTEMPTS: u8 = 4;
83pub(crate) const BASE_RETRY_DELAY: Duration = Duration::from_secs(5);
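// Worked example: assuming `retry_strategy_for` pairs `BASE_RETRY_DELAY` with
// `RetryStrategy::ExponentialBackoff` (the delay computed in
// `handle_completion_error` is `initial_delay * 2^(attempt - 1)`), the four
// attempts wait 5s, 10s, 20s, and 40s. A `Fixed` strategy reuses the same
// delay for every attempt.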
84
85#[derive(Debug, Clone)]
86enum RetryStrategy {
87 ExponentialBackoff {
88 initial_delay: Duration,
89 max_attempts: u8,
90 },
91 Fixed {
92 delay: Duration,
93 max_attempts: u8,
94 },
95}
96
97#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
98pub enum Message {
99 User(UserMessage),
100 Agent(AgentMessage),
101 Resume,
102}
103
104impl Message {
105 pub fn as_agent_message(&self) -> Option<&AgentMessage> {
106 match self {
107 Message::Agent(agent_message) => Some(agent_message),
108 _ => None,
109 }
110 }
111
112 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
113 match self {
114 Message::User(message) => vec![message.to_request()],
115 Message::Agent(message) => message.to_request(),
116 Message::Resume => vec![LanguageModelRequestMessage {
117 role: Role::User,
118 content: vec!["Continue where you left off".into()],
119 cache: false,
120 }],
121 }
122 }
123
124 pub fn to_markdown(&self) -> String {
125 match self {
126 Message::User(message) => message.to_markdown(),
127 Message::Agent(message) => message.to_markdown(),
128 Message::Resume => "[resume]\n".into(),
129 }
130 }
131
132 pub fn role(&self) -> Role {
133 match self {
134 Message::User(_) | Message::Resume => Role::User,
135 Message::Agent(_) => Role::Assistant,
136 }
137 }
138}
139
140#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
141pub struct UserMessage {
142 pub id: UserMessageId,
143 pub content: Vec<UserMessageContent>,
144}
145
146#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
147pub enum UserMessageContent {
148 Text(String),
149 Mention { uri: MentionUri, content: String },
150 Image(LanguageModelImage),
151}
152
153impl UserMessage {
154 pub fn to_markdown(&self) -> String {
155 let mut markdown = String::from("## User\n\n");
156
157 for content in &self.content {
158 match content {
159 UserMessageContent::Text(text) => {
160 markdown.push_str(text);
161 markdown.push('\n');
162 }
163 UserMessageContent::Image(_) => {
164 markdown.push_str("<image />\n");
165 }
166 UserMessageContent::Mention { uri, content } => {
167 if !content.is_empty() {
168 let _ = writeln!(&mut markdown, "{}\n\n{}", uri.as_link(), content);
169 } else {
170 let _ = writeln!(&mut markdown, "{}", uri.as_link());
171 }
172 }
173 }
174 }
175
176 markdown
177 }
178
179 fn to_request(&self) -> LanguageModelRequestMessage {
180 let mut message = LanguageModelRequestMessage {
181 role: Role::User,
182 content: Vec::with_capacity(self.content.len()),
183 cache: false,
184 };
185
186 const OPEN_CONTEXT: &str = "<context>\n\
187 The following items were attached by the user. \
188 They are up-to-date and don't need to be re-read.\n\n";
189
190 const OPEN_FILES_TAG: &str = "<files>";
191 const OPEN_DIRECTORIES_TAG: &str = "<directories>";
192 const OPEN_SYMBOLS_TAG: &str = "<symbols>";
193 const OPEN_SELECTIONS_TAG: &str = "<selections>";
194 const OPEN_THREADS_TAG: &str = "<threads>";
195 const OPEN_FETCH_TAG: &str = "<fetched_urls>";
        const OPEN_RULES_TAG: &str =
            "<user_rules>\nThe user has specified the following rules that should be applied:\n";
198
199 let mut file_context = OPEN_FILES_TAG.to_string();
200 let mut directory_context = OPEN_DIRECTORIES_TAG.to_string();
201 let mut symbol_context = OPEN_SYMBOLS_TAG.to_string();
202 let mut selection_context = OPEN_SELECTIONS_TAG.to_string();
203 let mut thread_context = OPEN_THREADS_TAG.to_string();
204 let mut fetch_context = OPEN_FETCH_TAG.to_string();
205 let mut rules_context = OPEN_RULES_TAG.to_string();
206
207 for chunk in &self.content {
208 let chunk = match chunk {
209 UserMessageContent::Text(text) => {
210 language_model::MessageContent::Text(text.clone())
211 }
212 UserMessageContent::Image(value) => {
213 language_model::MessageContent::Image(value.clone())
214 }
215 UserMessageContent::Mention { uri, content } => {
216 match uri {
217 MentionUri::File { abs_path } => {
218 write!(
219 &mut file_context,
220 "\n{}",
221 MarkdownCodeBlock {
222 tag: &codeblock_tag(abs_path, None),
223 text: &content.to_string(),
224 }
225 )
226 .ok();
227 }
228 MentionUri::PastedImage => {
229 debug_panic!("pasted image URI should not be used in mention content")
230 }
231 MentionUri::Directory { .. } => {
232 write!(&mut directory_context, "\n{}\n", content).ok();
233 }
234 MentionUri::Symbol {
235 abs_path: path,
236 line_range,
237 ..
238 } => {
239 write!(
240 &mut symbol_context,
241 "\n{}",
242 MarkdownCodeBlock {
243 tag: &codeblock_tag(path, Some(line_range)),
244 text: content
245 }
246 )
247 .ok();
248 }
249 MentionUri::Selection {
250 abs_path: path,
251 line_range,
252 ..
253 } => {
254 write!(
255 &mut selection_context,
256 "\n{}",
257 MarkdownCodeBlock {
258 tag: &codeblock_tag(
259 path.as_deref().unwrap_or("Untitled".as_ref()),
260 Some(line_range)
261 ),
262 text: content
263 }
264 )
265 .ok();
266 }
267 MentionUri::Thread { .. } => {
268 write!(&mut thread_context, "\n{}\n", content).ok();
269 }
270 MentionUri::TextThread { .. } => {
271 write!(&mut thread_context, "\n{}\n", content).ok();
272 }
273 MentionUri::Rule { .. } => {
274 write!(
275 &mut rules_context,
276 "\n{}",
277 MarkdownCodeBlock {
278 tag: "",
279 text: content
280 }
281 )
282 .ok();
283 }
284 MentionUri::Fetch { url } => {
285 write!(&mut fetch_context, "\nFetch: {}\n\n{}", url, content).ok();
286 }
287 }
288
289 language_model::MessageContent::Text(uri.as_link().to_string())
290 }
291 };
292
293 message.content.push(chunk);
294 }
295
296 let len_before_context = message.content.len();
297
298 if file_context.len() > OPEN_FILES_TAG.len() {
299 file_context.push_str("</files>\n");
300 message
301 .content
302 .push(language_model::MessageContent::Text(file_context));
303 }
304
305 if directory_context.len() > OPEN_DIRECTORIES_TAG.len() {
306 directory_context.push_str("</directories>\n");
307 message
308 .content
309 .push(language_model::MessageContent::Text(directory_context));
310 }
311
312 if symbol_context.len() > OPEN_SYMBOLS_TAG.len() {
313 symbol_context.push_str("</symbols>\n");
314 message
315 .content
316 .push(language_model::MessageContent::Text(symbol_context));
317 }
318
319 if selection_context.len() > OPEN_SELECTIONS_TAG.len() {
320 selection_context.push_str("</selections>\n");
321 message
322 .content
323 .push(language_model::MessageContent::Text(selection_context));
324 }
325
326 if thread_context.len() > OPEN_THREADS_TAG.len() {
327 thread_context.push_str("</threads>\n");
328 message
329 .content
330 .push(language_model::MessageContent::Text(thread_context));
331 }
332
333 if fetch_context.len() > OPEN_FETCH_TAG.len() {
334 fetch_context.push_str("</fetched_urls>\n");
335 message
336 .content
337 .push(language_model::MessageContent::Text(fetch_context));
338 }
339
340 if rules_context.len() > OPEN_RULES_TAG.len() {
341 rules_context.push_str("</user_rules>\n");
342 message
343 .content
344 .push(language_model::MessageContent::Text(rules_context));
345 }
346
347 if message.content.len() > len_before_context {
348 message.content.insert(
349 len_before_context,
350 language_model::MessageContent::Text(OPEN_CONTEXT.into()),
351 );
352 message
353 .content
354 .push(language_model::MessageContent::Text("</context>".into()));
355 }
356
357 message
358 }
359}
360
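// Produces the info string used for fenced code blocks in the attached context,
// e.g. `rs src/main.rs:10-20` for a file with extension `rs` and the zero-based
// line range 9..=19, or just `rs src/main.rs` when no range is given.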
361fn codeblock_tag(full_path: &Path, line_range: Option<&RangeInclusive<u32>>) -> String {
362 let mut result = String::new();
363
364 if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
365 let _ = write!(result, "{} ", extension);
366 }
367
368 let _ = write!(result, "{}", full_path.display());
369
370 if let Some(range) = line_range {
371 if range.start() == range.end() {
372 let _ = write!(result, ":{}", range.start() + 1);
373 } else {
374 let _ = write!(result, ":{}-{}", range.start() + 1, range.end() + 1);
375 }
376 }
377
378 result
379}
380
381impl AgentMessage {
382 pub fn to_markdown(&self) -> String {
383 let mut markdown = String::from("## Assistant\n\n");
384
385 for content in &self.content {
386 match content {
387 AgentMessageContent::Text(text) => {
388 markdown.push_str(text);
389 markdown.push('\n');
390 }
391 AgentMessageContent::Thinking { text, .. } => {
392 markdown.push_str("<think>");
393 markdown.push_str(text);
394 markdown.push_str("</think>\n");
395 }
396 AgentMessageContent::RedactedThinking(_) => {
397 markdown.push_str("<redacted_thinking />\n")
398 }
399 AgentMessageContent::ToolUse(tool_use) => {
400 markdown.push_str(&format!(
401 "**Tool Use**: {} (ID: {})\n",
402 tool_use.name, tool_use.id
403 ));
404 markdown.push_str(&format!(
405 "{}\n",
406 MarkdownCodeBlock {
407 tag: "json",
408 text: &format!("{:#}", tool_use.input)
409 }
410 ));
411 }
412 }
413 }
414
415 for tool_result in self.tool_results.values() {
416 markdown.push_str(&format!(
417 "**Tool Result**: {} (ID: {})\n\n",
418 tool_result.tool_name, tool_result.tool_use_id
419 ));
420 if tool_result.is_error {
421 markdown.push_str("**ERROR:**\n");
422 }
423
424 match &tool_result.content {
425 LanguageModelToolResultContent::Text(text) => {
426 writeln!(markdown, "{text}\n").ok();
427 }
428 LanguageModelToolResultContent::Image(_) => {
429 writeln!(markdown, "<image />\n").ok();
430 }
431 }
432
433 if let Some(output) = tool_result.output.as_ref() {
434 writeln!(
435 markdown,
436 "**Debug Output**:\n\n```json\n{}\n```\n",
437 serde_json::to_string_pretty(output).unwrap()
438 )
439 .unwrap();
440 }
441 }
442
443 markdown
444 }
445
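    /// Converts this agent message into request messages: at most one Assistant
    /// message carrying the text, thinking, and tool-use content, followed by at
    /// most one User message carrying the corresponding tool results, e.g.
    /// `[Assistant: Text + ToolUse(id)]` then `[User: ToolResult(id)]`.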
446 pub fn to_request(&self) -> Vec<LanguageModelRequestMessage> {
447 let mut assistant_message = LanguageModelRequestMessage {
448 role: Role::Assistant,
449 content: Vec::with_capacity(self.content.len()),
450 cache: false,
451 };
452 for chunk in &self.content {
453 match chunk {
454 AgentMessageContent::Text(text) => {
455 assistant_message
456 .content
457 .push(language_model::MessageContent::Text(text.clone()));
458 }
459 AgentMessageContent::Thinking { text, signature } => {
460 assistant_message
461 .content
462 .push(language_model::MessageContent::Thinking {
463 text: text.clone(),
464 signature: signature.clone(),
465 });
466 }
467 AgentMessageContent::RedactedThinking(value) => {
468 assistant_message.content.push(
469 language_model::MessageContent::RedactedThinking(value.clone()),
470 );
471 }
472 AgentMessageContent::ToolUse(tool_use) => {
473 if self.tool_results.contains_key(&tool_use.id) {
474 assistant_message
475 .content
476 .push(language_model::MessageContent::ToolUse(tool_use.clone()));
477 }
478 }
479 };
480 }
481
482 let mut user_message = LanguageModelRequestMessage {
483 role: Role::User,
484 content: Vec::new(),
485 cache: false,
486 };
487
488 for tool_result in self.tool_results.values() {
489 let mut tool_result = tool_result.clone();
490 // Surprisingly, the API fails if we return an empty string here.
491 // It thinks we are sending a tool use without a tool result.
492 if tool_result.content.is_empty() {
493 tool_result.content = "<Tool returned an empty string>".into();
494 }
495 user_message
496 .content
497 .push(language_model::MessageContent::ToolResult(tool_result));
498 }
499
500 let mut messages = Vec::new();
501 if !assistant_message.content.is_empty() {
502 messages.push(assistant_message);
503 }
504 if !user_message.content.is_empty() {
505 messages.push(user_message);
506 }
507 messages
508 }
509}
510
511#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
512pub struct AgentMessage {
513 pub content: Vec<AgentMessageContent>,
514 pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
515}
516
517#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
518pub enum AgentMessageContent {
519 Text(String),
520 Thinking {
521 text: String,
522 signature: Option<String>,
523 },
524 RedactedThinking(String),
525 ToolUse(LanguageModelToolUse),
526}
527
528pub trait TerminalHandle {
529 fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
530 fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
531 fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
532}
533
534pub trait ThreadEnvironment {
535 fn create_terminal(
536 &self,
537 command: String,
538 cwd: Option<PathBuf>,
539 output_byte_limit: Option<u64>,
540 cx: &mut AsyncApp,
541 ) -> Task<Result<Rc<dyn TerminalHandle>>>;
542}
543
544#[derive(Debug)]
545pub enum ThreadEvent {
546 UserMessage(UserMessage),
547 AgentText(String),
548 AgentThinking(String),
549 ToolCall(acp::ToolCall),
550 ToolCallUpdate(acp_thread::ToolCallUpdate),
551 ToolCallAuthorization(ToolCallAuthorization),
552 Retry(acp_thread::RetryStatus),
553 Stop(acp::StopReason),
554}
555
556#[derive(Debug)]
557pub struct NewTerminal {
558 pub command: String,
559 pub output_byte_limit: Option<u64>,
560 pub cwd: Option<PathBuf>,
561 pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
562}
563
564#[derive(Debug)]
565pub struct ToolCallAuthorization {
566 pub tool_call: acp::ToolCallUpdate,
567 pub options: Vec<acp::PermissionOption>,
568 pub response: oneshot::Sender<acp::PermissionOptionId>,
569}
570
571#[derive(Debug, thiserror::Error)]
572enum CompletionError {
573 #[error("max tokens")]
574 MaxTokens,
575 #[error("refusal")]
576 Refusal,
577 #[error(transparent)]
578 Other(#[from] anyhow::Error),
579}
580
581pub struct Thread {
582 id: acp::SessionId,
583 prompt_id: PromptId,
584 updated_at: DateTime<Utc>,
585 title: Option<SharedString>,
586 pending_title_generation: Option<Task<()>>,
587 pending_summary_generation: Option<Shared<Task<Option<SharedString>>>>,
588 summary: Option<SharedString>,
589 messages: Vec<Message>,
590 user_store: Entity<UserStore>,
591 completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results.
595 running_turn: Option<RunningTurn>,
596 pending_message: Option<AgentMessage>,
597 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
598 tool_use_limit_reached: bool,
599 request_token_usage: HashMap<UserMessageId, language_model::TokenUsage>,
600 #[allow(unused)]
601 cumulative_token_usage: TokenUsage,
602 #[allow(unused)]
603 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
604 context_server_registry: Entity<ContextServerRegistry>,
605 profile_id: AgentProfileId,
606 project_context: Entity<ProjectContext>,
607 templates: Arc<Templates>,
608 model: Option<Arc<dyn LanguageModel>>,
609 summarization_model: Option<Arc<dyn LanguageModel>>,
610 prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
611 pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
612 pub(crate) project: Entity<Project>,
613 pub(crate) action_log: Entity<ActionLog>,
614}
615
616impl Thread {
617 fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
618 let image = model.map_or(true, |model| model.supports_images());
619 acp::PromptCapabilities {
620 meta: None,
621 image,
622 audio: false,
623 embedded_context: true,
624 }
625 }
626
627 pub fn new(
628 project: Entity<Project>,
629 project_context: Entity<ProjectContext>,
630 context_server_registry: Entity<ContextServerRegistry>,
631 templates: Arc<Templates>,
632 model: Option<Arc<dyn LanguageModel>>,
633 cx: &mut Context<Self>,
634 ) -> Self {
635 let profile_id = AgentSettings::get_global(cx).default_profile.clone();
636 let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
637 let (prompt_capabilities_tx, prompt_capabilities_rx) =
638 watch::channel(Self::prompt_capabilities(model.as_deref()));
639 Self {
640 id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
641 prompt_id: PromptId::new(),
642 updated_at: Utc::now(),
643 title: None,
644 pending_title_generation: None,
645 pending_summary_generation: None,
646 summary: None,
647 messages: Vec::new(),
648 user_store: project.read(cx).user_store(),
649 completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
650 running_turn: None,
651 pending_message: None,
652 tools: BTreeMap::default(),
653 tool_use_limit_reached: false,
654 request_token_usage: HashMap::default(),
655 cumulative_token_usage: TokenUsage::default(),
656 initial_project_snapshot: {
657 let project_snapshot = Self::project_snapshot(project.clone(), cx);
658 cx.foreground_executor()
659 .spawn(async move { Some(project_snapshot.await) })
660 .shared()
661 },
662 context_server_registry,
663 profile_id,
664 project_context,
665 templates,
666 model,
667 summarization_model: None,
668 prompt_capabilities_tx,
669 prompt_capabilities_rx,
670 project,
671 action_log,
672 }
673 }
674
675 pub fn id(&self) -> &acp::SessionId {
676 &self.id
677 }
678
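    /// Replays the persisted conversation as a stream of [`ThreadEvent`]s
    /// without contacting the language model, so a restored thread can be
    /// rendered as if it had just been streamed.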
679 pub fn replay(
680 &mut self,
681 cx: &mut Context<Self>,
682 ) -> mpsc::UnboundedReceiver<Result<ThreadEvent>> {
683 let (tx, rx) = mpsc::unbounded();
684 let stream = ThreadEventStream(tx);
685 for message in &self.messages {
686 match message {
687 Message::User(user_message) => stream.send_user_message(user_message),
688 Message::Agent(assistant_message) => {
689 for content in &assistant_message.content {
690 match content {
691 AgentMessageContent::Text(text) => stream.send_text(text),
692 AgentMessageContent::Thinking { text, .. } => {
693 stream.send_thinking(text)
694 }
695 AgentMessageContent::RedactedThinking(_) => {}
696 AgentMessageContent::ToolUse(tool_use) => {
697 self.replay_tool_call(
698 tool_use,
699 assistant_message.tool_results.get(&tool_use.id),
700 &stream,
701 cx,
702 );
703 }
704 }
705 }
706 }
707 Message::Resume => {}
708 }
709 }
710 rx
711 }
712
713 fn replay_tool_call(
714 &self,
715 tool_use: &LanguageModelToolUse,
716 tool_result: Option<&LanguageModelToolResult>,
717 stream: &ThreadEventStream,
718 cx: &mut Context<Self>,
719 ) {
        let tool = self.tools.get(tool_use.name.as_ref()).cloned().or_else(|| {
            self.context_server_registry
                .read(cx)
                .servers()
                .find_map(|(_, tools)| tools.get(tool_use.name.as_ref()).cloned())
        });
732
733 let Some(tool) = tool else {
734 stream
735 .0
736 .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
737 meta: None,
738 id: acp::ToolCallId(tool_use.id.to_string().into()),
739 title: tool_use.name.to_string(),
740 kind: acp::ToolKind::Other,
741 status: acp::ToolCallStatus::Failed,
742 content: Vec::new(),
743 locations: Vec::new(),
744 raw_input: Some(tool_use.input.clone()),
745 raw_output: None,
746 })))
747 .ok();
748 return;
749 };
750
751 let title = tool.initial_title(tool_use.input.clone(), cx);
752 let kind = tool.kind();
753 stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
754
755 let output = tool_result
756 .as_ref()
757 .and_then(|result| result.output.clone());
758 if let Some(output) = output.clone() {
759 let tool_event_stream = ToolCallEventStream::new(
760 tool_use.id.clone(),
761 stream.clone(),
762 Some(self.project.read(cx).fs().clone()),
763 );
764 tool.replay(tool_use.input.clone(), output, tool_event_stream, cx)
765 .log_err();
766 }
767
768 stream.update_tool_call_fields(
769 &tool_use.id,
770 acp::ToolCallUpdateFields {
771 status: Some(
772 tool_result
773 .as_ref()
774 .map_or(acp::ToolCallStatus::Failed, |result| {
775 if result.is_error {
776 acp::ToolCallStatus::Failed
777 } else {
778 acp::ToolCallStatus::Completed
779 }
780 }),
781 ),
782 raw_output: output,
783 ..Default::default()
784 },
785 );
786 }
787
788 pub fn from_db(
789 id: acp::SessionId,
790 db_thread: DbThread,
791 project: Entity<Project>,
792 project_context: Entity<ProjectContext>,
793 context_server_registry: Entity<ContextServerRegistry>,
794 templates: Arc<Templates>,
795 cx: &mut Context<Self>,
796 ) -> Self {
797 let profile_id = db_thread
798 .profile
799 .unwrap_or_else(|| AgentSettings::get_global(cx).default_profile.clone());
800 let model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
801 db_thread
802 .model
803 .and_then(|model| {
804 let model = SelectedModel {
805 provider: model.provider.clone().into(),
806 model: model.model.into(),
807 };
808 registry.select_model(&model, cx)
809 })
810 .or_else(|| registry.default_model())
811 .map(|model| model.model)
812 });
813 let (prompt_capabilities_tx, prompt_capabilities_rx) =
814 watch::channel(Self::prompt_capabilities(model.as_deref()));
815
816 let action_log = cx.new(|_| ActionLog::new(project.clone()));
817
818 Self {
819 id,
820 prompt_id: PromptId::new(),
821 title: if db_thread.title.is_empty() {
822 None
823 } else {
824 Some(db_thread.title.clone())
825 },
826 pending_title_generation: None,
827 pending_summary_generation: None,
828 summary: db_thread.detailed_summary,
829 messages: db_thread.messages,
830 user_store: project.read(cx).user_store(),
831 completion_mode: db_thread.completion_mode.unwrap_or_default(),
832 running_turn: None,
833 pending_message: None,
834 tools: BTreeMap::default(),
835 tool_use_limit_reached: false,
836 request_token_usage: db_thread.request_token_usage.clone(),
837 cumulative_token_usage: db_thread.cumulative_token_usage,
838 initial_project_snapshot: Task::ready(db_thread.initial_project_snapshot).shared(),
839 context_server_registry,
840 profile_id,
841 project_context,
842 templates,
843 model,
844 summarization_model: None,
845 project,
846 action_log,
847 updated_at: db_thread.updated_at,
848 prompt_capabilities_tx,
849 prompt_capabilities_rx,
850 }
851 }
852
853 pub fn to_db(&self, cx: &App) -> Task<DbThread> {
854 let initial_project_snapshot = self.initial_project_snapshot.clone();
855 let mut thread = DbThread {
856 title: self.title(),
857 messages: self.messages.clone(),
858 updated_at: self.updated_at,
859 detailed_summary: self.summary.clone(),
860 initial_project_snapshot: None,
861 cumulative_token_usage: self.cumulative_token_usage,
862 request_token_usage: self.request_token_usage.clone(),
863 model: self.model.as_ref().map(|model| DbLanguageModel {
864 provider: model.provider_id().to_string(),
865 model: model.name().0.to_string(),
866 }),
867 completion_mode: Some(self.completion_mode),
868 profile: Some(self.profile_id.clone()),
869 };
870
871 cx.background_spawn(async move {
872 let initial_project_snapshot = initial_project_snapshot.await;
873 thread.initial_project_snapshot = initial_project_snapshot;
874 thread
875 })
876 }
877
    /// Create a snapshot of the current project state, including git information for each visible worktree.
879 fn project_snapshot(
880 project: Entity<Project>,
881 cx: &mut Context<Self>,
882 ) -> Task<Arc<ProjectSnapshot>> {
883 let git_store = project.read(cx).git_store().clone();
884 let worktree_snapshots: Vec<_> = project
885 .read(cx)
886 .visible_worktrees(cx)
887 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
888 .collect();
889
890 cx.spawn(async move |_, _| {
891 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
892
893 Arc::new(ProjectSnapshot {
894 worktree_snapshots,
895 timestamp: Utc::now(),
896 })
897 })
898 }
899
900 fn worktree_snapshot(
901 worktree: Entity<project::Worktree>,
902 git_store: Entity<GitStore>,
903 cx: &App,
904 ) -> Task<WorktreeSnapshot> {
905 cx.spawn(async move |cx| {
906 // Get worktree path and snapshot
907 let worktree_info = cx.update(|app_cx| {
908 let worktree = worktree.read(app_cx);
909 let path = worktree.abs_path().to_string_lossy().into_owned();
910 let snapshot = worktree.snapshot();
911 (path, snapshot)
912 });
913
914 let Ok((worktree_path, _snapshot)) = worktree_info else {
915 return WorktreeSnapshot {
916 worktree_path: String::new(),
917 git_state: None,
918 };
919 };
920
921 let git_state = git_store
922 .update(cx, |git_store, cx| {
923 git_store
924 .repositories()
925 .values()
926 .find(|repo| {
927 repo.read(cx)
928 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
929 .is_some()
930 })
931 .cloned()
932 })
933 .ok()
934 .flatten()
935 .map(|repo| {
936 repo.update(cx, |repo, _| {
937 let current_branch =
938 repo.branch.as_ref().map(|branch| branch.name().to_owned());
939 repo.send_job(None, |state, _| async move {
940 let RepositoryState::Local { backend, .. } = state else {
941 return GitState {
942 remote_url: None,
943 head_sha: None,
944 current_branch,
945 diff: None,
946 };
947 };
948
949 let remote_url = backend.remote_url("origin");
950 let head_sha = backend.head_sha().await;
951 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
952
953 GitState {
954 remote_url,
955 head_sha,
956 current_branch,
957 diff,
958 }
959 })
960 })
961 });
962
            let git_state = match git_state {
                Some(Ok(git_state)) => git_state.await.ok(),
                _ => None,
            };
970
971 WorktreeSnapshot {
972 worktree_path,
973 git_state,
974 }
975 })
976 }
977
978 pub fn project_context(&self) -> &Entity<ProjectContext> {
979 &self.project_context
980 }
981
982 pub fn project(&self) -> &Entity<Project> {
983 &self.project
984 }
985
986 pub fn action_log(&self) -> &Entity<ActionLog> {
987 &self.action_log
988 }
989
990 pub fn is_empty(&self) -> bool {
991 self.messages.is_empty() && self.title.is_none()
992 }
993
994 pub fn model(&self) -> Option<&Arc<dyn LanguageModel>> {
995 self.model.as_ref()
996 }
997
998 pub fn set_model(&mut self, model: Arc<dyn LanguageModel>, cx: &mut Context<Self>) {
999 let old_usage = self.latest_token_usage();
1000 self.model = Some(model);
1001 let new_caps = Self::prompt_capabilities(self.model.as_deref());
1002 let new_usage = self.latest_token_usage();
1003 if old_usage != new_usage {
1004 cx.emit(TokenUsageUpdated(new_usage));
1005 }
1006 self.prompt_capabilities_tx.send(new_caps).log_err();
1007 cx.notify()
1008 }
1009
1010 pub fn summarization_model(&self) -> Option<&Arc<dyn LanguageModel>> {
1011 self.summarization_model.as_ref()
1012 }
1013
1014 pub fn set_summarization_model(
1015 &mut self,
1016 model: Option<Arc<dyn LanguageModel>>,
1017 cx: &mut Context<Self>,
1018 ) {
1019 self.summarization_model = model;
1020 cx.notify()
1021 }
1022
1023 pub fn completion_mode(&self) -> CompletionMode {
1024 self.completion_mode
1025 }
1026
1027 pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
1028 let old_usage = self.latest_token_usage();
1029 self.completion_mode = mode;
1030 let new_usage = self.latest_token_usage();
1031 if old_usage != new_usage {
1032 cx.emit(TokenUsageUpdated(new_usage));
1033 }
1034 cx.notify()
1035 }
1036
1037 #[cfg(any(test, feature = "test-support"))]
1038 pub fn last_message(&self) -> Option<Message> {
1039 if let Some(message) = self.pending_message.clone() {
1040 Some(Message::Agent(message))
1041 } else {
1042 self.messages.last().cloned()
1043 }
1044 }
1045
1046 pub fn add_default_tools(
1047 &mut self,
1048 environment: Rc<dyn ThreadEnvironment>,
1049 cx: &mut Context<Self>,
1050 ) {
1051 let language_registry = self.project.read(cx).languages().clone();
1052 self.add_tool(CopyPathTool::new(self.project.clone()));
1053 self.add_tool(CreateDirectoryTool::new(self.project.clone()));
1054 self.add_tool(DeletePathTool::new(
1055 self.project.clone(),
1056 self.action_log.clone(),
1057 ));
1058 self.add_tool(DiagnosticsTool::new(self.project.clone()));
1059 self.add_tool(EditFileTool::new(
1060 self.project.clone(),
1061 cx.weak_entity(),
1062 language_registry,
1063 Templates::new(),
1064 ));
1065 self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
1066 self.add_tool(FindPathTool::new(self.project.clone()));
1067 self.add_tool(GrepTool::new(self.project.clone()));
1068 self.add_tool(ListDirectoryTool::new(self.project.clone()));
1069 self.add_tool(MovePathTool::new(self.project.clone()));
1070 self.add_tool(NowTool);
1071 self.add_tool(OpenTool::new(self.project.clone()));
1072 self.add_tool(ReadFileTool::new(
1073 self.project.clone(),
1074 self.action_log.clone(),
1075 ));
1076 self.add_tool(TerminalTool::new(self.project.clone(), environment));
1077 self.add_tool(ThinkingTool);
1078 self.add_tool(WebSearchTool);
1079 }
1080
1081 pub fn add_tool<T: AgentTool>(&mut self, tool: T) {
1082 self.tools.insert(T::name().into(), tool.erase());
1083 }
1084
1085 pub fn remove_tool(&mut self, name: &str) -> bool {
1086 self.tools.remove(name).is_some()
1087 }
1088
1089 pub fn profile(&self) -> &AgentProfileId {
1090 &self.profile_id
1091 }
1092
1093 pub fn set_profile(&mut self, profile_id: AgentProfileId) {
1094 self.profile_id = profile_id;
1095 }
1096
1097 pub fn cancel(&mut self, cx: &mut Context<Self>) {
1098 if let Some(running_turn) = self.running_turn.take() {
1099 running_turn.cancel();
1100 }
1101 self.flush_pending_message(cx);
1102 }
1103
1104 fn update_token_usage(&mut self, update: language_model::TokenUsage, cx: &mut Context<Self>) {
1105 let Some(last_user_message) = self.last_user_message() else {
1106 return;
1107 };
1108
1109 self.request_token_usage
1110 .insert(last_user_message.id.clone(), update);
1111 cx.emit(TokenUsageUpdated(self.latest_token_usage()));
1112 cx.notify();
1113 }
1114
1115 pub fn truncate(&mut self, message_id: UserMessageId, cx: &mut Context<Self>) -> Result<()> {
1116 self.cancel(cx);
1117 let Some(position) = self.messages.iter().position(
1118 |msg| matches!(msg, Message::User(UserMessage { id, .. }) if id == &message_id),
1119 ) else {
1120 return Err(anyhow!("Message not found"));
1121 };
1122
1123 for message in self.messages.drain(position..) {
1124 match message {
1125 Message::User(message) => {
1126 self.request_token_usage.remove(&message.id);
1127 }
1128 Message::Agent(_) | Message::Resume => {}
1129 }
1130 }
1131 self.clear_summary();
1132 cx.notify();
1133 Ok(())
1134 }
1135
1136 pub fn latest_token_usage(&self) -> Option<acp_thread::TokenUsage> {
1137 let last_user_message = self.last_user_message()?;
1138 let tokens = self.request_token_usage.get(&last_user_message.id)?;
1139 let model = self.model.clone()?;
1140
1141 Some(acp_thread::TokenUsage {
1142 max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
1143 used_tokens: tokens.total_tokens(),
1144 })
1145 }
1146
1147 pub fn resume(
1148 &mut self,
1149 cx: &mut Context<Self>,
1150 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1151 self.messages.push(Message::Resume);
1152 cx.notify();
1153
1154 log::debug!("Total messages in thread: {}", self.messages.len());
1155 self.run_turn(cx)
1156 }
1157
    /// Sending a message results in the model streaming a response, which may include tool calls.
    /// After requesting tools, the model stops and waits for the outstanding tool calls to complete and for their results to be sent back.
    /// The returned channel reports every time the model stops before erroring or ending its turn.
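    ///
    /// # Example
    ///
    /// A minimal usage sketch (assumes an already-configured model, an existing
    /// `UserMessageId`, and a `Context<Thread>`; error handling elided):
    ///
    /// ```ignore
    /// let events = thread.send(
    ///     message_id,
    ///     [UserMessageContent::Text("Summarize the open buffer".into())],
    ///     cx,
    /// )?;
    /// // Drain `events` to observe streamed text, tool calls, and the final stop event.
    /// ```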
1161 pub fn send<T>(
1162 &mut self,
1163 id: UserMessageId,
1164 content: impl IntoIterator<Item = T>,
1165 cx: &mut Context<Self>,
1166 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>>
1167 where
1168 T: Into<UserMessageContent>,
1169 {
1170 let model = self.model().context("No language model configured")?;
1171
1172 log::info!("Thread::send called with model: {}", model.name().0);
1173 self.advance_prompt_id();
1174
1175 let content = content.into_iter().map(Into::into).collect::<Vec<_>>();
1176 log::debug!("Thread::send content: {:?}", content);
1177
1178 self.messages
1179 .push(Message::User(UserMessage { id, content }));
1180 cx.notify();
1181
1182 log::debug!("Total messages in thread: {}", self.messages.len());
1183 self.run_turn(cx)
1184 }
1185
1186 fn run_turn(
1187 &mut self,
1188 cx: &mut Context<Self>,
1189 ) -> Result<mpsc::UnboundedReceiver<Result<ThreadEvent>>> {
1190 self.cancel(cx);
1191
1192 let model = self.model.clone().context("No language model configured")?;
1193 let profile = AgentSettings::get_global(cx)
1194 .profiles
1195 .get(&self.profile_id)
1196 .context("Profile not found")?;
1197 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
1198 let event_stream = ThreadEventStream(events_tx);
1199 let message_ix = self.messages.len().saturating_sub(1);
1200 self.tool_use_limit_reached = false;
1201 self.clear_summary();
1202 self.running_turn = Some(RunningTurn {
1203 event_stream: event_stream.clone(),
1204 tools: self.enabled_tools(profile, &model, cx),
1205 _task: cx.spawn(async move |this, cx| {
1206 log::debug!("Starting agent turn execution");
1207
1208 let turn_result = Self::run_turn_internal(&this, model, &event_stream, cx).await;
1209 _ = this.update(cx, |this, cx| this.flush_pending_message(cx));
1210
1211 match turn_result {
1212 Ok(()) => {
1213 log::debug!("Turn execution completed");
1214 event_stream.send_stop(acp::StopReason::EndTurn);
1215 }
1216 Err(error) => {
1217 log::error!("Turn execution failed: {:?}", error);
1218 match error.downcast::<CompletionError>() {
1219 Ok(CompletionError::Refusal) => {
1220 event_stream.send_stop(acp::StopReason::Refusal);
1221 _ = this.update(cx, |this, _| this.messages.truncate(message_ix));
1222 }
1223 Ok(CompletionError::MaxTokens) => {
1224 event_stream.send_stop(acp::StopReason::MaxTokens);
1225 }
1226 Ok(CompletionError::Other(error)) | Err(error) => {
1227 event_stream.send_error(error);
1228 }
1229 }
1230 }
1231 }
1232
1233 _ = this.update(cx, |this, _| this.running_turn.take());
1234 }),
1235 });
1236 Ok(events_rx)
1237 }
1238
1239 async fn run_turn_internal(
1240 this: &WeakEntity<Self>,
1241 model: Arc<dyn LanguageModel>,
1242 event_stream: &ThreadEventStream,
1243 cx: &mut AsyncApp,
1244 ) -> Result<()> {
1245 let mut attempt = 0;
1246 let mut intent = CompletionIntent::UserPrompt;
1247 loop {
1248 let request =
1249 this.update(cx, |this, cx| this.build_completion_request(intent, cx))??;
1250
1251 telemetry::event!(
1252 "Agent Thread Completion",
1253 thread_id = this.read_with(cx, |this, _| this.id.to_string())?,
1254 prompt_id = this.read_with(cx, |this, _| this.prompt_id.to_string())?,
1255 model = model.telemetry_id(),
1256 model_provider = model.provider_id().to_string(),
1257 attempt
1258 );
1259
1260 log::debug!("Calling model.stream_completion, attempt {}", attempt);
1261
1262 let (mut events, mut error) = match model.stream_completion(request, cx).await {
1263 Ok(events) => (events, None),
1264 Err(err) => (stream::empty().boxed(), Some(err)),
1265 };
1266 let mut tool_results = FuturesUnordered::new();
1267 while let Some(event) = events.next().await {
1268 log::trace!("Received completion event: {:?}", event);
1269 match event {
1270 Ok(event) => {
1271 tool_results.extend(this.update(cx, |this, cx| {
1272 this.handle_completion_event(event, event_stream, cx)
1273 })??);
1274 }
1275 Err(err) => {
1276 error = Some(err);
1277 break;
1278 }
1279 }
1280 }
1281
1282 let end_turn = tool_results.is_empty();
1283 while let Some(tool_result) = tool_results.next().await {
1284 log::debug!("Tool finished {:?}", tool_result);
1285
1286 event_stream.update_tool_call_fields(
1287 &tool_result.tool_use_id,
1288 acp::ToolCallUpdateFields {
1289 status: Some(if tool_result.is_error {
1290 acp::ToolCallStatus::Failed
1291 } else {
1292 acp::ToolCallStatus::Completed
1293 }),
1294 raw_output: tool_result.output.clone(),
1295 ..Default::default()
1296 },
1297 );
1298 this.update(cx, |this, _cx| {
1299 this.pending_message()
1300 .tool_results
1301 .insert(tool_result.tool_use_id.clone(), tool_result);
1302 })?;
1303 }
1304
1305 this.update(cx, |this, cx| {
1306 this.flush_pending_message(cx);
1307 if this.title.is_none() && this.pending_title_generation.is_none() {
1308 this.generate_title(cx);
1309 }
1310 })?;
1311
1312 if let Some(error) = error {
1313 attempt += 1;
1314 let retry = this.update(cx, |this, cx| {
1315 let user_store = this.user_store.read(cx);
1316 this.handle_completion_error(error, attempt, user_store.plan())
1317 })??;
1318 let timer = cx.background_executor().timer(retry.duration);
1319 event_stream.send_retry(retry);
1320 timer.await;
1321 this.update(cx, |this, _cx| {
1322 if let Some(Message::Agent(message)) = this.messages.last() {
1323 if message.tool_results.is_empty() {
1324 intent = CompletionIntent::UserPrompt;
1325 this.messages.push(Message::Resume);
1326 }
1327 }
1328 })?;
1329 } else if this.read_with(cx, |this, _| this.tool_use_limit_reached)? {
1330 return Err(language_model::ToolUseLimitReachedError.into());
1331 } else if end_turn {
1332 return Ok(());
1333 } else {
1334 intent = CompletionIntent::ToolResults;
1335 attempt = 0;
1336 }
1337 }
1338 }
1339
1340 fn handle_completion_error(
1341 &mut self,
1342 error: LanguageModelCompletionError,
1343 attempt: u8,
1344 plan: Option<Plan>,
1345 ) -> Result<acp_thread::RetryStatus> {
1346 let Some(model) = self.model.as_ref() else {
1347 return Err(anyhow!(error));
1348 };
1349
1350 let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
1351 match plan {
1352 Some(Plan::V2(_)) => true,
1353 Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
1354 None => false,
1355 }
1356 } else {
1357 true
1358 };
1359
1360 if !auto_retry {
1361 return Err(anyhow!(error));
1362 }
1363
1364 let Some(strategy) = Self::retry_strategy_for(&error) else {
1365 return Err(anyhow!(error));
1366 };
1367
1368 let max_attempts = match &strategy {
1369 RetryStrategy::ExponentialBackoff { max_attempts, .. } => *max_attempts,
1370 RetryStrategy::Fixed { max_attempts, .. } => *max_attempts,
1371 };
1372
1373 if attempt > max_attempts {
1374 return Err(anyhow!(error));
1375 }
1376
1377 let delay = match &strategy {
1378 RetryStrategy::ExponentialBackoff { initial_delay, .. } => {
1379 let delay_secs = initial_delay.as_secs() * 2u64.pow((attempt - 1) as u32);
1380 Duration::from_secs(delay_secs)
1381 }
1382 RetryStrategy::Fixed { delay, .. } => *delay,
1383 };
1384 log::debug!("Retry attempt {attempt} with delay {delay:?}");
1385
1386 Ok(acp_thread::RetryStatus {
1387 last_error: error.to_string().into(),
1388 attempt: attempt as usize,
1389 max_attempts: max_attempts as usize,
1390 started_at: Instant::now(),
1391 duration: delay,
1392 })
1393 }
1394
1395 /// A helper method that's called on every streamed completion event.
1396 /// Returns an optional tool result task, which the main agentic loop will
1397 /// send back to the model when it resolves.
1398 fn handle_completion_event(
1399 &mut self,
1400 event: LanguageModelCompletionEvent,
1401 event_stream: &ThreadEventStream,
1402 cx: &mut Context<Self>,
1403 ) -> Result<Option<Task<LanguageModelToolResult>>> {
1404 log::trace!("Handling streamed completion event: {:?}", event);
1405 use LanguageModelCompletionEvent::*;
1406
1407 match event {
1408 StartMessage { .. } => {
1409 self.flush_pending_message(cx);
1410 self.pending_message = Some(AgentMessage::default());
1411 }
1412 Text(new_text) => self.handle_text_event(new_text, event_stream, cx),
1413 Thinking { text, signature } => {
1414 self.handle_thinking_event(text, signature, event_stream, cx)
1415 }
1416 RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
1417 ToolUse(tool_use) => {
1418 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
1419 }
1420 ToolUseJsonParseError {
1421 id,
1422 tool_name,
1423 raw_input,
1424 json_parse_error,
1425 } => {
1426 return Ok(Some(Task::ready(
1427 self.handle_tool_use_json_parse_error_event(
1428 id,
1429 tool_name,
1430 raw_input,
1431 json_parse_error,
1432 ),
1433 )));
1434 }
1435 UsageUpdate(usage) => {
1436 telemetry::event!(
1437 "Agent Thread Completion Usage Updated",
1438 thread_id = self.id.to_string(),
1439 prompt_id = self.prompt_id.to_string(),
1440 model = self.model.as_ref().map(|m| m.telemetry_id()),
1441 model_provider = self.model.as_ref().map(|m| m.provider_id().to_string()),
1442 input_tokens = usage.input_tokens,
1443 output_tokens = usage.output_tokens,
1444 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1445 cache_read_input_tokens = usage.cache_read_input_tokens,
1446 );
1447 self.update_token_usage(usage, cx);
1448 }
1449 StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => {
1450 self.update_model_request_usage(amount, limit, cx);
1451 }
1452 StatusUpdate(
1453 CompletionRequestStatus::Started
1454 | CompletionRequestStatus::Queued { .. }
1455 | CompletionRequestStatus::Failed { .. },
1456 ) => {}
1457 StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => {
1458 self.tool_use_limit_reached = true;
1459 }
1460 Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()),
1461 Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()),
1462 Stop(StopReason::ToolUse | StopReason::EndTurn) => {}
1463 }
1464
1465 Ok(None)
1466 }
1467
1468 fn handle_text_event(
1469 &mut self,
1470 new_text: String,
1471 event_stream: &ThreadEventStream,
1472 cx: &mut Context<Self>,
1473 ) {
1474 event_stream.send_text(&new_text);
1475
1476 let last_message = self.pending_message();
1477 if let Some(AgentMessageContent::Text(text)) = last_message.content.last_mut() {
1478 text.push_str(&new_text);
1479 } else {
1480 last_message
1481 .content
1482 .push(AgentMessageContent::Text(new_text));
1483 }
1484
1485 cx.notify();
1486 }
1487
1488 fn handle_thinking_event(
1489 &mut self,
1490 new_text: String,
1491 new_signature: Option<String>,
1492 event_stream: &ThreadEventStream,
1493 cx: &mut Context<Self>,
1494 ) {
1495 event_stream.send_thinking(&new_text);
1496
1497 let last_message = self.pending_message();
1498 if let Some(AgentMessageContent::Thinking { text, signature }) =
1499 last_message.content.last_mut()
1500 {
1501 text.push_str(&new_text);
1502 *signature = new_signature.or(signature.take());
1503 } else {
1504 last_message.content.push(AgentMessageContent::Thinking {
1505 text: new_text,
1506 signature: new_signature,
1507 });
1508 }
1509
1510 cx.notify();
1511 }
1512
1513 fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
1514 let last_message = self.pending_message();
1515 last_message
1516 .content
1517 .push(AgentMessageContent::RedactedThinking(data));
1518 cx.notify();
1519 }
1520
1521 fn handle_tool_use_event(
1522 &mut self,
1523 tool_use: LanguageModelToolUse,
1524 event_stream: &ThreadEventStream,
1525 cx: &mut Context<Self>,
1526 ) -> Option<Task<LanguageModelToolResult>> {
1527 cx.notify();
1528
1529 let tool = self.tool(tool_use.name.as_ref());
1530 let mut title = SharedString::from(&tool_use.name);
1531 let mut kind = acp::ToolKind::Other;
1532 if let Some(tool) = tool.as_ref() {
1533 title = tool.initial_title(tool_use.input.clone(), cx);
1534 kind = tool.kind();
1535 }
1536
        // Ensure the last message ends with the current tool use
1538 let last_message = self.pending_message();
1539 let push_new_tool_use = last_message.content.last_mut().is_none_or(|content| {
1540 if let AgentMessageContent::ToolUse(last_tool_use) = content {
1541 if last_tool_use.id == tool_use.id {
1542 *last_tool_use = tool_use.clone();
1543 false
1544 } else {
1545 true
1546 }
1547 } else {
1548 true
1549 }
1550 });
1551
1552 if push_new_tool_use {
1553 event_stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
1554 last_message
1555 .content
1556 .push(AgentMessageContent::ToolUse(tool_use.clone()));
1557 } else {
1558 event_stream.update_tool_call_fields(
1559 &tool_use.id,
1560 acp::ToolCallUpdateFields {
1561 title: Some(title.into()),
1562 kind: Some(kind),
1563 raw_input: Some(tool_use.input.clone()),
1564 ..Default::default()
1565 },
1566 );
1567 }
1568
1569 if !tool_use.is_input_complete {
1570 return None;
1571 }
1572
1573 let Some(tool) = tool else {
1574 let content = format!("No tool named {} exists", tool_use.name);
1575 return Some(Task::ready(LanguageModelToolResult {
1576 content: LanguageModelToolResultContent::Text(Arc::from(content)),
1577 tool_use_id: tool_use.id,
1578 tool_name: tool_use.name,
1579 is_error: true,
1580 output: None,
1581 }));
1582 };
1583
1584 let fs = self.project.read(cx).fs().clone();
1585 let tool_event_stream =
1586 ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
1587 tool_event_stream.update_fields(acp::ToolCallUpdateFields {
1588 status: Some(acp::ToolCallStatus::InProgress),
1589 ..Default::default()
1590 });
1591 let supports_images = self.model().is_some_and(|model| model.supports_images());
1592 let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
1593 log::debug!("Running tool {}", tool_use.name);
1594 Some(cx.foreground_executor().spawn(async move {
1595 let tool_result = tool_result.await.and_then(|output| {
1596 if let LanguageModelToolResultContent::Image(_) = &output.llm_output
1597 && !supports_images
1598 {
1599 return Err(anyhow!(
1600 "Attempted to read an image, but this model doesn't support it.",
1601 ));
1602 }
1603 Ok(output)
1604 });
1605
1606 match tool_result {
1607 Ok(output) => LanguageModelToolResult {
1608 tool_use_id: tool_use.id,
1609 tool_name: tool_use.name,
1610 is_error: false,
1611 content: output.llm_output,
1612 output: Some(output.raw_output),
1613 },
1614 Err(error) => LanguageModelToolResult {
1615 tool_use_id: tool_use.id,
1616 tool_name: tool_use.name,
1617 is_error: true,
1618 content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
1619 output: Some(error.to_string().into()),
1620 },
1621 }
1622 }))
1623 }
1624
1625 fn handle_tool_use_json_parse_error_event(
1626 &mut self,
1627 tool_use_id: LanguageModelToolUseId,
1628 tool_name: Arc<str>,
1629 raw_input: Arc<str>,
1630 json_parse_error: String,
1631 ) -> LanguageModelToolResult {
1632 let tool_output = format!("Error parsing input JSON: {json_parse_error}");
1633 LanguageModelToolResult {
1634 tool_use_id,
1635 tool_name,
1636 is_error: true,
1637 content: LanguageModelToolResultContent::Text(tool_output.into()),
1638 output: Some(serde_json::Value::String(raw_input.to_string())),
1639 }
1640 }
1641
1642 fn update_model_request_usage(&self, amount: usize, limit: UsageLimit, cx: &mut Context<Self>) {
1643 self.project
1644 .read(cx)
1645 .user_store()
1646 .update(cx, |user_store, cx| {
1647 user_store.update_model_request_usage(
1648 ModelRequestUsage(RequestUsage {
1649 amount: amount as i32,
1650 limit,
1651 }),
1652 cx,
1653 )
1654 });
1655 }
1656
1657 pub fn title(&self) -> SharedString {
1658 self.title.clone().unwrap_or("New Thread".into())
1659 }
1660
1661 pub fn is_generating_summary(&self) -> bool {
1662 self.pending_summary_generation.is_some()
1663 }
1664
1665 pub fn summary(&mut self, cx: &mut Context<Self>) -> Shared<Task<Option<SharedString>>> {
1666 if let Some(summary) = self.summary.as_ref() {
1667 return Task::ready(Some(summary.clone())).shared();
1668 }
1669 if let Some(task) = self.pending_summary_generation.clone() {
1670 return task;
1671 }
1672 let Some(model) = self.summarization_model.clone() else {
1673 log::error!("No summarization model available");
1674 return Task::ready(None).shared();
1675 };
1676 let mut request = LanguageModelRequest {
1677 intent: Some(CompletionIntent::ThreadContextSummarization),
1678 temperature: AgentSettings::temperature_for_model(&model, cx),
1679 ..Default::default()
1680 };
1681
1682 for message in &self.messages {
1683 request.messages.extend(message.to_request());
1684 }
1685
1686 request.messages.push(LanguageModelRequestMessage {
1687 role: Role::User,
1688 content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
1689 cache: false,
1690 });
1691
1692 let task = cx
1693 .spawn(async move |this, cx| {
1694 let mut summary = String::new();
1695 let mut messages = model.stream_completion(request, cx).await.log_err()?;
1696 while let Some(event) = messages.next().await {
1697 let event = event.log_err()?;
1698 let text = match event {
1699 LanguageModelCompletionEvent::Text(text) => text,
1700 LanguageModelCompletionEvent::StatusUpdate(
1701 CompletionRequestStatus::UsageUpdated { amount, limit },
1702 ) => {
1703 this.update(cx, |thread, cx| {
1704 thread.update_model_request_usage(amount, limit, cx);
1705 })
1706 .ok()?;
1707 continue;
1708 }
1709 _ => continue,
1710 };
1711
1712 let mut lines = text.lines();
1713 summary.extend(lines.next());
1714 }
1715
1716 log::debug!("Setting summary: {}", summary);
1717 let summary = SharedString::from(summary);
1718
1719 this.update(cx, |this, cx| {
1720 this.summary = Some(summary.clone());
1721 this.pending_summary_generation = None;
1722 cx.notify()
1723 })
1724 .ok()?;
1725
1726 Some(summary)
1727 })
1728 .shared();
1729 self.pending_summary_generation = Some(task.clone());
1730 task
1731 }
1732
1733 fn generate_title(&mut self, cx: &mut Context<Self>) {
1734 let Some(model) = self.summarization_model.clone() else {
1735 return;
1736 };
1737
1738 log::debug!(
1739 "Generating title with model: {:?}",
1740 self.summarization_model.as_ref().map(|model| model.name())
1741 );
1742 let mut request = LanguageModelRequest {
1743 intent: Some(CompletionIntent::ThreadSummarization),
1744 temperature: AgentSettings::temperature_for_model(&model, cx),
1745 ..Default::default()
1746 };
1747
1748 for message in &self.messages {
1749 request.messages.extend(message.to_request());
1750 }
1751
1752 request.messages.push(LanguageModelRequestMessage {
1753 role: Role::User,
1754 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
1755 cache: false,
1756 });
1757 self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
1758 let mut title = String::new();
1759
1760 let generate = async {
1761 let mut messages = model.stream_completion(request, cx).await?;
1762 while let Some(event) = messages.next().await {
1763 let event = event?;
1764 let text = match event {
1765 LanguageModelCompletionEvent::Text(text) => text,
1766 LanguageModelCompletionEvent::StatusUpdate(
1767 CompletionRequestStatus::UsageUpdated { amount, limit },
1768 ) => {
1769 this.update(cx, |thread, cx| {
1770 thread.update_model_request_usage(amount, limit, cx);
1771 })?;
1772 continue;
1773 }
1774 _ => continue,
1775 };
1776
1777 let mut lines = text.lines();
1778 title.extend(lines.next());
1779
1780 // Stop if the LLM generated multiple lines.
1781 if lines.next().is_some() {
1782 break;
1783 }
1784 }
1785 anyhow::Ok(())
1786 };
1787
1788 if generate.await.context("failed to generate title").is_ok() {
1789 _ = this.update(cx, |this, cx| this.set_title(title.into(), cx));
1790 }
1791 _ = this.update(cx, |this, _| this.pending_title_generation = None);
1792 }));
1793 }
1794
1795 pub fn set_title(&mut self, title: SharedString, cx: &mut Context<Self>) {
1796 self.pending_title_generation = None;
1797 if Some(&title) != self.title.as_ref() {
1798 self.title = Some(title);
1799 cx.emit(TitleUpdated);
1800 cx.notify();
1801 }
1802 }
1803
1804 fn clear_summary(&mut self) {
1805 self.summary = None;
1806 self.pending_summary_generation = None;
1807 }
1808
1809 fn last_user_message(&self) -> Option<&UserMessage> {
1810 self.messages
1811 .iter()
1812 .rev()
1813 .find_map(|message| match message {
1814 Message::User(user_message) => Some(user_message),
1815 Message::Agent(_) => None,
1816 Message::Resume => None,
1817 })
1818 }
1819
1820 fn pending_message(&mut self) -> &mut AgentMessage {
1821 self.pending_message.get_or_insert_default()
1822 }
1823
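    // When the pending message is flushed (e.g. on cancellation), any tool use
    // that never produced a result is given an error result containing
    // `TOOL_CANCELED_MESSAGE`, so the conversation we send back to the model
    // never contains a dangling tool use.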
1824 fn flush_pending_message(&mut self, cx: &mut Context<Self>) {
1825 let Some(mut message) = self.pending_message.take() else {
1826 return;
1827 };
1828
1829 if message.content.is_empty() {
1830 return;
1831 }
1832
1833 for content in &message.content {
1834 let AgentMessageContent::ToolUse(tool_use) = content else {
1835 continue;
1836 };
1837
1838 if !message.tool_results.contains_key(&tool_use.id) {
1839 message.tool_results.insert(
1840 tool_use.id.clone(),
1841 LanguageModelToolResult {
1842 tool_use_id: tool_use.id.clone(),
1843 tool_name: tool_use.name.clone(),
1844 is_error: true,
1845 content: LanguageModelToolResultContent::Text(TOOL_CANCELED_MESSAGE.into()),
1846 output: None,
1847 },
1848 );
1849 }
1850 }
1851
1852 self.messages.push(Message::Agent(message));
1853 self.updated_at = Utc::now();
1854 self.clear_summary();
1855 cx.notify()
1856 }
1857
1858 pub(crate) fn build_completion_request(
1859 &self,
1860 completion_intent: CompletionIntent,
1861 cx: &App,
1862 ) -> Result<LanguageModelRequest> {
1863 let model = self.model().context("No language model configured")?;
1864 let tools = if let Some(turn) = self.running_turn.as_ref() {
1865 turn.tools
1866 .iter()
1867 .filter_map(|(tool_name, tool)| {
1868 log::trace!("Including tool: {}", tool_name);
1869 Some(LanguageModelRequestTool {
1870 name: tool_name.to_string(),
1871 description: tool.description().to_string(),
1872 input_schema: tool.input_schema(model.tool_input_format()).log_err()?,
1873 })
1874 })
1875 .collect::<Vec<_>>()
1876 } else {
1877 Vec::new()
1878 };
1879
1880 log::debug!("Building completion request");
1881 log::debug!("Completion intent: {:?}", completion_intent);
1882 log::debug!("Completion mode: {:?}", self.completion_mode);
1883
1884 let messages = self.build_request_messages(cx);
1885 log::debug!("Request will include {} messages", messages.len());
1886 log::debug!("Request includes {} tools", tools.len());
1887
1888 let request = LanguageModelRequest {
1889 thread_id: Some(self.id.to_string()),
1890 prompt_id: Some(self.prompt_id.to_string()),
1891 intent: Some(completion_intent),
1892 mode: Some(self.completion_mode.into()),
1893 messages,
1894 tools,
1895 tool_choice: None,
1896 stop: Vec::new(),
1897 temperature: AgentSettings::temperature_for_model(model, cx),
1898 thinking_allowed: true,
1899 };
1900
1901 log::debug!("Completion request built successfully");
1902 Ok(request)
1903 }
1904
1905 fn enabled_tools(
1906 &self,
1907 profile: &AgentProfileSettings,
1908 model: &Arc<dyn LanguageModel>,
1909 cx: &App,
1910 ) -> BTreeMap<SharedString, Arc<dyn AnyAgentTool>> {
1911 fn truncate(tool_name: &SharedString) -> SharedString {
1912 if tool_name.len() > MAX_TOOL_NAME_LENGTH {
1913 let mut truncated = tool_name.to_string();
1914 truncated.truncate(MAX_TOOL_NAME_LENGTH);
1915 truncated.into()
1916 } else {
1917 tool_name.clone()
1918 }
1919 }
1920
1921 let mut tools = self
1922 .tools
1923 .iter()
1924 .filter_map(|(tool_name, tool)| {
1925 if tool.supported_provider(&model.provider_id())
1926 && profile.is_tool_enabled(tool_name)
1927 {
1928 Some((truncate(tool_name), tool.clone()))
1929 } else {
1930 None
1931 }
1932 })
1933 .collect::<BTreeMap<_, _>>();
1934
1935 let mut context_server_tools = Vec::new();
1936 let mut seen_tools = tools.keys().cloned().collect::<HashSet<_>>();
1937 let mut duplicate_tool_names = HashSet::default();
1938 for (server_id, server_tools) in self.context_server_registry.read(cx).servers() {
1939 for (tool_name, tool) in server_tools {
1940 if profile.is_context_server_tool_enabled(&server_id.0, &tool_name) {
1941 let tool_name = truncate(tool_name);
1942 if !seen_tools.insert(tool_name.clone()) {
1943 duplicate_tool_names.insert(tool_name.clone());
1944 }
1945 context_server_tools.push((server_id.clone(), tool_name, tool.clone()));
1946 }
1947 }
1948 }
1949
1950 // When there are duplicate tool names, disambiguate by prefixing them
1951 // with the server ID. In the rare case there isn't enough space for the
1952 // disambiguated tool name, keep only the last tool with this name.
1953 for (server_id, tool_name, tool) in context_server_tools {
1954 if duplicate_tool_names.contains(&tool_name) {
1955 let available = MAX_TOOL_NAME_LENGTH.saturating_sub(tool_name.len());
1956 if available >= 2 {
1957 let mut disambiguated = server_id.0.to_string();
1958 disambiguated.truncate(available - 1);
1959 disambiguated.push('_');
1960 disambiguated.push_str(&tool_name);
1961 tools.insert(disambiguated.into(), tool.clone());
1962 } else {
1963 tools.insert(tool_name, tool.clone());
1964 }
1965 } else {
1966 tools.insert(tool_name, tool.clone());
1967 }
1968 }
1969
1970 tools
1971 }
1972
1973 fn tool(&self, name: &str) -> Option<Arc<dyn AnyAgentTool>> {
1974 self.running_turn.as_ref()?.tools.get(name).cloned()
1975 }
1976
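    /// Converts the thread into request messages: a system prompt, the
    /// persisted messages (the last of which is marked for caching), and any
    /// pending agent message.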
1977 fn build_request_messages(&self, cx: &App) -> Vec<LanguageModelRequestMessage> {
1978 log::trace!(
1979 "Building request messages from {} thread messages",
1980 self.messages.len()
1981 );
1982
1983 let system_prompt = SystemPromptTemplate {
1984 project: self.project_context.read(cx),
1985 available_tools: self.tools.keys().cloned().collect(),
1986 }
1987 .render(&self.templates)
1988 .context("failed to build system prompt")
1989 .expect("Invalid template");
1990 let mut messages = vec![LanguageModelRequestMessage {
1991 role: Role::System,
1992 content: vec![system_prompt.into()],
1993 cache: false,
1994 }];
1995 for message in &self.messages {
1996 messages.extend(message.to_request());
1997 }
1998
1999 if let Some(last_message) = messages.last_mut() {
2000 last_message.cache = true;
2001 }
2002
2003 if let Some(message) = self.pending_message.as_ref() {
2004 messages.extend(message.to_request());
2005 }
2006
2007 messages
2008 }
2009
2010 pub fn to_markdown(&self) -> String {
2011 let mut markdown = String::new();
2012 for (ix, message) in self.messages.iter().enumerate() {
2013 if ix > 0 {
2014 markdown.push('\n');
2015 }
2016 markdown.push_str(&message.to_markdown());
2017 }
2018
2019 if let Some(message) = self.pending_message.as_ref() {
2020 markdown.push('\n');
2021 markdown.push_str(&message.to_markdown());
2022 }
2023
2024 markdown
2025 }
2026
2027 fn advance_prompt_id(&mut self) {
2028 self.prompt_id = PromptId::new();
2029 }
2030
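    /// Maps a completion error to a retry strategy, or `None` when retrying
    /// cannot help.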
2031 fn retry_strategy_for(error: &LanguageModelCompletionError) -> Option<RetryStrategy> {
2032 use LanguageModelCompletionError::*;
2033 use http_client::StatusCode;
2034
        // General strategy here:
        // - If retrying won't help (e.g. invalid API key or payload too large), return None so we don't retry at all.
        // - If it's a time-based issue (e.g. server overloaded, rate limit exceeded), retry up to 4 times,
        //   honoring any server-provided `retry_after` delay and using exponential backoff for plain HTTP 429s.
        // - If it's an issue that *might* be fixed by retrying (e.g. internal server error), retry up to 3 times.
2039 match error {
2040 HttpResponseError {
2041 status_code: StatusCode::TOO_MANY_REQUESTS,
2042 ..
2043 } => Some(RetryStrategy::ExponentialBackoff {
2044 initial_delay: BASE_RETRY_DELAY,
2045 max_attempts: MAX_RETRY_ATTEMPTS,
2046 }),
2047 ServerOverloaded { retry_after, .. } | RateLimitExceeded { retry_after, .. } => {
2048 Some(RetryStrategy::Fixed {
2049 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2050 max_attempts: MAX_RETRY_ATTEMPTS,
2051 })
2052 }
2053 UpstreamProviderError {
2054 status,
2055 retry_after,
2056 ..
2057 } => match *status {
2058 StatusCode::TOO_MANY_REQUESTS | StatusCode::SERVICE_UNAVAILABLE => {
2059 Some(RetryStrategy::Fixed {
2060 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2061 max_attempts: MAX_RETRY_ATTEMPTS,
2062 })
2063 }
2064 StatusCode::INTERNAL_SERVER_ERROR => Some(RetryStrategy::Fixed {
2065 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2066 // Internal Server Error could be anything, retry up to 3 times.
2067 max_attempts: 3,
2068 }),
2069 status => {
                    // There is no StatusCode variant for the unofficial HTTP 529 ("The service is overloaded"),
                    // but we see it frequently in practice. See https://http.dev/529
2072 if status.as_u16() == 529 {
2073 Some(RetryStrategy::Fixed {
2074 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2075 max_attempts: MAX_RETRY_ATTEMPTS,
2076 })
2077 } else {
2078 Some(RetryStrategy::Fixed {
2079 delay: retry_after.unwrap_or(BASE_RETRY_DELAY),
2080 max_attempts: 2,
2081 })
2082 }
2083 }
2084 },
2085 ApiInternalServerError { .. } => Some(RetryStrategy::Fixed {
2086 delay: BASE_RETRY_DELAY,
2087 max_attempts: 3,
2088 }),
2089 ApiReadResponseError { .. }
2090 | HttpSend { .. }
2091 | DeserializeResponse { .. }
2092 | BadRequestFormat { .. } => Some(RetryStrategy::Fixed {
2093 delay: BASE_RETRY_DELAY,
2094 max_attempts: 3,
2095 }),
2096 // Retrying these errors definitely shouldn't help.
2097 HttpResponseError {
2098 status_code:
2099 StatusCode::PAYLOAD_TOO_LARGE | StatusCode::FORBIDDEN | StatusCode::UNAUTHORIZED,
2100 ..
2101 }
2102 | AuthenticationError { .. }
2103 | PermissionError { .. }
2104 | NoApiKey { .. }
2105 | ApiEndpointNotFound { .. }
2106 | PromptTooLarge { .. } => None,
2107 // These errors might be transient, so retry them
2108 SerializeRequest { .. } | BuildRequestBody { .. } => Some(RetryStrategy::Fixed {
2109 delay: BASE_RETRY_DELAY,
2110 max_attempts: 1,
2111 }),
            // Retry all other 4xx and 5xx errors with a fixed delay.
2113 HttpResponseError { status_code, .. }
2114 if status_code.is_client_error() || status_code.is_server_error() =>
2115 {
2116 Some(RetryStrategy::Fixed {
2117 delay: BASE_RETRY_DELAY,
2118 max_attempts: 3,
2119 })
2120 }
2121 Other(err)
2122 if err.is::<language_model::PaymentRequiredError>()
2123 || err.is::<language_model::ModelRequestLimitReachedError>() =>
2124 {
2125 // Retrying won't help for Payment Required or Model Request Limit errors (where
2126 // the user must upgrade to usage-based billing to get more requests, or else wait
2127 // for a significant amount of time for the request limit to reset).
2128 None
2129 }
            // For any other errors, fall back to a conservative fixed-delay retry.
2131 HttpResponseError { .. } | Other(..) => Some(RetryStrategy::Fixed {
2132 delay: BASE_RETRY_DELAY,
2133 max_attempts: 2,
2134 }),
2135 }
2136 }
2137}
2138
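/// State for the turn that is currently being streamed from the model.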
2139struct RunningTurn {
    /// Holds the task that handles agent interaction until the end of the turn.
    /// It survives across multiple requests as the model performs tool calls
    /// and we run those tools and report their results.
2143 _task: Task<()>,
2144 /// The current event stream for the running turn. Used to report a final
2145 /// cancellation event if we cancel the turn.
2146 event_stream: ThreadEventStream,
2147 /// The tools that were enabled for this turn.
2148 tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
2149}
2150
2151impl RunningTurn {
2152 fn cancel(self) {
2153 log::debug!("Cancelling in progress turn");
2154 self.event_stream.send_canceled();
2155 }
2156}
2157
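/// Emitted by [`Thread`] when the token usage it reports changes.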
2158pub struct TokenUsageUpdated(pub Option<acp_thread::TokenUsage>);
2159
2160impl EventEmitter<TokenUsageUpdated> for Thread {}
2161
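/// Emitted by [`Thread`] when its title changes (see [`Thread::set_title`]).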
2162pub struct TitleUpdated;
2163
2164impl EventEmitter<TitleUpdated> for Thread {}
2165
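/// A strongly typed agent tool with serializable input and output.
///
/// Implementors describe the tool's JSON input schema (derived from
/// [`AgentTool::Input`] via `JsonSchema`), how it is titled in the UI, and how
/// it runs. Call [`AgentTool::erase`] to obtain the object-safe
/// [`AnyAgentTool`] form that threads store and invoke.
///
/// A minimal sketch of an implementation (the `EchoTool` type below is purely
/// illustrative and assumes `String` converts into
/// `LanguageModelToolResultContent`):
///
/// ```ignore
/// use std::sync::Arc;
///
/// use agent_client_protocol as acp;
/// use anyhow::Result;
/// use gpui::{App, SharedString, Task};
///
/// #[derive(serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
/// struct EchoInput {
///     /// Text the model wants echoed back.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoInput;
///     type Output = String;
///
///     fn name() -> &'static str {
///         "echo"
///     }
///
///     fn kind() -> acp::ToolKind {
///         // Pick whichever kind best describes the tool.
///         acp::ToolKind::Other
///     }
///
///     fn initial_title(
///         &self,
///         _input: Result<Self::Input, serde_json::Value>,
///         _cx: &mut App,
///     ) -> SharedString {
///         "Echo".into()
///     }
///
///     fn run(
///         self: Arc<Self>,
///         input: Self::Input,
///         _event_stream: ToolCallEventStream,
///         _cx: &mut App,
///     ) -> Task<Result<Self::Output>> {
///         Task::ready(Ok(input.text))
///     }
/// }
///
/// // Registering it alongside other tools:
/// let tool: Arc<dyn AnyAgentTool> = EchoTool.erase();
/// ```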
2166pub trait AgentTool
2167where
2168 Self: 'static + Sized,
2169{
2170 type Input: for<'de> Deserialize<'de> + Serialize + JsonSchema;
2171 type Output: for<'de> Deserialize<'de> + Serialize + Into<LanguageModelToolResultContent>;
2172
2173 fn name() -> &'static str;
2174
2175 fn description() -> SharedString {
2176 let schema = schemars::schema_for!(Self::Input);
2177 SharedString::new(
2178 schema
2179 .get("description")
2180 .and_then(|description| description.as_str())
2181 .unwrap_or_default(),
2182 )
2183 }
2184
2185 fn kind() -> acp::ToolKind;
2186
2187 /// The initial tool title to display. Can be updated during the tool run.
2188 fn initial_title(
2189 &self,
2190 input: Result<Self::Input, serde_json::Value>,
2191 cx: &mut App,
2192 ) -> SharedString;
2193
2194 /// Returns the JSON schema that describes the tool's input.
2195 fn input_schema(format: LanguageModelToolSchemaFormat) -> Schema {
2196 crate::tool_schema::root_schema_for::<Self::Input>(format)
2197 }
2198
    /// Some tools only work with particular providers (e.g. because of how
    /// billing is handled). This lets a tool declare whether it is compatible
    /// with the given provider; incompatible tools are filtered out of the request.
2201 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2202 true
2203 }
2204
2205 /// Runs the tool with the provided input.
2206 fn run(
2207 self: Arc<Self>,
2208 input: Self::Input,
2209 event_stream: ToolCallEventStream,
2210 cx: &mut App,
2211 ) -> Task<Result<Self::Output>>;
2212
2213 /// Emits events for a previous execution of the tool.
2214 fn replay(
2215 &self,
2216 _input: Self::Input,
2217 _output: Self::Output,
2218 _event_stream: ToolCallEventStream,
2219 _cx: &mut App,
2220 ) -> Result<()> {
2221 Ok(())
2222 }
2223
2224 fn erase(self) -> Arc<dyn AnyAgentTool> {
2225 Arc::new(Erased(Arc::new(self)))
2226 }
2227}
2228
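/// Wrapper that type-erases an [`AgentTool`] so it can be stored and invoked
/// as an [`AnyAgentTool`] trait object.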
2229pub struct Erased<T>(T);
2230
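/// The result of running a type-erased tool: `llm_output` is what gets sent
/// back to the model as the tool result, while `raw_output` keeps the tool's
/// full serialized output (used, for example, when replaying the call).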
2231pub struct AgentToolOutput {
2232 pub llm_output: LanguageModelToolResultContent,
2233 pub raw_output: serde_json::Value,
2234}
2235
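/// Object-safe counterpart of [`AgentTool`] that operates on untyped
/// `serde_json::Value` input and output. Obtain one via [`AgentTool::erase`].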
2236pub trait AnyAgentTool {
2237 fn name(&self) -> SharedString;
2238 fn description(&self) -> SharedString;
2239 fn kind(&self) -> acp::ToolKind;
    fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString;
2241 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
2242 fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
2243 true
2244 }
2245 fn run(
2246 self: Arc<Self>,
2247 input: serde_json::Value,
2248 event_stream: ToolCallEventStream,
2249 cx: &mut App,
2250 ) -> Task<Result<AgentToolOutput>>;
2251 fn replay(
2252 &self,
2253 input: serde_json::Value,
2254 output: serde_json::Value,
2255 event_stream: ToolCallEventStream,
2256 cx: &mut App,
2257 ) -> Result<()>;
2258}
2259
2260impl<T> AnyAgentTool for Erased<Arc<T>>
2261where
2262 T: AgentTool,
2263{
2264 fn name(&self) -> SharedString {
2265 T::name().into()
2266 }
2267
2268 fn description(&self) -> SharedString {
2269 T::description()
2270 }
2271
2272 fn kind(&self) -> agent_client_protocol::ToolKind {
2273 T::kind()
2274 }
2275
    fn initial_title(&self, input: serde_json::Value, cx: &mut App) -> SharedString {
        let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
        self.0.initial_title(parsed_input, cx)
    }
2280
2281 fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
2282 let mut json = serde_json::to_value(T::input_schema(format))?;
2283 crate::tool_schema::adapt_schema_to_format(&mut json, format)?;
2284 Ok(json)
2285 }
2286
2287 fn supported_provider(&self, provider: &LanguageModelProviderId) -> bool {
2288 self.0.supported_provider(provider)
2289 }
2290
2291 fn run(
2292 self: Arc<Self>,
2293 input: serde_json::Value,
2294 event_stream: ToolCallEventStream,
2295 cx: &mut App,
2296 ) -> Task<Result<AgentToolOutput>> {
2297 cx.spawn(async move |cx| {
2298 let input = serde_json::from_value(input)?;
2299 let output = cx
2300 .update(|cx| self.0.clone().run(input, event_stream, cx))?
2301 .await?;
2302 let raw_output = serde_json::to_value(&output)?;
2303 Ok(AgentToolOutput {
2304 llm_output: output.into(),
2305 raw_output,
2306 })
2307 })
2308 }
2309
2310 fn replay(
2311 &self,
2312 input: serde_json::Value,
2313 output: serde_json::Value,
2314 event_stream: ToolCallEventStream,
2315 cx: &mut App,
2316 ) -> Result<()> {
2317 let input = serde_json::from_value(input)?;
2318 let output = serde_json::from_value(output)?;
2319 self.0.replay(input, output, event_stream, cx)
2320 }
2321}
2322
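/// Sender half of a thread's event channel.
///
/// Every send is best-effort: if the receiver has been dropped, the error is
/// ignored so an in-flight turn can keep running to completion.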
2323#[derive(Clone)]
2324struct ThreadEventStream(mpsc::UnboundedSender<Result<ThreadEvent>>);
2325
2326impl ThreadEventStream {
2327 fn send_user_message(&self, message: &UserMessage) {
2328 self.0
2329 .unbounded_send(Ok(ThreadEvent::UserMessage(message.clone())))
2330 .ok();
2331 }
2332
2333 fn send_text(&self, text: &str) {
2334 self.0
2335 .unbounded_send(Ok(ThreadEvent::AgentText(text.to_string())))
2336 .ok();
2337 }
2338
2339 fn send_thinking(&self, text: &str) {
2340 self.0
2341 .unbounded_send(Ok(ThreadEvent::AgentThinking(text.to_string())))
2342 .ok();
2343 }
2344
2345 fn send_tool_call(
2346 &self,
2347 id: &LanguageModelToolUseId,
2348 title: SharedString,
2349 kind: acp::ToolKind,
2350 input: serde_json::Value,
2351 ) {
2352 self.0
2353 .unbounded_send(Ok(ThreadEvent::ToolCall(Self::initial_tool_call(
2354 id,
2355 title.to_string(),
2356 kind,
2357 input,
2358 ))))
2359 .ok();
2360 }
2361
2362 fn initial_tool_call(
2363 id: &LanguageModelToolUseId,
2364 title: String,
2365 kind: acp::ToolKind,
2366 input: serde_json::Value,
2367 ) -> acp::ToolCall {
2368 acp::ToolCall {
2369 meta: None,
2370 id: acp::ToolCallId(id.to_string().into()),
2371 title,
2372 kind,
2373 status: acp::ToolCallStatus::Pending,
2374 content: vec![],
2375 locations: vec![],
2376 raw_input: Some(input),
2377 raw_output: None,
2378 }
2379 }
2380
2381 fn update_tool_call_fields(
2382 &self,
2383 tool_use_id: &LanguageModelToolUseId,
2384 fields: acp::ToolCallUpdateFields,
2385 ) {
2386 self.0
2387 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2388 acp::ToolCallUpdate {
2389 meta: None,
2390 id: acp::ToolCallId(tool_use_id.to_string().into()),
2391 fields,
2392 }
2393 .into(),
2394 )))
2395 .ok();
2396 }
2397
2398 fn send_retry(&self, status: acp_thread::RetryStatus) {
2399 self.0.unbounded_send(Ok(ThreadEvent::Retry(status))).ok();
2400 }
2401
2402 fn send_stop(&self, reason: acp::StopReason) {
2403 self.0.unbounded_send(Ok(ThreadEvent::Stop(reason))).ok();
2404 }
2405
2406 fn send_canceled(&self) {
2407 self.0
2408 .unbounded_send(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
2409 .ok();
2410 }
2411
2412 fn send_error(&self, error: impl Into<anyhow::Error>) {
2413 self.0.unbounded_send(Err(error.into())).ok();
2414 }
2415}
2416
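/// A handle a tool uses to stream progress for a single tool call.
///
/// It is scoped to one `tool_use_id` and forwards field updates, diffs, and
/// authorization requests onto the owning [`ThreadEventStream`].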
2417#[derive(Clone)]
2418pub struct ToolCallEventStream {
2419 tool_use_id: LanguageModelToolUseId,
2420 stream: ThreadEventStream,
2421 fs: Option<Arc<dyn Fs>>,
2422}
2423
2424impl ToolCallEventStream {
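    /// Builds a stream wired to an in-memory receiver so tests can assert on
    /// the events a tool emits. A typical sketch (the field values are
    /// illustrative):
    ///
    /// ```ignore
    /// let (stream, mut events) = ToolCallEventStream::test();
    /// stream.update_fields(acp::ToolCallUpdateFields {
    ///     title: Some("Renamed".into()),
    ///     ..Default::default()
    /// });
    /// assert_eq!(
    ///     events.expect_update_fields().await.title.as_deref(),
    ///     Some("Renamed")
    /// );
    /// ```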
2425 #[cfg(any(test, feature = "test-support"))]
2426 pub fn test() -> (Self, ToolCallEventStreamReceiver) {
2427 let (events_tx, events_rx) = mpsc::unbounded::<Result<ThreadEvent>>();
2428
2429 let stream = ToolCallEventStream::new("test_id".into(), ThreadEventStream(events_tx), None);
2430
2431 (stream, ToolCallEventStreamReceiver(events_rx))
2432 }
2433
2434 fn new(
2435 tool_use_id: LanguageModelToolUseId,
2436 stream: ThreadEventStream,
2437 fs: Option<Arc<dyn Fs>>,
2438 ) -> Self {
2439 Self {
2440 tool_use_id,
2441 stream,
2442 fs,
2443 }
2444 }
2445
2446 pub fn update_fields(&self, fields: acp::ToolCallUpdateFields) {
2447 self.stream
2448 .update_tool_call_fields(&self.tool_use_id, fields);
2449 }
2450
2451 pub fn update_diff(&self, diff: Entity<acp_thread::Diff>) {
2452 self.stream
2453 .0
2454 .unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
2455 acp_thread::ToolCallUpdateDiff {
2456 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2457 diff,
2458 }
2459 .into(),
2460 )))
2461 .ok();
2462 }
2463
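    /// Asks the user for permission to run the tool, unless the
    /// `always_allow_tool_actions` setting is enabled.
    ///
    /// The returned task resolves to `Ok(())` when the user picks "Allow" or
    /// "Always Allow" (the latter also persists `always_allow_tool_actions`),
    /// and to an error when the request is denied.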
2464 pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
2465 if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
2466 return Task::ready(Ok(()));
2467 }
2468
2469 let (response_tx, response_rx) = oneshot::channel();
2470 self.stream
2471 .0
2472 .unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
2473 ToolCallAuthorization {
2474 tool_call: acp::ToolCallUpdate {
2475 meta: None,
2476 id: acp::ToolCallId(self.tool_use_id.to_string().into()),
2477 fields: acp::ToolCallUpdateFields {
2478 title: Some(title.into()),
2479 ..Default::default()
2480 },
2481 },
2482 options: vec![
2483 acp::PermissionOption {
2484 id: acp::PermissionOptionId("always_allow".into()),
2485 name: "Always Allow".into(),
2486 kind: acp::PermissionOptionKind::AllowAlways,
2487 meta: None,
2488 },
2489 acp::PermissionOption {
2490 id: acp::PermissionOptionId("allow".into()),
2491 name: "Allow".into(),
2492 kind: acp::PermissionOptionKind::AllowOnce,
2493 meta: None,
2494 },
2495 acp::PermissionOption {
2496 id: acp::PermissionOptionId("deny".into()),
2497 name: "Deny".into(),
2498 kind: acp::PermissionOptionKind::RejectOnce,
2499 meta: None,
2500 },
2501 ],
2502 response: response_tx,
2503 },
2504 )))
2505 .ok();
2506 let fs = self.fs.clone();
2507 cx.spawn(async move |cx| match response_rx.await?.0.as_ref() {
2508 "always_allow" => {
2509 if let Some(fs) = fs.clone() {
2510 cx.update(|cx| {
2511 update_settings_file(fs, cx, |settings, _| {
2512 settings
2513 .agent
2514 .get_or_insert_default()
2515 .set_always_allow_tool_actions(true);
2516 });
2517 })?;
2518 }
2519
2520 Ok(())
2521 }
2522 "allow" => Ok(()),
2523 _ => Err(anyhow!("Permission to run tool denied by user")),
2524 })
2525 }
2526}
2527
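/// Test-only receiver for asserting on the events sent through a
/// [`ToolCallEventStream`]. The `expect_*` helpers panic when the next event
/// is not of the expected kind.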
2528#[cfg(any(test, feature = "test-support"))]
2529pub struct ToolCallEventStreamReceiver(mpsc::UnboundedReceiver<Result<ThreadEvent>>);
2530
2531#[cfg(any(test, feature = "test-support"))]
2532impl ToolCallEventStreamReceiver {
2533 pub async fn expect_authorization(&mut self) -> ToolCallAuthorization {
2534 let event = self.0.next().await;
2535 if let Some(Ok(ThreadEvent::ToolCallAuthorization(auth))) = event {
2536 auth
2537 } else {
2538 panic!("Expected ToolCallAuthorization but got: {:?}", event);
2539 }
2540 }
2541
2542 pub async fn expect_update_fields(&mut self) -> acp::ToolCallUpdateFields {
2543 let event = self.0.next().await;
2544 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2545 update,
2546 )))) = event
2547 {
2548 update.fields
2549 } else {
2550 panic!("Expected update fields but got: {:?}", event);
2551 }
2552 }
2553
2554 pub async fn expect_diff(&mut self) -> Entity<acp_thread::Diff> {
2555 let event = self.0.next().await;
2556 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateDiff(
2557 update,
2558 )))) = event
2559 {
2560 update.diff
2561 } else {
2562 panic!("Expected diff but got: {:?}", event);
2563 }
2564 }
2565
2566 pub async fn expect_terminal(&mut self) -> Entity<acp_thread::Terminal> {
2567 let event = self.0.next().await;
2568 if let Some(Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateTerminal(
2569 update,
2570 )))) = event
2571 {
2572 update.terminal
2573 } else {
2574 panic!("Expected terminal but got: {:?}", event);
2575 }
2576 }
2577}
2578
2579#[cfg(any(test, feature = "test-support"))]
2580impl std::ops::Deref for ToolCallEventStreamReceiver {
2581 type Target = mpsc::UnboundedReceiver<Result<ThreadEvent>>;
2582
2583 fn deref(&self) -> &Self::Target {
2584 &self.0
2585 }
2586}
2587
2588#[cfg(any(test, feature = "test-support"))]
2589impl std::ops::DerefMut for ToolCallEventStreamReceiver {
2590 fn deref_mut(&mut self) -> &mut Self::Target {
2591 &mut self.0
2592 }
2593}
2594
2595impl From<&str> for UserMessageContent {
2596 fn from(text: &str) -> Self {
2597 Self::Text(text.into())
2598 }
2599}
2600
2601impl From<acp::ContentBlock> for UserMessageContent {
2602 fn from(value: acp::ContentBlock) -> Self {
2603 match value {
2604 acp::ContentBlock::Text(text_content) => Self::Text(text_content.text),
2605 acp::ContentBlock::Image(image_content) => Self::Image(convert_image(image_content)),
2606 acp::ContentBlock::Audio(_) => {
2607 // TODO
2608 Self::Text("[audio]".to_string())
2609 }
2610 acp::ContentBlock::ResourceLink(resource_link) => {
2611 match MentionUri::parse(&resource_link.uri) {
2612 Ok(uri) => Self::Mention {
2613 uri,
2614 content: String::new(),
2615 },
2616 Err(err) => {
2617 log::error!("Failed to parse mention link: {}", err);
2618 Self::Text(format!("[{}]({})", resource_link.name, resource_link.uri))
2619 }
2620 }
2621 }
2622 acp::ContentBlock::Resource(resource) => match resource.resource {
2623 acp::EmbeddedResourceResource::TextResourceContents(resource) => {
2624 match MentionUri::parse(&resource.uri) {
2625 Ok(uri) => Self::Mention {
2626 uri,
2627 content: resource.text,
2628 },
2629 Err(err) => {
2630 log::error!("Failed to parse mention link: {}", err);
2631 Self::Text(
2632 MarkdownCodeBlock {
2633 tag: &resource.uri,
2634 text: &resource.text,
2635 }
2636 .to_string(),
2637 )
2638 }
2639 }
2640 }
2641 acp::EmbeddedResourceResource::BlobResourceContents(_) => {
2642 // TODO
2643 Self::Text("[blob]".to_string())
2644 }
2645 },
2646 }
2647 }
2648}
2649
2650impl From<UserMessageContent> for acp::ContentBlock {
2651 fn from(content: UserMessageContent) -> Self {
2652 match content {
2653 UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
2654 text,
2655 annotations: None,
2656 meta: None,
2657 }),
2658 UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
2659 data: image.source.to_string(),
2660 mime_type: "image/png".to_string(),
2661 meta: None,
2662 annotations: None,
2663 uri: None,
2664 }),
2665 UserMessageContent::Mention { uri, content } => {
2666 acp::ContentBlock::Resource(acp::EmbeddedResource {
2667 meta: None,
2668 resource: acp::EmbeddedResourceResource::TextResourceContents(
2669 acp::TextResourceContents {
2670 meta: None,
2671 mime_type: None,
2672 text: content,
2673 uri: uri.to_uri().to_string(),
2674 },
2675 ),
2676 annotations: None,
2677 })
2678 }
2679 }
2680 }
2681}
2682
2683fn convert_image(image_content: acp::ImageContent) -> LanguageModelImage {
2684 LanguageModelImage {
2685 source: image_content.data.into(),
2686 // TODO: make this optional?
2687 size: gpui::Size::new(0.into(), 0.into()),
2688 }
2689}