1use std::fmt::Write as _;
2use std::io::Write;
3use std::ops::Range;
4use std::sync::Arc;
5
6use anyhow::{Context as _, Result};
7use assistant_settings::AssistantSettings;
8use assistant_tool::{ActionLog, Tool, ToolWorkingSet};
9use chrono::{DateTime, Utc};
10use collections::{BTreeMap, HashMap, HashSet};
11use fs::Fs;
12use futures::future::Shared;
13use futures::{FutureExt, StreamExt as _};
14use git;
15use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task, WeakEntity};
16use language_model::{
17 LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
18 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
19 LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
20 Role, StopReason, TokenUsage,
21};
22use project::git_store::{GitStore, GitStoreCheckpoint};
23use project::{Project, Worktree};
24use prompt_store::{
25 AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
26};
27use serde::{Deserialize, Serialize};
28use settings::Settings;
29use util::{ResultExt as _, TryFutureExt as _, maybe, post_inc};
30use uuid::Uuid;
31
32use crate::context::{AssistantContext, ContextId, attach_context_to_message};
33use crate::thread_store::{
34 SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
35 SerializedToolUse,
36};
37use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};
38
/// The kind of completion request being sent to the model.
#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    /// An ordinary conversational turn (includes tool uses/results).
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}
45
/// A unique identifier for a [`Thread`], backed by a cheaply clonable string.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);
48
49impl ThreadId {
50 pub fn new() -> Self {
51 Self(Uuid::new_v4().to_string().into())
52 }
53}
54
55impl std::fmt::Display for ThreadId {
56 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
57 write!(f, "{}", self.0)
58 }
59}
60
/// A monotonically increasing identifier for a [`Message`] within a thread.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);
63
64impl MessageId {
65 fn post_inc(&mut self) -> Self {
66 Self(post_inc(&mut self.0))
67 }
68}
69
/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    /// Ordered text/thinking segments making up the message body.
    pub segments: Vec<MessageSegment>,
}
77
78impl Message {
79 pub fn push_thinking(&mut self, text: &str) {
80 if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
81 segment.push_str(text);
82 } else {
83 self.segments
84 .push(MessageSegment::Thinking(text.to_string()));
85 }
86 }
87
88 pub fn push_text(&mut self, text: &str) {
89 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
90 segment.push_str(text);
91 } else {
92 self.segments.push(MessageSegment::Text(text.to_string()));
93 }
94 }
95
96 pub fn to_string(&self) -> String {
97 let mut result = String::new();
98 for segment in &self.segments {
99 match segment {
100 MessageSegment::Text(text) => result.push_str(text),
101 MessageSegment::Thinking(text) => {
102 result.push_str("<think>");
103 result.push_str(text);
104 result.push_str("</think>");
105 }
106 }
107 }
108 result
109 }
110}
111
/// One chunk of a [`Message`] body.
#[derive(Debug, Clone)]
pub enum MessageSegment {
    /// Ordinary response text.
    Text(String),
    /// Model "thinking" output, rendered separately from response text.
    Thinking(String),
}
117
/// A point-in-time capture of project state, recorded when a thread starts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectSnapshot {
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    /// Paths of buffers that had unsaved edits at capture time.
    pub unsaved_buffer_paths: Vec<String>,
    pub timestamp: DateTime<Utc>,
}
124
/// Per-worktree portion of a [`ProjectSnapshot`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorktreeSnapshot {
    pub worktree_path: String,
    /// `None` when the worktree is not a git repository.
    pub git_state: Option<GitState>,
}
130
/// Git repository state captured for a worktree snapshot. All fields are
/// optional because any of them may be unavailable (e.g. detached HEAD,
/// no remote, diff failed).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitState {
    pub remote_url: Option<String>,
    pub head_sha: Option<String>,
    pub current_branch: Option<String>,
    pub diff: Option<String>,
}
138
/// Associates a git checkpoint with the message it was taken for, so the
/// project can be rolled back to the state before that message.
#[derive(Clone)]
pub struct ThreadCheckpoint {
    message_id: MessageId,
    git_checkpoint: GitStoreCheckpoint,
}
144
/// User feedback (thumbs up / thumbs down) recorded for a thread.
#[derive(Copy, Clone, Debug)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}
150
/// State of the most recent checkpoint-restore attempt.
pub enum LastRestoreCheckpoint {
    /// The restore is still in flight.
    Pending {
        message_id: MessageId,
    },
    /// The restore failed; `error` is a displayable description.
    Error {
        message_id: MessageId,
        error: String,
    },
}
160
161impl LastRestoreCheckpoint {
162 pub fn message_id(&self) -> MessageId {
163 match self {
164 LastRestoreCheckpoint::Pending { message_id } => *message_id,
165 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
166 }
167 }
168}
169
/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    /// When the thread was last modified.
    updated_at: DateTime<Utc>,
    /// Model-generated title; `None` until summarization has produced one.
    summary: Option<SharedString>,
    /// In-flight summarization task, if any.
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    /// Id that will be assigned to the next inserted message.
    next_message_id: MessageId,
    /// All context ever attached to this thread, keyed by id.
    context: BTreeMap<ContextId, AssistantContext>,
    /// Which context ids were attached alongside each message.
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    system_prompt_context: Option<AssistantSystemPromptContext>,
    /// Finalized git checkpoints, keyed by the message they precede.
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    /// Monotonic counter used to identify pending completions.
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Arc<ToolWorkingSet>,
    tool_use: ToolUseState,
    action_log: Entity<ActionLog>,
    /// State of the most recent checkpoint restore, if one happened.
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    /// Checkpoint taken for the latest user message; finalized or discarded
    /// once generation completes.
    pending_checkpoint: Option<ThreadCheckpoint>,
    /// Snapshot of the project captured when the thread was created.
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    /// Token usage accumulated across every completion in this thread.
    cumulative_token_usage: TokenUsage,
    feedback: Option<ThreadFeedback>,
}
195
196impl Thread {
197 pub fn new(
198 project: Entity<Project>,
199 tools: Arc<ToolWorkingSet>,
200 prompt_builder: Arc<PromptBuilder>,
201 cx: &mut Context<Self>,
202 ) -> Self {
203 Self {
204 id: ThreadId::new(),
205 updated_at: Utc::now(),
206 summary: None,
207 pending_summary: Task::ready(None),
208 messages: Vec::new(),
209 next_message_id: MessageId(0),
210 context: BTreeMap::default(),
211 context_by_message: HashMap::default(),
212 system_prompt_context: None,
213 checkpoints_by_message: HashMap::default(),
214 completion_count: 0,
215 pending_completions: Vec::new(),
216 project: project.clone(),
217 prompt_builder,
218 tools: tools.clone(),
219 last_restore_checkpoint: None,
220 pending_checkpoint: None,
221 tool_use: ToolUseState::new(tools.clone()),
222 action_log: cx.new(|_| ActionLog::new()),
223 initial_project_snapshot: {
224 let project_snapshot = Self::project_snapshot(project, cx);
225 cx.foreground_executor()
226 .spawn(async move { Some(project_snapshot.await) })
227 .shared()
228 },
229 cumulative_token_usage: TokenUsage::default(),
230 feedback: None,
231 }
232 }
233
234 pub fn deserialize(
235 id: ThreadId,
236 serialized: SerializedThread,
237 project: Entity<Project>,
238 tools: Arc<ToolWorkingSet>,
239 prompt_builder: Arc<PromptBuilder>,
240 cx: &mut Context<Self>,
241 ) -> Self {
242 let next_message_id = MessageId(
243 serialized
244 .messages
245 .last()
246 .map(|message| message.id.0 + 1)
247 .unwrap_or(0),
248 );
249 let tool_use =
250 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |_| true);
251
252 Self {
253 id,
254 updated_at: serialized.updated_at,
255 summary: Some(serialized.summary),
256 pending_summary: Task::ready(None),
257 messages: serialized
258 .messages
259 .into_iter()
260 .map(|message| Message {
261 id: message.id,
262 role: message.role,
263 segments: message
264 .segments
265 .into_iter()
266 .map(|segment| match segment {
267 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
268 SerializedMessageSegment::Thinking { text } => {
269 MessageSegment::Thinking(text)
270 }
271 })
272 .collect(),
273 })
274 .collect(),
275 next_message_id,
276 context: BTreeMap::default(),
277 context_by_message: HashMap::default(),
278 system_prompt_context: None,
279 checkpoints_by_message: HashMap::default(),
280 completion_count: 0,
281 pending_completions: Vec::new(),
282 last_restore_checkpoint: None,
283 pending_checkpoint: None,
284 project,
285 prompt_builder,
286 tools,
287 tool_use,
288 action_log: cx.new(|_| ActionLog::new()),
289 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
290 cumulative_token_usage: serialized.cumulative_token_usage,
291 feedback: None,
292 }
293 }
294
    /// This thread's unique id.
    pub fn id(&self) -> &ThreadId {
        &self.id
    }
298
    /// Whether the thread contains no messages yet.
    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }
302
    /// When the thread was last modified.
    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }
306
    /// Bumps the last-modified timestamp to now.
    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }
310
    /// The model-generated summary, if one has been produced yet.
    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }
314
315 pub fn summary_or_default(&self) -> SharedString {
316 const DEFAULT: SharedString = SharedString::new_static("New Thread");
317 self.summary.clone().unwrap_or(DEFAULT)
318 }
319
    /// Sets the summary and notifies subscribers that it changed.
    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }
324
    /// Looks up a message by id. O(n) over the message list.
    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }
328
    /// Iterates over all messages in insertion order.
    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }
332
    /// Whether a completion is streaming or tools are still running.
    pub fn is_generating(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }
336
    /// The tool working set available to this thread.
    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }
340
341 pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
342 self.tool_use
343 .pending_tool_uses()
344 .into_iter()
345 .find(|tool_use| &tool_use.id == id)
346 }
347
    /// Iterates over pending tool uses that are waiting on user confirmation.
    pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.needs_confirmation())
    }
354
    /// Whether any tool uses are still pending (including errored ones).
    pub fn has_pending_tool_uses(&self) -> bool {
        !self.tool_use.pending_tool_uses().is_empty()
    }
358
    /// The finalized checkpoint recorded for message `id`, if any.
    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }
362
    /// Restores the project to `checkpoint`'s git state and, on success,
    /// truncates the thread back to the checkpoint's message.
    ///
    /// Progress/failure is tracked in `last_restore_checkpoint` so the UI can
    /// surface it; [`ThreadEvent::CheckpointChanged`] is emitted both when the
    /// restore starts and when it completes.
    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        // Mark the restore as in-flight before kicking off the async work.
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let project = self.project.read(cx);
        let restore = project
            .git_store()
            .read(cx)
            .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    // Keep the failed state around so the error can be shown.
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                }
                // Any pending checkpoint is obsolete after a restore attempt.
                this.pending_checkpoint = None;
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }
398
    /// Once generation has finished, decides the fate of the pending
    /// checkpoint: if the project is unchanged since it was taken, the
    /// checkpoint is deleted; otherwise it is recorded for its message.
    /// No-op while generation is still in progress or nothing is pending.
    fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
        let pending_checkpoint = if self.is_generating() {
            return;
        } else if let Some(checkpoint) = self.pending_checkpoint.take() {
            checkpoint
        } else {
            return;
        };

        let git_store = self.project.read(cx).git_store().clone();
        let final_checkpoint = git_store.read(cx).checkpoint(cx);
        cx.spawn(async move |this, cx| match final_checkpoint.await {
            Ok(final_checkpoint) => {
                // Compare the pending checkpoint with the project's current
                // state; on comparison failure, assume they differ.
                let equal = git_store
                    .read_with(cx, |store, cx| {
                        store.compare_checkpoints(
                            pending_checkpoint.git_checkpoint.clone(),
                            final_checkpoint.clone(),
                            cx,
                        )
                    })?
                    .await
                    .unwrap_or(false);

                if equal {
                    // Nothing changed during generation; drop the checkpoint.
                    git_store
                        .read_with(cx, |store, cx| {
                            store.delete_checkpoint(pending_checkpoint.git_checkpoint, cx)
                        })?
                        .detach();
                } else {
                    this.update(cx, |this, cx| {
                        this.insert_checkpoint(pending_checkpoint, cx)
                    })?;
                }

                // The comparison checkpoint is only needed transiently.
                git_store
                    .read_with(cx, |store, cx| {
                        store.delete_checkpoint(final_checkpoint, cx)
                    })?
                    .detach();

                Ok(())
            }
            // If we can't take a final checkpoint, keep the pending one.
            Err(_) => this.update(cx, |this, cx| {
                this.insert_checkpoint(pending_checkpoint, cx)
            }),
        })
        .detach();
    }
449
    /// Records `checkpoint` against its message and notifies subscribers.
    fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
        self.checkpoints_by_message
            .insert(checkpoint.message_id, checkpoint);
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();
    }
456
    /// The state of the most recent checkpoint restore, if any.
    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }
460
461 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
462 let Some(message_ix) = self
463 .messages
464 .iter()
465 .rposition(|message| message.id == message_id)
466 else {
467 return;
468 };
469 for deleted_message in self.messages.drain(message_ix..) {
470 self.context_by_message.remove(&deleted_message.id);
471 self.checkpoints_by_message.remove(&deleted_message.id);
472 }
473 cx.notify();
474 }
475
    /// Iterates over the context items that were attached to message `id`.
    pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AssistantContext> {
        self.context_by_message
            .get(&id)
            .into_iter()
            .flat_map(|context| {
                context
                    .iter()
                    .filter_map(|context_id| self.context.get(&context_id))
            })
    }
486
    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        self.tool_use
            .pending_tool_uses()
            .iter()
            .all(|tool_use| tool_use.status.is_error())
    }
496
    /// The tool uses requested by message `id`.
    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, cx)
    }
500
    /// The tool results attached to message `id`.
    pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(id)
    }
504
    /// The result of tool use `id`, if it has completed.
    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }
508
    /// Whether message `message_id` carries any tool results.
    pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
        self.tool_use.message_has_tool_results(message_id)
    }
512
513 pub fn insert_user_message(
514 &mut self,
515 text: impl Into<String>,
516 context: Vec<AssistantContext>,
517 git_checkpoint: Option<GitStoreCheckpoint>,
518 cx: &mut Context<Self>,
519 ) -> MessageId {
520 let message_id =
521 self.insert_message(Role::User, vec![MessageSegment::Text(text.into())], cx);
522 let context_ids = context
523 .iter()
524 .map(|context| context.id())
525 .collect::<Vec<_>>();
526 self.context
527 .extend(context.into_iter().map(|context| (context.id(), context)));
528 self.context_by_message.insert(message_id, context_ids);
529 if let Some(git_checkpoint) = git_checkpoint {
530 self.pending_checkpoint = Some(ThreadCheckpoint {
531 message_id,
532 git_checkpoint,
533 });
534 }
535 message_id
536 }
537
    /// Appends a message with the next id, bumps the modified timestamp, and
    /// emits [`ThreadEvent::MessageAdded`]. Returns the new message's id.
    pub fn insert_message(
        &mut self,
        role: Role,
        segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message { id, role, segments });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }
550
551 pub fn edit_message(
552 &mut self,
553 id: MessageId,
554 new_role: Role,
555 new_segments: Vec<MessageSegment>,
556 cx: &mut Context<Self>,
557 ) -> bool {
558 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
559 return false;
560 };
561 message.role = new_role;
562 message.segments = new_segments;
563 self.touch_updated_at();
564 cx.emit(ThreadEvent::MessageEdited(id));
565 true
566 }
567
568 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
569 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
570 return false;
571 };
572 self.messages.remove(index);
573 self.context_by_message.remove(&id);
574 self.touch_updated_at();
575 cx.emit(ThreadEvent::MessageDeleted(id));
576 true
577 }
578
579 /// Returns the representation of this [`Thread`] in a textual form.
580 ///
581 /// This is the representation we use when attaching a thread as context to another thread.
582 pub fn text(&self) -> String {
583 let mut text = String::new();
584
585 for message in &self.messages {
586 text.push_str(match message.role {
587 language_model::Role::User => "User:",
588 language_model::Role::Assistant => "Assistant:",
589 language_model::Role::System => "System:",
590 });
591 text.push('\n');
592
593 for segment in &message.segments {
594 match segment {
595 MessageSegment::Text(content) => text.push_str(content),
596 MessageSegment::Thinking(content) => {
597 text.push_str(&format!("<think>{}</think>", content))
598 }
599 }
600 }
601 text.push('\n');
602 }
603
604 text
605 }
606
    /// Serializes this thread into a format for storage or telemetry.
    ///
    /// Waits for the initial project snapshot before reading the thread state,
    /// then converts messages, tool uses, and tool results into their
    /// serialized counterparts.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary_or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking(text) => {
                                    SerializedMessageSegment::Thinking { text: text.clone() }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                            })
                            .collect(),
                    })
                    .collect(),
                initial_project_snapshot,
                cumulative_token_usage: this.cumulative_token_usage.clone(),
            })
        })
    }
658
    /// Stores the context used to generate the assistant system prompt.
    pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
        self.system_prompt_context = Some(context);
    }
662
    /// The stored system-prompt context, if it has been loaded.
    // NOTE(review): returning `&Option<T>` rather than `Option<&T>` is kept
    // for caller compatibility.
    pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
        &self.system_prompt_context
    }
666
    /// Loads worktree info (including rules files) for every visible worktree,
    /// producing the system-prompt context plus the first error encountered,
    /// if any. Worktrees are still included even when their rules file failed
    /// to load.
    pub fn load_system_prompt_context(
        &self,
        cx: &App,
    ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
        let project = self.project.read(cx);
        let tasks = project
            .visible_worktrees(cx)
            .map(|worktree| {
                Self::load_worktree_info_for_system_prompt(
                    project.fs().clone(),
                    worktree.read(cx),
                    cx,
                )
            })
            .collect::<Vec<_>>();

        cx.spawn(async |_cx| {
            let results = futures::future::join_all(tasks).await;
            // Keep all worktrees; surface only the first error.
            let mut first_err = None;
            let worktrees = results
                .into_iter()
                .map(|(worktree, err)| {
                    if first_err.is_none() && err.is_some() {
                        first_err = err;
                    }
                    worktree
                })
                .collect::<Vec<_>>();
            (AssistantSystemPromptContext::new(worktrees), first_err)
        })
    }
698
    /// Gathers a worktree's root name, absolute path, and — if present — the
    /// contents of the first matching assistant rules file, in priority order
    /// of `RULES_FILE_NAMES`. A failed load yields a [`ThreadError`] alongside
    /// the worktree info rather than dropping the worktree.
    fn load_worktree_info_for_system_prompt(
        fs: Arc<dyn Fs>,
        worktree: &Worktree,
        cx: &App,
    ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
        let root_name = worktree.root_name().into();
        let abs_path = worktree.abs_path();

        // Note that Cline supports `.clinerules` being a directory, but that is not currently
        // supported. This doesn't seem to occur often in GitHub repositories.
        const RULES_FILE_NAMES: [&'static str; 6] = [
            ".rules",
            ".cursorrules",
            ".windsurfrules",
            ".clinerules",
            ".github/copilot-instructions.md",
            "CLAUDE.md",
        ];
        // First existing regular file wins, in the order listed above.
        let selected_rules_file = RULES_FILE_NAMES
            .into_iter()
            .filter_map(|name| {
                worktree
                    .entry_for_path(name)
                    .filter(|entry| entry.is_file())
                    .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
            })
            .next();

        if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
            cx.spawn(async move |_| {
                let rules_file_result = maybe!(async move {
                    let abs_rules_path = abs_rules_path?;
                    let text = fs.load(&abs_rules_path).await.with_context(|| {
                        format!("Failed to load assistant rules file {:?}", abs_rules_path)
                    })?;
                    anyhow::Ok(RulesFile {
                        rel_path: rel_rules_path,
                        abs_path: abs_rules_path.into(),
                        text: text.trim().to_string(),
                    })
                })
                .await;
                // Load failures become a displayable error, not a hard failure.
                let (rules_file, rules_file_error) = match rules_file_result {
                    Ok(rules_file) => (Some(rules_file), None),
                    Err(err) => (
                        None,
                        Some(ThreadError::Message {
                            header: "Error loading rules file".into(),
                            message: format!("{err}").into(),
                        }),
                    ),
                };
                let worktree_info = WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file,
                };
                (worktree_info, rules_file_error)
            })
        } else {
            // No rules file: resolve immediately with just the worktree info.
            Task::ready((
                WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file: None,
                },
                None,
            ))
        }
    }
769
770 pub fn send_to_model(
771 &mut self,
772 model: Arc<dyn LanguageModel>,
773 request_kind: RequestKind,
774 cx: &mut Context<Self>,
775 ) {
776 let mut request = self.to_completion_request(request_kind, cx);
777 request.tools = {
778 let mut tools = Vec::new();
779 tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
780 LanguageModelRequestTool {
781 name: tool.name(),
782 description: tool.description(),
783 input_schema: tool.input_schema(model.tool_input_format()),
784 }
785 }));
786
787 tools
788 };
789
790 self.stream_completion(request, model, cx);
791 }
792
793 pub fn used_tools_since_last_user_message(&self) -> bool {
794 for message in self.messages.iter().rev() {
795 if self.tool_use.message_has_tool_results(message.id) {
796 return true;
797 } else if message.role == Role::User {
798 return false;
799 }
800 }
801
802 false
803 }
804
805 pub fn to_completion_request(
806 &self,
807 request_kind: RequestKind,
808 cx: &App,
809 ) -> LanguageModelRequest {
810 let mut request = LanguageModelRequest {
811 messages: vec![],
812 tools: Vec::new(),
813 stop: Vec::new(),
814 temperature: None,
815 };
816
817 if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
818 if let Some(system_prompt) = self
819 .prompt_builder
820 .generate_assistant_system_prompt(system_prompt_context)
821 .context("failed to generate assistant system prompt")
822 .log_err()
823 {
824 request.messages.push(LanguageModelRequestMessage {
825 role: Role::System,
826 content: vec![MessageContent::Text(system_prompt)],
827 cache: true,
828 });
829 }
830 } else {
831 log::error!("system_prompt_context not set.")
832 }
833
834 let mut referenced_context_ids = HashSet::default();
835
836 for message in &self.messages {
837 if let Some(context_ids) = self.context_by_message.get(&message.id) {
838 referenced_context_ids.extend(context_ids);
839 }
840
841 let mut request_message = LanguageModelRequestMessage {
842 role: message.role,
843 content: Vec::new(),
844 cache: false,
845 };
846
847 match request_kind {
848 RequestKind::Chat => {
849 self.tool_use
850 .attach_tool_results(message.id, &mut request_message);
851 }
852 RequestKind::Summarize => {
853 // We don't care about tool use during summarization.
854 if self.tool_use.message_has_tool_results(message.id) {
855 continue;
856 }
857 }
858 }
859
860 if !message.segments.is_empty() {
861 request_message
862 .content
863 .push(MessageContent::Text(message.to_string()));
864 }
865
866 match request_kind {
867 RequestKind::Chat => {
868 self.tool_use
869 .attach_tool_uses(message.id, &mut request_message);
870 }
871 RequestKind::Summarize => {
872 // We don't care about tool use during summarization.
873 }
874 };
875
876 request.messages.push(request_message);
877 }
878
879 // Set a cache breakpoint at the second-to-last message.
880 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
881 let breakpoint_index = request.messages.len() - 2;
882 for (index, message) in request.messages.iter_mut().enumerate() {
883 message.cache = index == breakpoint_index;
884 }
885
886 if !referenced_context_ids.is_empty() {
887 let mut context_message = LanguageModelRequestMessage {
888 role: Role::User,
889 content: Vec::new(),
890 cache: false,
891 };
892
893 let referenced_context = referenced_context_ids
894 .into_iter()
895 .filter_map(|context_id| self.context.get(context_id));
896 attach_context_to_message(&mut context_message, referenced_context, cx);
897
898 request.messages.push(context_message);
899 }
900
901 self.attached_tracked_files_state(&mut request.messages, cx);
902
903 request
904 }
905
906 fn attached_tracked_files_state(
907 &self,
908 messages: &mut Vec<LanguageModelRequestMessage>,
909 cx: &App,
910 ) {
911 const STALE_FILES_HEADER: &str = "These files changed since last read:";
912
913 let mut stale_message = String::new();
914
915 let action_log = self.action_log.read(cx);
916
917 for stale_file in action_log.stale_buffers(cx) {
918 let Some(file) = stale_file.read(cx).file() else {
919 continue;
920 };
921
922 if stale_message.is_empty() {
923 write!(&mut stale_message, "{}", STALE_FILES_HEADER).ok();
924 }
925
926 writeln!(&mut stale_message, "- {}", file.path().display()).ok();
927 }
928
929 let mut content = Vec::with_capacity(2);
930
931 if !stale_message.is_empty() {
932 content.push(stale_message.into());
933 }
934
935 if action_log.has_edited_files_since_project_diagnostics_check() {
936 content.push(
937 "\n\nWhen you're done making changes, make sure to check project diagnostics \
938 and fix all errors AND warnings you introduced! \
939 DO NOT mention you're going to do this until you're done."
940 .into(),
941 );
942 }
943
944 if !content.is_empty() {
945 let context_message = LanguageModelRequestMessage {
946 role: Role::User,
947 content,
948 cache: false,
949 };
950
951 messages.push(context_message);
952 }
953 }
954
    /// Streams a completion for `request` from `model`, applying each event to
    /// the thread as it arrives (text/thinking chunks, tool-use requests,
    /// token-usage updates), then reporting errors and telemetry when the
    /// stream ends. The task is tracked in `pending_completions` until done.
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(async move |thread, cx| {
            let stream = model.stream_completion(request, &cx);
            // Captured up front so telemetry can report this run's usage delta.
            let initial_token_usage =
                thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;
                let mut current_token_usage = TokenUsage::default();

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(
                                    Role::Assistant,
                                    vec![MessageSegment::Text(String::new())],
                                    cx,
                                );
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
                                // Usage reports are cumulative per completion,
                                // so add only the delta since the last report.
                                thread.cumulative_token_usage =
                                    thread.cumulative_token_usage.clone() + token_usage.clone()
                                        - current_token_usage.clone();
                                current_token_usage = token_usage;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.push_text(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we won't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Text(chunk.to_string())],
                                            cx,
                                        );
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::Thinking(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.push_thinking(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantThinking(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we won't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Thinking(chunk.to_string())],
                                            cx,
                                        );
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                // Attach the tool use to the latest assistant
                                // message, creating a placeholder if needed.
                                let last_assistant_message_id = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                    .map(|message| message.id)
                                    .unwrap_or_else(|| {
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Text("Using tool...".to_string())],
                                            cx,
                                        )
                                    });
                                thread.tool_use.request_tool_use(
                                    last_assistant_message_id,
                                    tool_use,
                                    cx,
                                );
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    // Yield so other foreground work can run between events.
                    smol::future::yield_now().await;
                }

                thread.update(cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    // Kick off summarization once a first exchange exists.
                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(cx, |thread, cx| {
                    thread.finalize_pending_checkpoint(cx);
                    match result.as_ref() {
                        Ok(stop_reason) => match stop_reason {
                            StopReason::ToolUse => {
                                cx.emit(ThreadEvent::UsePendingTools);
                            }
                            StopReason::EndTurn => {}
                            StopReason::MaxTokens => {}
                        },
                        Err(error) => {
                            // Map well-known billing errors to dedicated UI
                            // states; everything else becomes a generic message.
                            if error.is::<PaymentRequiredError>() {
                                cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                            } else if error.is::<MaxMonthlySpendReachedError>() {
                                cx.emit(ThreadEvent::ShowError(
                                    ThreadError::MaxMonthlySpendReached,
                                ));
                            } else {
                                let error_message = error
                                    .chain()
                                    .map(|err| err.to_string())
                                    .collect::<Vec<_>>()
                                    .join("\n");
                                cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                                    header: "Error interacting with language model".into(),
                                    message: SharedString::from(error_message.clone()),
                                }));
                            }

                            thread.cancel_last_completion(cx);
                        }
                    }
                    cx.emit(ThreadEvent::DoneStreaming);

                    if let Ok(initial_usage) = initial_token_usage {
                        let usage = thread.cumulative_token_usage.clone() - initial_usage;

                        telemetry::event!(
                            "Assistant Thread Completion",
                            thread_id = thread.id().to_string(),
                            model = model.telemetry_id(),
                            model_provider = model.provider_id().to_string(),
                            input_tokens = usage.input_tokens,
                            output_tokens = usage.output_tokens,
                            cache_creation_input_tokens = usage.cache_creation_input_tokens,
                            cache_read_input_tokens = usage.cache_read_input_tokens,
                        );
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }
1139
1140 pub fn summarize(&mut self, cx: &mut Context<Self>) {
1141 let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
1142 return;
1143 };
1144 let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
1145 return;
1146 };
1147
1148 if !provider.is_authenticated(cx) {
1149 return;
1150 }
1151
1152 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1153 request.messages.push(LanguageModelRequestMessage {
1154 role: Role::User,
1155 content: vec![
1156 "Generate a concise 3-7 word title for this conversation, omitting punctuation. \
1157 Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`. \
1158 If the conversation is about a specific subject, include it in the title. \
1159 Be descriptive. DO NOT speak in the first person."
1160 .into(),
1161 ],
1162 cache: false,
1163 });
1164
1165 self.pending_summary = cx.spawn(async move |this, cx| {
1166 async move {
1167 let stream = model.stream_completion_text(request, &cx);
1168 let mut messages = stream.await?;
1169
1170 let mut new_summary = String::new();
1171 while let Some(message) = messages.stream.next().await {
1172 let text = message?;
1173 let mut lines = text.lines();
1174 new_summary.extend(lines.next());
1175
1176 // Stop if the LLM generated multiple lines.
1177 if lines.next().is_some() {
1178 break;
1179 }
1180 }
1181
1182 this.update(cx, |this, cx| {
1183 if !new_summary.is_empty() {
1184 this.summary = Some(new_summary.into());
1185 }
1186
1187 cx.emit(ThreadEvent::SummaryChanged);
1188 })?;
1189
1190 anyhow::Ok(())
1191 }
1192 .log_err()
1193 .await
1194 });
1195 }
1196
1197 pub fn use_pending_tools(
1198 &mut self,
1199 cx: &mut Context<Self>,
1200 ) -> impl IntoIterator<Item = PendingToolUse> + use<> {
1201 let request = self.to_completion_request(RequestKind::Chat, cx);
1202 let messages = Arc::new(request.messages);
1203 let pending_tool_uses = self
1204 .tool_use
1205 .pending_tool_uses()
1206 .into_iter()
1207 .filter(|tool_use| tool_use.status.is_idle())
1208 .cloned()
1209 .collect::<Vec<_>>();
1210
1211 for tool_use in pending_tool_uses.iter() {
1212 if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1213 if tool.needs_confirmation()
1214 && !AssistantSettings::get_global(cx).always_allow_tool_actions
1215 {
1216 self.tool_use.confirm_tool_use(
1217 tool_use.id.clone(),
1218 tool_use.ui_text.clone(),
1219 tool_use.input.clone(),
1220 messages.clone(),
1221 tool,
1222 );
1223 cx.emit(ThreadEvent::ToolConfirmationNeeded);
1224 } else {
1225 self.run_tool(
1226 tool_use.id.clone(),
1227 tool_use.ui_text.clone(),
1228 tool_use.input.clone(),
1229 &messages,
1230 tool,
1231 cx,
1232 );
1233 }
1234 } else if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1235 self.run_tool(
1236 tool_use.id.clone(),
1237 tool_use.ui_text.clone(),
1238 tool_use.input.clone(),
1239 &messages,
1240 tool,
1241 cx,
1242 );
1243 }
1244 }
1245
1246 pending_tool_uses
1247 }
1248
1249 pub fn run_tool(
1250 &mut self,
1251 tool_use_id: LanguageModelToolUseId,
1252 ui_text: impl Into<SharedString>,
1253 input: serde_json::Value,
1254 messages: &[LanguageModelRequestMessage],
1255 tool: Arc<dyn Tool>,
1256 cx: &mut Context<Thread>,
1257 ) {
1258 let task = self.spawn_tool_use(tool_use_id.clone(), messages, input, tool, cx);
1259 self.tool_use
1260 .run_pending_tool(tool_use_id, ui_text.into(), task);
1261 }
1262
1263 fn spawn_tool_use(
1264 &mut self,
1265 tool_use_id: LanguageModelToolUseId,
1266 messages: &[LanguageModelRequestMessage],
1267 input: serde_json::Value,
1268 tool: Arc<dyn Tool>,
1269 cx: &mut Context<Thread>,
1270 ) -> Task<()> {
1271 let tool_name: Arc<str> = tool.name().into();
1272 let run_tool = tool.run(
1273 input,
1274 messages,
1275 self.project.clone(),
1276 self.action_log.clone(),
1277 cx,
1278 );
1279
1280 cx.spawn({
1281 async move |thread: WeakEntity<Thread>, cx| {
1282 let output = run_tool.await;
1283
1284 thread
1285 .update(cx, |thread, cx| {
1286 let pending_tool_use = thread.tool_use.insert_tool_output(
1287 tool_use_id.clone(),
1288 tool_name,
1289 output,
1290 );
1291
1292 cx.emit(ThreadEvent::ToolFinished {
1293 tool_use_id,
1294 pending_tool_use,
1295 canceled: false,
1296 });
1297 })
1298 .ok();
1299 }
1300 })
1301 }
1302
1303 pub fn attach_tool_results(
1304 &mut self,
1305 updated_context: Vec<AssistantContext>,
1306 cx: &mut Context<Self>,
1307 ) {
1308 self.context.extend(
1309 updated_context
1310 .into_iter()
1311 .map(|context| (context.id(), context)),
1312 );
1313
1314 // Insert a user message to contain the tool results.
1315 self.insert_user_message(
1316 // TODO: Sending up a user message without any content results in the model sending back
1317 // responses that also don't have any content. We currently don't handle this case well,
1318 // so for now we provide some text to keep the model on track.
1319 "Here are the tool results.",
1320 Vec::new(),
1321 None,
1322 cx,
1323 );
1324 }
1325
1326 /// Cancels the last pending completion, if there are any pending.
1327 ///
1328 /// Returns whether a completion was canceled.
1329 pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
1330 let canceled = if self.pending_completions.pop().is_some() {
1331 true
1332 } else {
1333 let mut canceled = false;
1334 for pending_tool_use in self.tool_use.cancel_pending() {
1335 canceled = true;
1336 cx.emit(ThreadEvent::ToolFinished {
1337 tool_use_id: pending_tool_use.id.clone(),
1338 pending_tool_use: Some(pending_tool_use),
1339 canceled: true,
1340 });
1341 }
1342 canceled
1343 };
1344 self.finalize_pending_checkpoint(cx);
1345 canceled
1346 }
1347
    /// Returns the feedback given to the thread, if any.
    ///
    /// `None` means the user has not rated this thread yet.
    pub fn feedback(&self) -> Option<ThreadFeedback> {
        self.feedback
    }
1352
1353 /// Reports feedback about the thread and stores it in our telemetry backend.
1354 pub fn report_feedback(
1355 &mut self,
1356 feedback: ThreadFeedback,
1357 cx: &mut Context<Self>,
1358 ) -> Task<Result<()>> {
1359 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
1360 let serialized_thread = self.serialize(cx);
1361 let thread_id = self.id().clone();
1362 let client = self.project.read(cx).client();
1363 self.feedback = Some(feedback);
1364 cx.notify();
1365
1366 cx.background_spawn(async move {
1367 let final_project_snapshot = final_project_snapshot.await;
1368 let serialized_thread = serialized_thread.await?;
1369 let thread_data =
1370 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
1371
1372 let rating = match feedback {
1373 ThreadFeedback::Positive => "positive",
1374 ThreadFeedback::Negative => "negative",
1375 };
1376 telemetry::event!(
1377 "Assistant Thread Rated",
1378 rating,
1379 thread_id,
1380 thread_data,
1381 final_project_snapshot
1382 );
1383 client.telemetry().flush_events();
1384
1385 Ok(())
1386 })
1387 }
1388
1389 /// Create a snapshot of the current project state including git information and unsaved buffers.
1390 fn project_snapshot(
1391 project: Entity<Project>,
1392 cx: &mut Context<Self>,
1393 ) -> Task<Arc<ProjectSnapshot>> {
1394 let git_store = project.read(cx).git_store().clone();
1395 let worktree_snapshots: Vec<_> = project
1396 .read(cx)
1397 .visible_worktrees(cx)
1398 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
1399 .collect();
1400
1401 cx.spawn(async move |_, cx| {
1402 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
1403
1404 let mut unsaved_buffers = Vec::new();
1405 cx.update(|app_cx| {
1406 let buffer_store = project.read(app_cx).buffer_store();
1407 for buffer_handle in buffer_store.read(app_cx).buffers() {
1408 let buffer = buffer_handle.read(app_cx);
1409 if buffer.is_dirty() {
1410 if let Some(file) = buffer.file() {
1411 let path = file.path().to_string_lossy().to_string();
1412 unsaved_buffers.push(path);
1413 }
1414 }
1415 }
1416 })
1417 .ok();
1418
1419 Arc::new(ProjectSnapshot {
1420 worktree_snapshots,
1421 unsaved_buffer_paths: unsaved_buffers,
1422 timestamp: Utc::now(),
1423 })
1424 })
1425 }
1426
    /// Builds a [`WorktreeSnapshot`] for a single worktree, capturing its
    /// absolute path plus git state (branch, remote, HEAD, and a diff) for the
    /// repository associated with that worktree, if any.
    fn worktree_snapshot(
        worktree: Entity<project::Worktree>,
        git_store: Entity<GitStore>,
        cx: &App,
    ) -> Task<WorktreeSnapshot> {
        cx.spawn(async move |cx| {
            // Get worktree path and snapshot
            let worktree_info = cx.update(|app_cx| {
                let worktree = worktree.read(app_cx);
                let path = worktree.abs_path().to_string_lossy().to_string();
                let snapshot = worktree.snapshot();
                (path, snapshot)
            });

            // If the app/entity is gone, fall back to an empty snapshot rather
            // than failing the whole project snapshot.
            let Ok((worktree_path, snapshot)) = worktree_info else {
                return WorktreeSnapshot {
                    worktree_path: String::new(),
                    git_state: None,
                };
            };

            // Find the repository whose worktree id matches this snapshot and
            // grab its branch plus a handle to the local repository.
            let repo_info = git_store
                .update(cx, |git_store, cx| {
                    git_store
                        .repositories()
                        .values()
                        .find(|repo| repo.read(cx).worktree_id == Some(snapshot.id()))
                        .and_then(|repo| {
                            let repo = repo.read(cx);
                            Some((repo.branch().cloned(), repo.local_repository()?))
                        })
                })
                .ok()
                .flatten();

            // Extract git information
            let git_state = match repo_info {
                None => None,
                Some((branch, repo)) => {
                    let current_branch = branch.map(|branch| branch.name.to_string());
                    let remote_url = repo.remote_url("origin");
                    let head_sha = repo.head_sha();

                    // Get diff asynchronously
                    let diff = repo
                        .diff(git::repository::DiffType::HeadToWorktree)
                        .await
                        .ok();

                    Some(GitState {
                        remote_url,
                        head_sha,
                        current_branch,
                        diff,
                    })
                }
            };

            WorktreeSnapshot {
                worktree_path,
                git_state,
            }
        })
    }
1491
1492 pub fn to_markdown(&self, cx: &App) -> Result<String> {
1493 let mut markdown = Vec::new();
1494
1495 if let Some(summary) = self.summary() {
1496 writeln!(markdown, "# {summary}\n")?;
1497 };
1498
1499 for message in self.messages() {
1500 writeln!(
1501 markdown,
1502 "## {role}\n",
1503 role = match message.role {
1504 Role::User => "User",
1505 Role::Assistant => "Assistant",
1506 Role::System => "System",
1507 }
1508 )?;
1509 for segment in &message.segments {
1510 match segment {
1511 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
1512 MessageSegment::Thinking(text) => {
1513 writeln!(markdown, "<think>{}</think>\n", text)?
1514 }
1515 }
1516 }
1517
1518 for tool_use in self.tool_uses_for_message(message.id, cx) {
1519 writeln!(
1520 markdown,
1521 "**Use Tool: {} ({})**",
1522 tool_use.name, tool_use.id
1523 )?;
1524 writeln!(markdown, "```json")?;
1525 writeln!(
1526 markdown,
1527 "{}",
1528 serde_json::to_string_pretty(&tool_use.input)?
1529 )?;
1530 writeln!(markdown, "```")?;
1531 }
1532
1533 for tool_result in self.tool_results_for_message(message.id) {
1534 write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
1535 if tool_result.is_error {
1536 write!(markdown, " (Error)")?;
1537 }
1538
1539 writeln!(markdown, "**\n")?;
1540 writeln!(markdown, "{}", tool_result.content)?;
1541 }
1542 }
1543
1544 Ok(String::from_utf8_lossy(&markdown).to_string())
1545 }
1546
    /// Accepts or rejects the agent's edits within `buffer_range` of `buffer`,
    /// delegating to the action log.
    pub fn review_edits_in_range(
        &mut self,
        buffer: Entity<language::Buffer>,
        buffer_range: Range<language::Anchor>,
        accept: bool,
        cx: &mut Context<Self>,
    ) {
        self.action_log.update(cx, |action_log, cx| {
            action_log.review_edits_in_range(buffer, buffer_range, accept, cx)
        });
    }
1558
    /// Keeps all edits across all buffers at once.
    /// This provides a more performant alternative to calling review_edits_in_range for each buffer.
    pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
        self.action_log
            .update(cx, |action_log, _cx| action_log.keep_all_edits());
    }
1565
    /// Returns the action log tracking the agent's edits for review.
    pub fn action_log(&self) -> &Entity<ActionLog> {
        &self.action_log
    }

    /// Returns the project this thread operates on.
    pub fn project(&self) -> &Entity<Project> {
        &self.project
    }

    /// Returns the total token usage accumulated across all completions in
    /// this thread.
    pub fn cumulative_token_usage(&self) -> TokenUsage {
        self.cumulative_token_usage.clone()
    }
1577
1578 pub fn deny_tool_use(
1579 &mut self,
1580 tool_use_id: LanguageModelToolUseId,
1581 tool_name: Arc<str>,
1582 cx: &mut Context<Self>,
1583 ) {
1584 let err = Err(anyhow::anyhow!(
1585 "Permission to run tool action denied by user"
1586 ));
1587
1588 self.tool_use
1589 .insert_tool_output(tool_use_id.clone(), tool_name, err);
1590
1591 cx.emit(ThreadEvent::ToolFinished {
1592 tool_use_id,
1593 pending_tool_use: None,
1594 canceled: true,
1595 });
1596 }
1597}
1598
/// User-visible errors surfaced by a thread via [`ThreadEvent::ShowError`].
#[derive(Debug, Clone)]
pub enum ThreadError {
    /// The language model request failed because payment is required.
    PaymentRequired,
    /// The user's configured maximum monthly spend has been reached.
    MaxMonthlySpendReached,
    /// Any other error, shown with a header and detail message.
    Message {
        header: SharedString,
        message: SharedString,
    },
}
1608
/// Events emitted by a [`Thread`] for the UI and other observers.
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    /// An error occurred that should be shown to the user.
    ShowError(ThreadError),
    /// A chunk of the completion stream was received.
    StreamedCompletion,
    /// Streamed assistant text was appended to the given message.
    StreamedAssistantText(MessageId, String),
    /// Streamed assistant thinking content was appended to the given message.
    StreamedAssistantThinking(MessageId, String),
    /// The completion stream finished (successfully or not).
    DoneStreaming,
    MessageAdded(MessageId),
    MessageEdited(MessageId),
    MessageDeleted(MessageId),
    /// The thread's summary/title changed.
    SummaryChanged,
    /// The model stopped with `StopReason::ToolUse`; pending tools should run.
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
        /// Whether the tool was canceled by the user.
        canceled: bool,
    },
    /// The thread's git checkpoint state changed.
    CheckpointChanged,
    /// A tool requires user confirmation before it may run.
    ToolConfirmationNeeded,
}
1632
impl EventEmitter<ThreadEvent> for Thread {}

/// A completion request that is currently streaming; dropping `_task` cancels
/// the underlying stream.
struct PendingCompletion {
    // Monotonically increasing id used to match completions when retaining.
    id: usize,
    _task: Task<()>,
}