1use std::fmt::Write as _;
2use std::io::Write;
3use std::ops::Range;
4use std::sync::Arc;
5
6use anyhow::{Context as _, Result, anyhow};
7use assistant_settings::AssistantSettings;
8use assistant_tool::{ActionLog, Tool, ToolWorkingSet};
9use chrono::{DateTime, Utc};
10use collections::{BTreeMap, HashMap, HashSet};
11use fs::Fs;
12use futures::future::Shared;
13use futures::{FutureExt, StreamExt as _};
14use git::repository::DiffType;
15use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task, WeakEntity};
16use language_model::{
17 LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
18 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
19 LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
20 Role, StopReason, TokenUsage,
21};
22use project::git_store::{GitStore, GitStoreCheckpoint, RepositoryState};
23use project::{Project, Worktree};
24use prompt_store::{
25 AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
26};
27use schemars::JsonSchema;
28use serde::{Deserialize, Serialize};
29use settings::Settings;
30use util::{ResultExt as _, TryFutureExt as _, maybe, post_inc};
31use uuid::Uuid;
32
33use crate::context::{AssistantContext, ContextId, attach_context_to_message};
34use crate::thread_store::{
35 SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
36 SerializedToolUse,
37};
38use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState, USING_TOOL_MARKER};
39
/// The purpose of a completion request, which controls how tool uses and
/// tool results are attached to the request messages.
#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    /// An ordinary chat turn; tool uses and results are included.
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}
46
/// Globally unique identifier for a [`Thread`], stored as a cheaply-clonable string.
#[derive(
    Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
)]
pub struct ThreadId(Arc<str>);
51
52impl ThreadId {
53 pub fn new() -> Self {
54 Self(Uuid::new_v4().to_string().into())
55 }
56}
57
58impl std::fmt::Display for ThreadId {
59 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
60 write!(f, "{}", self.0)
61 }
62}
63
64impl From<&str> for ThreadId {
65 fn from(value: &str) -> Self {
66 Self(value.into())
67 }
68}
69
/// Monotonically increasing identifier of a [`Message`] within its [`Thread`].
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);
72
73impl MessageId {
74 fn post_inc(&mut self) -> Self {
75 Self(post_inc(&mut self.0))
76 }
77}
78
/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    /// Identifier of this message within its thread.
    pub id: MessageId,
    /// Who authored the message (user, assistant, or system).
    pub role: Role,
    /// Ordered content pieces; streaming appends merge into the trailing
    /// segment when it has the same kind.
    pub segments: Vec<MessageSegment>,
}
86
87impl Message {
88 /// Returns whether the message contains any meaningful text that should be displayed
89 /// The model sometimes runs tool without producing any text or just a marker ([`USING_TOOL_MARKER`])
90 pub fn should_display_content(&self) -> bool {
91 self.segments.iter().all(|segment| segment.should_display())
92 }
93
94 pub fn push_thinking(&mut self, text: &str) {
95 if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
96 segment.push_str(text);
97 } else {
98 self.segments
99 .push(MessageSegment::Thinking(text.to_string()));
100 }
101 }
102
103 pub fn push_text(&mut self, text: &str) {
104 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
105 segment.push_str(text);
106 } else {
107 self.segments.push(MessageSegment::Text(text.to_string()));
108 }
109 }
110
111 pub fn to_string(&self) -> String {
112 let mut result = String::new();
113 for segment in &self.segments {
114 match segment {
115 MessageSegment::Text(text) => result.push_str(text),
116 MessageSegment::Thinking(text) => {
117 result.push_str("<think>");
118 result.push_str(text);
119 result.push_str("</think>");
120 }
121 }
122 }
123 result
124 }
125}
126
/// One contiguous piece of a message's content.
#[derive(Debug, Clone)]
pub enum MessageSegment {
    /// Ordinary response text.
    Text(String),
    /// Model reasoning text; wrapped in `<think>` tags when flattened.
    Thinking(String),
}
132
133impl MessageSegment {
134 pub fn text_mut(&mut self) -> &mut String {
135 match self {
136 Self::Text(text) => text,
137 Self::Thinking(text) => text,
138 }
139 }
140
141 pub fn should_display(&self) -> bool {
142 // We add USING_TOOL_MARKER when making a request that includes tool uses
143 // without non-whitespace text around them, and this can cause the model
144 // to mimic the pattern, so we consider those segments not displayable.
145 match self {
146 Self::Text(text) => text.is_empty() || text.trim() == USING_TOOL_MARKER,
147 Self::Thinking(text) => text.is_empty() || text.trim() == USING_TOOL_MARKER,
148 }
149 }
150}
151
/// A snapshot of the project's state, stored with the thread for
/// storage/telemetry purposes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectSnapshot {
    /// Per-worktree state (path plus optional git info).
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    /// Paths of buffers with unsaved changes at snapshot time.
    pub unsaved_buffer_paths: Vec<String>,
    /// When the snapshot was taken.
    pub timestamp: DateTime<Utc>,
}
158
/// Snapshot of a single worktree for [`ProjectSnapshot`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorktreeSnapshot {
    /// Path of the worktree root.
    pub worktree_path: String,
    /// Git repository state, when the worktree is under version control.
    pub git_state: Option<GitState>,
}
164
/// Git repository state captured in a [`WorktreeSnapshot`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitState {
    /// URL of the primary remote, if any.
    pub remote_url: Option<String>,
    /// SHA of the current HEAD commit, if resolvable.
    pub head_sha: Option<String>,
    /// Name of the checked-out branch, if any.
    pub current_branch: Option<String>,
    /// Diff of uncommitted changes, if available.
    pub diff: Option<String>,
}
172
/// A git checkpoint tied to the message that triggered it, allowing the
/// project to be restored to its state before that message.
#[derive(Clone)]
pub struct ThreadCheckpoint {
    // The user message this checkpoint was taken for.
    message_id: MessageId,
    // Handle to the underlying git-store checkpoint.
    git_checkpoint: GitStoreCheckpoint,
}
178
/// User feedback (thumbs up / thumbs down) on a thread.
#[derive(Copy, Clone, Debug)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}
184
/// Status of the most recent checkpoint restoration attempt.
pub enum LastRestoreCheckpoint {
    /// A restore is still in flight for this message's checkpoint.
    Pending {
        message_id: MessageId,
    },
    /// The last restore attempt failed.
    Error {
        message_id: MessageId,
        error: String,
    },
}
194
195impl LastRestoreCheckpoint {
196 pub fn message_id(&self) -> MessageId {
197 match self {
198 LastRestoreCheckpoint::Pending { message_id } => *message_id,
199 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
200 }
201 }
202}
203
/// Progress of generating a thread's detailed summary.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub enum DetailedSummaryState {
    /// No detailed summary has been generated yet.
    #[default]
    NotGenerated,
    /// A summary is being generated.
    // NOTE(review): message_id presumably marks the last message covered by
    // the summary — confirm against the generation code.
    Generating {
        message_id: MessageId,
    },
    /// A summary has been generated.
    Generated {
        text: SharedString,
        message_id: MessageId,
    },
}
216
/// A thread of conversation with the LLM.
pub struct Thread {
    // Globally unique id of this thread.
    id: ThreadId,
    // Last time the thread was modified.
    updated_at: DateTime<Utc>,
    // Short summary used as the thread title, once generated.
    summary: Option<SharedString>,
    // Task generating the summary, if one is running.
    pending_summary: Task<Option<()>>,
    // Progress/result of detailed-summary generation.
    detailed_summary_state: DetailedSummaryState,
    // Messages in chronological order.
    messages: Vec<Message>,
    // Id to assign to the next inserted message.
    next_message_id: MessageId,
    // All context entries ever attached, keyed by id.
    context: BTreeMap<ContextId, AssistantContext>,
    // Which context ids each message references.
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    // Worktree/rules info for the system prompt; None until loaded.
    system_prompt_context: Option<AssistantSystemPromptContext>,
    // Finalized git checkpoints keyed by the message that created them.
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    // Counter used to assign ids to pending completions.
    completion_count: usize,
    // Completions currently streaming from the model.
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    // Tools available to the model in this thread.
    tools: Arc<ToolWorkingSet>,
    // Tracks requested/pending/completed tool uses per message.
    tool_use: ToolUseState,
    // Log of buffer edits/reads made on the model's behalf.
    action_log: Entity<ActionLog>,
    // Status of the most recent checkpoint restore, if any.
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    // Checkpoint taken for the in-flight turn, finalized when generation ends.
    pending_checkpoint: Option<ThreadCheckpoint>,
    // Project state captured when the thread was created (shared future).
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    // Running total of tokens used across all completions.
    cumulative_token_usage: TokenUsage,
    // User feedback on this thread, if given.
    feedback: Option<ThreadFeedback>,
}
243
244impl Thread {
    /// Creates a new, empty thread in the given project.
    ///
    /// Kicks off an asynchronous capture of the project's initial snapshot,
    /// which consumers await through the shared `initial_project_snapshot`
    /// future.
    pub fn new(
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            detailed_summary_state: DetailedSummaryState::NotGenerated,
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            system_prompt_context: None,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            tool_use: ToolUseState::new(tools.clone()),
            action_log: cx.new(|_| ActionLog::new()),
            initial_project_snapshot: {
                // Capture the snapshot in the background; `.shared()` lets
                // multiple consumers await the same result.
                let project_snapshot = Self::project_snapshot(project, cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            cumulative_token_usage: TokenUsage::default(),
            feedback: None,
        }
    }
282
    /// Reconstructs a thread from its serialized form.
    ///
    /// Message ids are preserved and `next_message_id` continues after the
    /// highest deserialized id. Context associations and checkpoints are not
    /// persisted, so they start out empty.
    pub fn deserialize(
        id: ThreadId,
        serialized: SerializedThread,
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            serialized
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        // Rebuild tool-use state from the serialized messages; the `|_| true`
        // predicate accepts every recorded tool use.
        let tool_use =
            ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |_| true);

        Self {
            id,
            updated_at: serialized.updated_at,
            summary: Some(serialized.summary),
            pending_summary: Task::ready(None),
            detailed_summary_state: serialized.detailed_summary_state,
            messages: serialized
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    segments: message
                        .segments
                        .into_iter()
                        .map(|segment| match segment {
                            SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
                            SerializedMessageSegment::Thinking { text } => {
                                MessageSegment::Thinking(text)
                            }
                        })
                        .collect(),
                })
                .collect(),
            next_message_id,
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            system_prompt_context: None,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            project,
            prompt_builder,
            tools,
            tool_use,
            action_log: cx.new(|_| ActionLog::new()),
            initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
            cumulative_token_usage: serialized.cumulative_token_usage,
            feedback: None,
        }
    }
344
    /// The unique identifier of this thread.
    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    /// Returns whether the thread has no messages yet.
    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    /// The last time this thread was modified.
    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    /// Marks the thread as modified right now.
    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    /// The generated summary of the thread, if one exists yet.
    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    /// The thread's summary, or a placeholder title when none has been generated.
    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    /// Sets the thread's summary and notifies listeners.
    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    /// The latest detailed summary when generated, otherwise the full thread text.
    pub fn latest_detailed_summary_or_text(&self) -> SharedString {
        self.latest_detailed_summary()
            .unwrap_or_else(|| self.text().into())
    }

    /// The detailed summary text, if generation has completed.
    fn latest_detailed_summary(&self) -> Option<SharedString> {
        if let DetailedSummaryState::Generated { text, .. } = &self.detailed_summary_state {
            Some(text.clone())
        } else {
            None
        }
    }

    /// Looks up a message by id.
    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    /// Iterates over all messages in order.
    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    /// Returns whether a completion is streaming or tools are still running.
    pub fn is_generating(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }

    /// The set of tools available to this thread.
    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    /// Looks up a pending tool use by its id.
    pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .find(|tool_use| &tool_use.id == id)
    }

    /// Iterates over pending tool uses that need user confirmation before running.
    pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.needs_confirmation())
    }

    /// Returns whether any tool uses are still pending.
    pub fn has_pending_tool_uses(&self) -> bool {
        !self.tool_use.pending_tool_uses().is_empty()
    }

    /// The git checkpoint recorded for the given message, if any.
    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }
425
    /// Restores the project to the given checkpoint's git state.
    ///
    /// On success the thread is truncated back to the checkpoint's message;
    /// on failure the error is recorded in `last_restore_checkpoint` so the
    /// UI can surface it. Either way the pending checkpoint is cleared.
    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        // Mark the restore as in flight before kicking off the async work.
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let project = self.project.read(cx);
        let restore = project
            .git_store()
            .read(cx)
            .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                }
                this.pending_checkpoint = None;
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }
461
    /// Resolves the checkpoint taken at the start of the current turn once
    /// generation has finished.
    ///
    /// If the repository is unchanged since the checkpoint was taken, the
    /// checkpoint is deleted; otherwise it is kept so the user can restore
    /// it. No-op while the thread is still generating or when there is no
    /// pending checkpoint.
    fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
        let pending_checkpoint = if self.is_generating() {
            return;
        } else if let Some(checkpoint) = self.pending_checkpoint.take() {
            checkpoint
        } else {
            return;
        };

        let git_store = self.project.read(cx).git_store().clone();
        let final_checkpoint = git_store.read(cx).checkpoint(cx);
        cx.spawn(async move |this, cx| match final_checkpoint.await {
            Ok(final_checkpoint) => {
                // Compare the state at turn start against the state now; a
                // comparison failure is treated as "changed".
                let equal = git_store
                    .read_with(cx, |store, cx| {
                        store.compare_checkpoints(
                            pending_checkpoint.git_checkpoint.clone(),
                            final_checkpoint.clone(),
                            cx,
                        )
                    })?
                    .await
                    .unwrap_or(false);

                if equal {
                    // Nothing changed — the checkpoint is useless; discard it.
                    git_store
                        .read_with(cx, |store, cx| {
                            store.delete_checkpoint(pending_checkpoint.git_checkpoint, cx)
                        })?
                        .detach();
                } else {
                    this.update(cx, |this, cx| {
                        this.insert_checkpoint(pending_checkpoint, cx)
                    })?;
                }

                // The freshly-taken comparison checkpoint is never kept.
                git_store
                    .read_with(cx, |store, cx| {
                        store.delete_checkpoint(final_checkpoint, cx)
                    })?
                    .detach();

                Ok(())
            }
            // Couldn't take a comparison checkpoint — keep the pending one.
            Err(_) => this.update(cx, |this, cx| {
                this.insert_checkpoint(pending_checkpoint, cx)
            }),
        })
        .detach();
    }
512
    /// Records a checkpoint for its message and notifies listeners.
    fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
        self.checkpoints_by_message
            .insert(checkpoint.message_id, checkpoint);
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();
    }

    /// The state of the most recent checkpoint restoration, if one is pending
    /// or has failed.
    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }

    /// Removes the message with `message_id` and every later message, along
    /// with their associated context links and checkpoints.
    pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
        let Some(message_ix) = self
            .messages
            .iter()
            .rposition(|message| message.id == message_id)
        else {
            return;
        };
        for deleted_message in self.messages.drain(message_ix..) {
            self.context_by_message.remove(&deleted_message.id);
            self.checkpoints_by_message.remove(&deleted_message.id);
        }
        cx.notify();
    }
538
    /// Iterates over the context entries attached to the given message.
    pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AssistantContext> {
        self.context_by_message
            .get(&id)
            .into_iter()
            .flat_map(|context| {
                context
                    .iter()
                    .filter_map(|context_id| self.context.get(&context_id))
            })
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        self.tool_use
            .pending_tool_uses()
            .iter()
            .all(|tool_use| tool_use.status.is_error())
    }

    /// The tool uses requested by the given message.
    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, cx)
    }

    /// The tool results attached to the given message.
    pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(id)
    }

    /// Looks up the result of a tool use by its id.
    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }

    /// Returns whether the given message has any tool results.
    pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
        self.tool_use.message_has_tool_results(message_id)
    }
575
    /// Appends a user message with its attached context, optionally recording
    /// a git checkpoint to be finalized when the resulting turn completes.
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<AssistantContext>,
        git_checkpoint: Option<GitStoreCheckpoint>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let message_id =
            self.insert_message(Role::User, vec![MessageSegment::Text(text.into())], cx);
        // Register the context entries and link them to this message.
        let context_ids = context
            .iter()
            .map(|context| context.id())
            .collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id(), context)));
        self.context_by_message.insert(message_id, context_ids);
        if let Some(git_checkpoint) = git_checkpoint {
            self.pending_checkpoint = Some(ThreadCheckpoint {
                message_id,
                git_checkpoint,
            });
        }
        message_id
    }
600
    /// Appends a message with the given role and segments, bumps
    /// `updated_at`, and emits [`ThreadEvent::MessageAdded`].
    pub fn insert_message(
        &mut self,
        role: Role,
        segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message { id, role, segments });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }
613
614 pub fn edit_message(
615 &mut self,
616 id: MessageId,
617 new_role: Role,
618 new_segments: Vec<MessageSegment>,
619 cx: &mut Context<Self>,
620 ) -> bool {
621 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
622 return false;
623 };
624 message.role = new_role;
625 message.segments = new_segments;
626 self.touch_updated_at();
627 cx.emit(ThreadEvent::MessageEdited(id));
628 true
629 }
630
631 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
632 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
633 return false;
634 };
635 self.messages.remove(index);
636 self.context_by_message.remove(&id);
637 self.touch_updated_at();
638 cx.emit(ThreadEvent::MessageDeleted(id));
639 true
640 }
641
642 /// Returns the representation of this [`Thread`] in a textual form.
643 ///
644 /// This is the representation we use when attaching a thread as context to another thread.
645 pub fn text(&self) -> String {
646 let mut text = String::new();
647
648 for message in &self.messages {
649 text.push_str(match message.role {
650 language_model::Role::User => "User:",
651 language_model::Role::Assistant => "Assistant:",
652 language_model::Role::System => "System:",
653 });
654 text.push('\n');
655
656 for segment in &message.segments {
657 match segment {
658 MessageSegment::Text(content) => text.push_str(content),
659 MessageSegment::Thinking(content) => {
660 text.push_str(&format!("<think>{}</think>", content))
661 }
662 }
663 }
664 text.push('\n');
665 }
666
667 text
668 }
669
    /// Serializes this thread into a format for storage or telemetry.
    ///
    /// Awaits the initial project snapshot before building the
    /// [`SerializedThread`]; each message carries its tool uses and results.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary_or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking(text) => {
                                    SerializedMessageSegment::Thinking { text: text.clone() }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                            })
                            .collect(),
                    })
                    .collect(),
                initial_project_snapshot,
                cumulative_token_usage: this.cumulative_token_usage.clone(),
                detailed_summary_state: this.detailed_summary_state.clone(),
            })
        })
    }
722
    /// Stores the worktree/rules context used to build the system prompt.
    pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
        self.system_prompt_context = Some(context);
    }

    /// The system-prompt context, if it has been loaded.
    pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
        &self.system_prompt_context
    }
730
    /// Loads worktree metadata (including any rules files) for the system
    /// prompt, concurrently across all visible worktrees.
    ///
    /// Returns the assembled context together with the first error
    /// encountered (if any), so the caller can surface it.
    pub fn load_system_prompt_context(
        &self,
        cx: &App,
    ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
        let project = self.project.read(cx);
        let tasks = project
            .visible_worktrees(cx)
            .map(|worktree| {
                Self::load_worktree_info_for_system_prompt(
                    project.fs().clone(),
                    worktree.read(cx),
                    cx,
                )
            })
            .collect::<Vec<_>>();

        cx.spawn(async |_cx| {
            let results = futures::future::join_all(tasks).await;
            // Collect all worktree infos, remembering only the first error.
            let mut first_err = None;
            let worktrees = results
                .into_iter()
                .map(|(worktree, err)| {
                    if first_err.is_none() && err.is_some() {
                        first_err = err;
                    }
                    worktree
                })
                .collect::<Vec<_>>();
            (AssistantSystemPromptContext::new(worktrees), first_err)
        })
    }
762
    /// Gathers a worktree's root name, path, and rules-file contents for the
    /// system prompt.
    ///
    /// The first existing file from `RULES_FILE_NAMES` (in that order) wins.
    /// A failure to load the rules file is reported as a [`ThreadError`]
    /// alongside the (rules-less) worktree info rather than aborting.
    fn load_worktree_info_for_system_prompt(
        fs: Arc<dyn Fs>,
        worktree: &Worktree,
        cx: &App,
    ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
        let root_name = worktree.root_name().into();
        let abs_path = worktree.abs_path();

        // Note that Cline supports `.clinerules` being a directory, but that is not currently
        // supported. This doesn't seem to occur often in GitHub repositories.
        const RULES_FILE_NAMES: [&'static str; 6] = [
            ".rules",
            ".cursorrules",
            ".windsurfrules",
            ".clinerules",
            ".github/copilot-instructions.md",
            "CLAUDE.md",
        ];
        let selected_rules_file = RULES_FILE_NAMES
            .into_iter()
            .filter_map(|name| {
                worktree
                    .entry_for_path(name)
                    .filter(|entry| entry.is_file())
                    .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
            })
            .next();

        if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
            cx.spawn(async move |_| {
                let rules_file_result = maybe!(async move {
                    let abs_rules_path = abs_rules_path?;
                    let text = fs.load(&abs_rules_path).await.with_context(|| {
                        format!("Failed to load assistant rules file {:?}", abs_rules_path)
                    })?;
                    anyhow::Ok(RulesFile {
                        rel_path: rel_rules_path,
                        abs_path: abs_rules_path.into(),
                        text: text.trim().to_string(),
                    })
                })
                .await;
                let (rules_file, rules_file_error) = match rules_file_result {
                    Ok(rules_file) => (Some(rules_file), None),
                    Err(err) => (
                        None,
                        Some(ThreadError::Message {
                            header: "Error loading rules file".into(),
                            message: format!("{err}").into(),
                        }),
                    ),
                };
                let worktree_info = WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file,
                };
                (worktree_info, rules_file_error)
            })
        } else {
            // No rules file found — return the worktree info synchronously.
            Task::ready((
                WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file: None,
                },
                None,
            ))
        }
    }
833
834 pub fn send_to_model(
835 &mut self,
836 model: Arc<dyn LanguageModel>,
837 request_kind: RequestKind,
838 cx: &mut Context<Self>,
839 ) {
840 let mut request = self.to_completion_request(request_kind, cx);
841 if model.supports_tools() {
842 request.tools = {
843 let mut tools = Vec::new();
844 tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
845 LanguageModelRequestTool {
846 name: tool.name(),
847 description: tool.description(),
848 input_schema: tool.input_schema(model.tool_input_format()),
849 }
850 }));
851
852 tools
853 };
854 }
855
856 self.stream_completion(request, model, cx);
857 }
858
859 pub fn used_tools_since_last_user_message(&self) -> bool {
860 for message in self.messages.iter().rev() {
861 if self.tool_use.message_has_tool_results(message.id) {
862 return true;
863 } else if message.role == Role::User {
864 return false;
865 }
866 }
867
868 false
869 }
870
871 pub fn to_completion_request(
872 &self,
873 request_kind: RequestKind,
874 cx: &App,
875 ) -> LanguageModelRequest {
876 let mut request = LanguageModelRequest {
877 messages: vec![],
878 tools: Vec::new(),
879 stop: Vec::new(),
880 temperature: None,
881 };
882
883 if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
884 if let Some(system_prompt) = self
885 .prompt_builder
886 .generate_assistant_system_prompt(system_prompt_context)
887 .context("failed to generate assistant system prompt")
888 .log_err()
889 {
890 request.messages.push(LanguageModelRequestMessage {
891 role: Role::System,
892 content: vec![MessageContent::Text(system_prompt)],
893 cache: true,
894 });
895 }
896 } else {
897 log::error!("system_prompt_context not set.")
898 }
899
900 let mut added_context_ids = HashSet::<ContextId>::default();
901
902 for message in &self.messages {
903 let mut request_message = LanguageModelRequestMessage {
904 role: message.role,
905 content: Vec::new(),
906 cache: false,
907 };
908
909 match request_kind {
910 RequestKind::Chat => {
911 self.tool_use
912 .attach_tool_results(message.id, &mut request_message);
913 }
914 RequestKind::Summarize => {
915 // We don't care about tool use during summarization.
916 if self.tool_use.message_has_tool_results(message.id) {
917 continue;
918 }
919 }
920 }
921
922 // Attach context to this message if it's the first to reference it
923 if let Some(context_ids) = self.context_by_message.get(&message.id) {
924 let new_context_ids: Vec<_> = context_ids
925 .iter()
926 .filter(|id| !added_context_ids.contains(id))
927 .collect();
928
929 if !new_context_ids.is_empty() {
930 let referenced_context = new_context_ids
931 .iter()
932 .filter_map(|context_id| self.context.get(*context_id));
933
934 attach_context_to_message(&mut request_message, referenced_context, cx);
935 added_context_ids.extend(context_ids.iter());
936 }
937 }
938
939 if !message.segments.is_empty() {
940 request_message
941 .content
942 .push(MessageContent::Text(message.to_string()));
943 }
944
945 match request_kind {
946 RequestKind::Chat => {
947 self.tool_use
948 .attach_tool_uses(message.id, &mut request_message);
949 }
950 RequestKind::Summarize => {
951 // We don't care about tool use during summarization.
952 }
953 };
954
955 request.messages.push(request_message);
956 }
957
958 // Set a cache breakpoint at the second-to-last message.
959 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
960 let breakpoint_index = request.messages.len() - 2;
961 for (index, message) in request.messages.iter_mut().enumerate() {
962 message.cache = index == breakpoint_index;
963 }
964
965 self.attached_tracked_files_state(&mut request.messages, cx);
966
967 request
968 }
969
970 fn attached_tracked_files_state(
971 &self,
972 messages: &mut Vec<LanguageModelRequestMessage>,
973 cx: &App,
974 ) {
975 const STALE_FILES_HEADER: &str = "These files changed since last read:";
976
977 let mut stale_message = String::new();
978
979 let action_log = self.action_log.read(cx);
980
981 for stale_file in action_log.stale_buffers(cx) {
982 let Some(file) = stale_file.read(cx).file() else {
983 continue;
984 };
985
986 if stale_message.is_empty() {
987 write!(&mut stale_message, "{}", STALE_FILES_HEADER).ok();
988 }
989
990 writeln!(&mut stale_message, "- {}", file.path().display()).ok();
991 }
992
993 let mut content = Vec::with_capacity(2);
994
995 if !stale_message.is_empty() {
996 content.push(stale_message.into());
997 }
998
999 if action_log.has_edited_files_since_project_diagnostics_check() {
1000 content.push(
1001 "\n\nWhen you're done making changes, make sure to check project diagnostics \
1002 and fix all errors AND warnings you introduced! \
1003 DO NOT mention you're going to do this until you're done."
1004 .into(),
1005 );
1006 }
1007
1008 if !content.is_empty() {
1009 let context_message = LanguageModelRequestMessage {
1010 role: Role::User,
1011 content,
1012 cache: false,
1013 };
1014
1015 messages.push(context_message);
1016 }
1017 }
1018
1019 pub fn stream_completion(
1020 &mut self,
1021 request: LanguageModelRequest,
1022 model: Arc<dyn LanguageModel>,
1023 cx: &mut Context<Self>,
1024 ) {
1025 let pending_completion_id = post_inc(&mut self.completion_count);
1026
1027 let task = cx.spawn(async move |thread, cx| {
1028 let stream = model.stream_completion(request, &cx);
1029 let initial_token_usage =
1030 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
1031 let stream_completion = async {
1032 let mut events = stream.await?;
1033 let mut stop_reason = StopReason::EndTurn;
1034 let mut current_token_usage = TokenUsage::default();
1035
1036 while let Some(event) = events.next().await {
1037 let event = event?;
1038
1039 thread.update(cx, |thread, cx| {
1040 match event {
1041 LanguageModelCompletionEvent::StartMessage { .. } => {
1042 thread.insert_message(
1043 Role::Assistant,
1044 vec![MessageSegment::Text(String::new())],
1045 cx,
1046 );
1047 }
1048 LanguageModelCompletionEvent::Stop(reason) => {
1049 stop_reason = reason;
1050 }
1051 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1052 thread.cumulative_token_usage =
1053 thread.cumulative_token_usage.clone() + token_usage.clone()
1054 - current_token_usage.clone();
1055 current_token_usage = token_usage;
1056 }
1057 LanguageModelCompletionEvent::Text(chunk) => {
1058 if let Some(last_message) = thread.messages.last_mut() {
1059 if last_message.role == Role::Assistant {
1060 last_message.push_text(&chunk);
1061 cx.emit(ThreadEvent::StreamedAssistantText(
1062 last_message.id,
1063 chunk,
1064 ));
1065 } else {
1066 // If we won't have an Assistant message yet, assume this chunk marks the beginning
1067 // of a new Assistant response.
1068 //
1069 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1070 // will result in duplicating the text of the chunk in the rendered Markdown.
1071 thread.insert_message(
1072 Role::Assistant,
1073 vec![MessageSegment::Text(chunk.to_string())],
1074 cx,
1075 );
1076 };
1077 }
1078 }
1079 LanguageModelCompletionEvent::Thinking(chunk) => {
1080 if let Some(last_message) = thread.messages.last_mut() {
1081 if last_message.role == Role::Assistant {
1082 last_message.push_thinking(&chunk);
1083 cx.emit(ThreadEvent::StreamedAssistantThinking(
1084 last_message.id,
1085 chunk,
1086 ));
1087 } else {
1088 // If we won't have an Assistant message yet, assume this chunk marks the beginning
1089 // of a new Assistant response.
1090 //
1091 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
1092 // will result in duplicating the text of the chunk in the rendered Markdown.
1093 thread.insert_message(
1094 Role::Assistant,
1095 vec![MessageSegment::Thinking(chunk.to_string())],
1096 cx,
1097 );
1098 };
1099 }
1100 }
1101 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1102 let last_assistant_message_id = thread
1103 .messages
1104 .iter_mut()
1105 .rfind(|message| message.role == Role::Assistant)
1106 .map(|message| message.id)
1107 .unwrap_or_else(|| {
1108 thread.insert_message(Role::Assistant, vec![], cx)
1109 });
1110
1111 thread.tool_use.request_tool_use(
1112 last_assistant_message_id,
1113 tool_use,
1114 cx,
1115 );
1116 }
1117 }
1118
1119 thread.touch_updated_at();
1120 cx.emit(ThreadEvent::StreamedCompletion);
1121 cx.notify();
1122 })?;
1123
1124 smol::future::yield_now().await;
1125 }
1126
1127 thread.update(cx, |thread, cx| {
1128 thread
1129 .pending_completions
1130 .retain(|completion| completion.id != pending_completion_id);
1131
1132 if thread.summary.is_none() && thread.messages.len() >= 2 {
1133 thread.summarize(cx);
1134 }
1135 })?;
1136
1137 anyhow::Ok(stop_reason)
1138 };
1139
1140 let result = stream_completion.await;
1141
1142 thread
1143 .update(cx, |thread, cx| {
1144 thread.finalize_pending_checkpoint(cx);
1145 match result.as_ref() {
1146 Ok(stop_reason) => match stop_reason {
1147 StopReason::ToolUse => {
1148 cx.emit(ThreadEvent::UsePendingTools);
1149 }
1150 StopReason::EndTurn => {}
1151 StopReason::MaxTokens => {}
1152 },
1153 Err(error) => {
1154 if error.is::<PaymentRequiredError>() {
1155 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1156 } else if error.is::<MaxMonthlySpendReachedError>() {
1157 cx.emit(ThreadEvent::ShowError(
1158 ThreadError::MaxMonthlySpendReached,
1159 ));
1160 } else {
1161 let error_message = error
1162 .chain()
1163 .map(|err| err.to_string())
1164 .collect::<Vec<_>>()
1165 .join("\n");
1166 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1167 header: "Error interacting with language model".into(),
1168 message: SharedString::from(error_message.clone()),
1169 }));
1170 }
1171
1172 thread.cancel_last_completion(cx);
1173 }
1174 }
1175 cx.emit(ThreadEvent::DoneStreaming);
1176
1177 if let Ok(initial_usage) = initial_token_usage {
1178 let usage = thread.cumulative_token_usage.clone() - initial_usage;
1179
1180 telemetry::event!(
1181 "Assistant Thread Completion",
1182 thread_id = thread.id().to_string(),
1183 model = model.telemetry_id(),
1184 model_provider = model.provider_id().to_string(),
1185 input_tokens = usage.input_tokens,
1186 output_tokens = usage.output_tokens,
1187 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1188 cache_read_input_tokens = usage.cache_read_input_tokens,
1189 );
1190 }
1191 })
1192 .ok();
1193 });
1194
1195 self.pending_completions.push(PendingCompletion {
1196 id: pending_completion_id,
1197 _task: task,
1198 });
1199 }
1200
1201 pub fn summarize(&mut self, cx: &mut Context<Self>) {
1202 let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
1203 return;
1204 };
1205 let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
1206 return;
1207 };
1208
1209 if !provider.is_authenticated(cx) {
1210 return;
1211 }
1212
1213 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1214 request.messages.push(LanguageModelRequestMessage {
1215 role: Role::User,
1216 content: vec![
1217 "Generate a concise 3-7 word title for this conversation, omitting punctuation. \
1218 Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`. \
1219 If the conversation is about a specific subject, include it in the title. \
1220 Be descriptive. DO NOT speak in the first person."
1221 .into(),
1222 ],
1223 cache: false,
1224 });
1225
1226 self.pending_summary = cx.spawn(async move |this, cx| {
1227 async move {
1228 let stream = model.stream_completion_text(request, &cx);
1229 let mut messages = stream.await?;
1230
1231 let mut new_summary = String::new();
1232 while let Some(message) = messages.stream.next().await {
1233 let text = message?;
1234 let mut lines = text.lines();
1235 new_summary.extend(lines.next());
1236
1237 // Stop if the LLM generated multiple lines.
1238 if lines.next().is_some() {
1239 break;
1240 }
1241 }
1242
1243 this.update(cx, |this, cx| {
1244 if !new_summary.is_empty() {
1245 this.summary = Some(new_summary.into());
1246 }
1247
1248 cx.emit(ThreadEvent::SummaryChanged);
1249 })?;
1250
1251 anyhow::Ok(())
1252 }
1253 .log_err()
1254 .await
1255 });
1256 }
1257
    /// Starts generating a detailed Markdown summary of this thread.
    ///
    /// Returns `None` when a summary is already generated (or generating) for
    /// the latest message, when no provider/model is configured, or when the
    /// provider is not authenticated; otherwise returns the task driving the
    /// generation.
    pub fn generate_detailed_summary(&mut self, cx: &mut Context<Self>) -> Option<Task<()>> {
        let last_message_id = self.messages.last().map(|message| message.id)?;

        match &self.detailed_summary_state {
            DetailedSummaryState::Generating { message_id, .. }
            | DetailedSummaryState::Generated { message_id, .. }
                if *message_id == last_message_id =>
            {
                // Already up-to-date
                return None;
            }
            _ => {}
        }

        let provider = LanguageModelRegistry::read_global(cx).active_provider()?;
        let model = LanguageModelRegistry::read_global(cx).active_model()?;

        if !provider.is_authenticated(cx) {
            return None;
        }

        let mut request = self.to_completion_request(RequestKind::Summarize, cx);

        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a detailed summary of this conversation. Include:\n\
                1. A brief overview of what was discussed\n\
                2. Key facts or information discovered\n\
                3. Outcomes or conclusions reached\n\
                4. Any action items or next steps if any\n\
                Format it in Markdown with headings and bullet points."
                    .into(),
            ],
            cache: false,
        });

        let task = cx.spawn(async move |thread, cx| {
            let stream = model.stream_completion_text(request, &cx);
            // If the request fails outright, reset the state so a later call
            // can retry.
            let Some(mut messages) = stream.await.log_err() else {
                thread
                    .update(cx, |this, _cx| {
                        this.detailed_summary_state = DetailedSummaryState::NotGenerated;
                    })
                    .log_err();

                return;
            };

            // Accumulate the streamed chunks; individual chunk errors are
            // logged and skipped rather than aborting the summary.
            let mut new_detailed_summary = String::new();

            while let Some(chunk) = messages.stream.next().await {
                if let Some(chunk) = chunk.log_err() {
                    new_detailed_summary.push_str(&chunk);
                }
            }

            thread
                .update(cx, |this, _cx| {
                    this.detailed_summary_state = DetailedSummaryState::Generated {
                        text: new_detailed_summary.into(),
                        message_id: last_message_id,
                    };
                })
                .log_err();
        });

        // Record that generation is in flight before handing back the task.
        self.detailed_summary_state = DetailedSummaryState::Generating {
            message_id: last_message_id,
        };

        Some(task)
    }
1331
    /// Whether a detailed summary is currently being generated for this thread.
    pub fn is_generating_detailed_summary(&self) -> bool {
        matches!(
            self.detailed_summary_state,
            DetailedSummaryState::Generating { .. }
        )
    }
1338
1339 pub fn use_pending_tools(
1340 &mut self,
1341 cx: &mut Context<Self>,
1342 ) -> impl IntoIterator<Item = PendingToolUse> + use<> {
1343 let request = self.to_completion_request(RequestKind::Chat, cx);
1344 let messages = Arc::new(request.messages);
1345 let pending_tool_uses = self
1346 .tool_use
1347 .pending_tool_uses()
1348 .into_iter()
1349 .filter(|tool_use| tool_use.status.is_idle())
1350 .cloned()
1351 .collect::<Vec<_>>();
1352
1353 for tool_use in pending_tool_uses.iter() {
1354 if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1355 if tool.needs_confirmation()
1356 && !AssistantSettings::get_global(cx).always_allow_tool_actions
1357 {
1358 self.tool_use.confirm_tool_use(
1359 tool_use.id.clone(),
1360 tool_use.ui_text.clone(),
1361 tool_use.input.clone(),
1362 messages.clone(),
1363 tool,
1364 );
1365 cx.emit(ThreadEvent::ToolConfirmationNeeded);
1366 } else {
1367 self.run_tool(
1368 tool_use.id.clone(),
1369 tool_use.ui_text.clone(),
1370 tool_use.input.clone(),
1371 &messages,
1372 tool,
1373 cx,
1374 );
1375 }
1376 }
1377 }
1378
1379 pending_tool_uses
1380 }
1381
    /// Runs the given tool for a pending tool use and registers the in-flight
    /// task with the tool-use state.
    ///
    /// `ui_text` is the human-readable label associated with the running tool.
    pub fn run_tool(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        ui_text: impl Into<SharedString>,
        input: serde_json::Value,
        messages: &[LanguageModelRequestMessage],
        tool: Arc<dyn Tool>,
        cx: &mut Context<Thread>,
    ) {
        let task = self.spawn_tool_use(tool_use_id.clone(), messages, input, tool, cx);
        self.tool_use
            .run_pending_tool(tool_use_id, ui_text.into(), task);
    }
1395
1396 fn spawn_tool_use(
1397 &mut self,
1398 tool_use_id: LanguageModelToolUseId,
1399 messages: &[LanguageModelRequestMessage],
1400 input: serde_json::Value,
1401 tool: Arc<dyn Tool>,
1402 cx: &mut Context<Thread>,
1403 ) -> Task<()> {
1404 let tool_name: Arc<str> = tool.name().into();
1405
1406 let run_tool = if self.tools.is_disabled(&tool.source(), &tool_name) {
1407 Task::ready(Err(anyhow!("tool is disabled: {tool_name}")))
1408 } else {
1409 tool.run(
1410 input,
1411 messages,
1412 self.project.clone(),
1413 self.action_log.clone(),
1414 cx,
1415 )
1416 };
1417
1418 cx.spawn({
1419 async move |thread: WeakEntity<Thread>, cx| {
1420 let output = run_tool.await;
1421
1422 thread
1423 .update(cx, |thread, cx| {
1424 let pending_tool_use = thread.tool_use.insert_tool_output(
1425 tool_use_id.clone(),
1426 tool_name,
1427 output,
1428 );
1429
1430 cx.emit(ThreadEvent::ToolFinished {
1431 tool_use_id,
1432 pending_tool_use,
1433 canceled: false,
1434 });
1435 })
1436 .ok();
1437 }
1438 })
1439 }
1440
1441 pub fn attach_tool_results(
1442 &mut self,
1443 updated_context: Vec<AssistantContext>,
1444 cx: &mut Context<Self>,
1445 ) {
1446 self.context.extend(
1447 updated_context
1448 .into_iter()
1449 .map(|context| (context.id(), context)),
1450 );
1451
1452 // Insert a user message to contain the tool results.
1453 self.insert_user_message(
1454 // TODO: Sending up a user message without any content results in the model sending back
1455 // responses that also don't have any content. We currently don't handle this case well,
1456 // so for now we provide some text to keep the model on track.
1457 "Here are the tool results.",
1458 Vec::new(),
1459 None,
1460 cx,
1461 );
1462 }
1463
1464 /// Cancels the last pending completion, if there are any pending.
1465 ///
1466 /// Returns whether a completion was canceled.
1467 pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
1468 let canceled = if self.pending_completions.pop().is_some() {
1469 true
1470 } else {
1471 let mut canceled = false;
1472 for pending_tool_use in self.tool_use.cancel_pending() {
1473 canceled = true;
1474 cx.emit(ThreadEvent::ToolFinished {
1475 tool_use_id: pending_tool_use.id.clone(),
1476 pending_tool_use: Some(pending_tool_use),
1477 canceled: true,
1478 });
1479 }
1480 canceled
1481 };
1482 self.finalize_pending_checkpoint(cx);
1483 canceled
1484 }
1485
    /// Returns the feedback given to the thread, if any.
    ///
    /// Feedback is recorded via [`Self::report_feedback`].
    pub fn feedback(&self) -> Option<ThreadFeedback> {
        self.feedback
    }
1490
    /// Reports feedback about the thread and stores it in our telemetry backend.
    ///
    /// Captures a project snapshot and a serialized copy of the thread, then
    /// submits both along with the rating on a background task.
    pub fn report_feedback(
        &mut self,
        feedback: ThreadFeedback,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        // Gather everything that needs `cx` up front; the rest of the work
        // happens on a background task.
        let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
        let serialized_thread = self.serialize(cx);
        let thread_id = self.id().clone();
        let client = self.project.read(cx).client();
        self.feedback = Some(feedback);
        cx.notify();

        cx.background_spawn(async move {
            let final_project_snapshot = final_project_snapshot.await;
            let serialized_thread = serialized_thread.await?;
            // A serialization failure degrades to `null` rather than dropping
            // the whole feedback event.
            let thread_data =
                serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);

            let rating = match feedback {
                ThreadFeedback::Positive => "positive",
                ThreadFeedback::Negative => "negative",
            };
            telemetry::event!(
                "Assistant Thread Rated",
                rating,
                thread_id,
                thread_data,
                final_project_snapshot
            );
            // Flush eagerly so explicit user feedback isn't lost before the
            // next scheduled flush.
            client.telemetry().flush_events();

            Ok(())
        })
    }
1526
    /// Create a snapshot of the current project state including git information and unsaved buffers.
    ///
    /// Spawns one [`Self::worktree_snapshot`] task per visible worktree, joins
    /// them, and records the paths of all dirty buffers.
    fn project_snapshot(
        project: Entity<Project>,
        cx: &mut Context<Self>,
    ) -> Task<Arc<ProjectSnapshot>> {
        let git_store = project.read(cx).git_store().clone();
        let worktree_snapshots: Vec<_> = project
            .read(cx)
            .visible_worktrees(cx)
            .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
            .collect();

        cx.spawn(async move |_, cx| {
            let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;

            // Collect paths of buffers with unsaved changes. This is
            // best-effort: if the app context is gone the list stays empty.
            let mut unsaved_buffers = Vec::new();
            cx.update(|app_cx| {
                let buffer_store = project.read(app_cx).buffer_store();
                for buffer_handle in buffer_store.read(app_cx).buffers() {
                    let buffer = buffer_handle.read(app_cx);
                    if buffer.is_dirty() {
                        if let Some(file) = buffer.file() {
                            let path = file.path().to_string_lossy().to_string();
                            unsaved_buffers.push(path);
                        }
                    }
                }
            })
            .ok();

            Arc::new(ProjectSnapshot {
                worktree_snapshots,
                unsaved_buffer_paths: unsaved_buffers,
                timestamp: Utc::now(),
            })
        })
    }
1564
    /// Captures a [`WorktreeSnapshot`] (absolute path plus git metadata, when
    /// available) for a single worktree.
    fn worktree_snapshot(
        worktree: Entity<project::Worktree>,
        git_store: Entity<GitStore>,
        cx: &App,
    ) -> Task<WorktreeSnapshot> {
        cx.spawn(async move |cx| {
            // Get worktree path and snapshot
            let worktree_info = cx.update(|app_cx| {
                let worktree = worktree.read(app_cx);
                let path = worktree.abs_path().to_string_lossy().to_string();
                let snapshot = worktree.snapshot();
                (path, snapshot)
            });

            // If the app context is gone, fall back to an empty snapshot.
            let Ok((worktree_path, _snapshot)) = worktree_info else {
                return WorktreeSnapshot {
                    worktree_path: String::new(),
                    git_state: None,
                };
            };

            // Find the repository whose path space contains this worktree, if
            // any, then ask it for its git state.
            let git_state = git_store
                .update(cx, |git_store, cx| {
                    git_store
                        .repositories()
                        .values()
                        .find(|repo| {
                            repo.read(cx)
                                .abs_path_to_repo_path(&worktree.read(cx).abs_path())
                                .is_some()
                        })
                        .cloned()
                })
                .ok()
                .flatten()
                .map(|repo| {
                    repo.read_with(cx, |repo, _| {
                        let current_branch =
                            repo.branch.as_ref().map(|branch| branch.name.to_string());
                        // Only local repositories expose a backend for remote
                        // URL / HEAD / diff; otherwise report just the branch.
                        repo.send_job(|state, _| async move {
                            let RepositoryState::Local { backend, .. } = state else {
                                return GitState {
                                    remote_url: None,
                                    head_sha: None,
                                    current_branch,
                                    diff: None,
                                };
                            };

                            let remote_url = backend.remote_url("origin");
                            let head_sha = backend.head_sha();
                            let diff = backend.diff(DiffType::HeadToWorktree).await.ok();

                            GitState {
                                remote_url,
                                head_sha,
                                current_branch,
                                diff,
                            }
                        })
                    })
                });

            // Flatten the nested fallibility (no repository, entity released,
            // job failed): any failure degrades to `git_state: None`.
            let git_state = match git_state {
                Some(git_state) => match git_state.ok() {
                    Some(git_state) => git_state.await.ok(),
                    None => None,
                },
                None => None,
            };

            WorktreeSnapshot {
                worktree_path,
                git_state,
            }
        })
    }
1642
1643 pub fn to_markdown(&self, cx: &App) -> Result<String> {
1644 let mut markdown = Vec::new();
1645
1646 if let Some(summary) = self.summary() {
1647 writeln!(markdown, "# {summary}\n")?;
1648 };
1649
1650 for message in self.messages() {
1651 writeln!(
1652 markdown,
1653 "## {role}\n",
1654 role = match message.role {
1655 Role::User => "User",
1656 Role::Assistant => "Assistant",
1657 Role::System => "System",
1658 }
1659 )?;
1660 for segment in &message.segments {
1661 match segment {
1662 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
1663 MessageSegment::Thinking(text) => {
1664 writeln!(markdown, "<think>{}</think>\n", text)?
1665 }
1666 }
1667 }
1668
1669 for tool_use in self.tool_uses_for_message(message.id, cx) {
1670 writeln!(
1671 markdown,
1672 "**Use Tool: {} ({})**",
1673 tool_use.name, tool_use.id
1674 )?;
1675 writeln!(markdown, "```json")?;
1676 writeln!(
1677 markdown,
1678 "{}",
1679 serde_json::to_string_pretty(&tool_use.input)?
1680 )?;
1681 writeln!(markdown, "```")?;
1682 }
1683
1684 for tool_result in self.tool_results_for_message(message.id) {
1685 write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
1686 if tool_result.is_error {
1687 write!(markdown, " (Error)")?;
1688 }
1689
1690 writeln!(markdown, "**\n")?;
1691 writeln!(markdown, "{}", tool_result.content)?;
1692 }
1693 }
1694
1695 Ok(String::from_utf8_lossy(&markdown).to_string())
1696 }
1697
    /// Delegates to the action log to keep (accept) the edits within
    /// `buffer_range` of the given buffer.
    pub fn keep_edits_in_range(
        &mut self,
        buffer: Entity<language::Buffer>,
        buffer_range: Range<language::Anchor>,
        cx: &mut Context<Self>,
    ) {
        self.action_log.update(cx, |action_log, cx| {
            action_log.keep_edits_in_range(buffer, buffer_range, cx)
        });
    }
1708
    /// Delegates to the action log to keep (accept) all edits it is tracking.
    pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
        self.action_log
            .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
    }
1713
    /// The action log associated with this thread; tools receive it when they
    /// run and its edits can be accepted via the `keep_*` methods.
    pub fn action_log(&self) -> &Entity<ActionLog> {
        &self.action_log
    }
1717
    /// The project this thread operates on.
    pub fn project(&self) -> &Entity<Project> {
        &self.project
    }
1721
    /// Total token usage accumulated across all completions in this thread.
    pub fn cumulative_token_usage(&self) -> TokenUsage {
        self.cumulative_token_usage.clone()
    }
1725
1726 pub fn is_getting_too_long(&self, cx: &App) -> bool {
1727 let model_registry = LanguageModelRegistry::read_global(cx);
1728 let Some(model) = model_registry.active_model() else {
1729 return false;
1730 };
1731
1732 let max_tokens = model.max_token_count();
1733
1734 let current_usage =
1735 self.cumulative_token_usage.input_tokens + self.cumulative_token_usage.output_tokens;
1736
1737 #[cfg(debug_assertions)]
1738 let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
1739 .unwrap_or("0.9".to_string())
1740 .parse()
1741 .unwrap();
1742 #[cfg(not(debug_assertions))]
1743 let warning_threshold: f32 = 0.9;
1744
1745 current_usage as f32 >= (max_tokens as f32 * warning_threshold)
1746 }
1747
1748 pub fn deny_tool_use(
1749 &mut self,
1750 tool_use_id: LanguageModelToolUseId,
1751 tool_name: Arc<str>,
1752 cx: &mut Context<Self>,
1753 ) {
1754 let err = Err(anyhow::anyhow!(
1755 "Permission to run tool action denied by user"
1756 ));
1757
1758 self.tool_use
1759 .insert_tool_output(tool_use_id.clone(), tool_name, err);
1760
1761 cx.emit(ThreadEvent::ToolFinished {
1762 tool_use_id,
1763 pending_tool_use: None,
1764 canceled: true,
1765 });
1766 }
1767}
1768
/// User-visible errors surfaced via [`ThreadEvent::ShowError`].
#[derive(Debug, Clone)]
pub enum ThreadError {
    /// The language model provider requires payment before continuing.
    PaymentRequired,
    /// The user's maximum monthly spend has been reached.
    MaxMonthlySpendReached,
    /// A generic error with a short header and a detailed message.
    Message {
        header: SharedString,
        message: SharedString,
    },
}
1778
/// Events emitted by a [`Thread`] as it streams completions, runs tools, and
/// otherwise changes state.
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    /// An error occurred that should be shown to the user.
    ShowError(ThreadError),
    /// A completion event was received and applied to the thread.
    StreamedCompletion,
    /// Streamed assistant text was appended to the given message.
    StreamedAssistantText(MessageId, String),
    /// Streamed assistant "thinking" text was appended to the given message.
    StreamedAssistantThinking(MessageId, String),
    /// The current completion finished streaming (successfully or not).
    DoneStreaming,
    /// A new message was inserted into the thread.
    MessageAdded(MessageId),
    /// An existing message was edited.
    MessageEdited(MessageId),
    /// A message was removed from the thread.
    MessageDeleted(MessageId),
    /// The thread's summary (title) changed.
    SummaryChanged,
    /// The model stopped to use tools; pending tool uses should now run.
    UsePendingTools,
    /// A tool finished running, successfully or not.
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
        /// Whether the tool was canceled by the user.
        canceled: bool,
    },
    /// The thread's checkpoint state changed.
    CheckpointChanged,
    /// A tool requires user confirmation before it can run.
    ToolConfirmationNeeded,
}
1802
// Allows observers to subscribe to `ThreadEvent`s emitted by `Thread` entities.
impl EventEmitter<ThreadEvent> for Thread {}
1804
/// An in-flight completion request paired with the task driving it.
struct PendingCompletion {
    // Identifier used to find and remove this completion when it finishes or
    // is canceled (see `cancel_last_completion`).
    id: usize,
    // Keeps the streaming task alive while the completion is pending.
    _task: Task<()>,
}