1use std::fmt::Write as _;
2use std::io::Write;
3use std::ops::Range;
4use std::sync::Arc;
5
6use anyhow::{Context as _, Result, anyhow};
7use assistant_settings::AssistantSettings;
8use assistant_tool::{ActionLog, Tool, ToolWorkingSet};
9use chrono::{DateTime, Utc};
10use collections::{BTreeMap, HashMap};
11use fs::Fs;
12use futures::future::Shared;
13use futures::{FutureExt, StreamExt as _};
14use git::repository::DiffType;
15use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task, WeakEntity};
16use language_model::{
17 ConfiguredModel, LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry,
18 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
19 LanguageModelToolResult, LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent,
20 PaymentRequiredError, Role, StopReason, TokenUsage,
21};
22use project::git_store::{GitStore, GitStoreCheckpoint, RepositoryState};
23use project::{Project, Worktree};
24use prompt_store::{
25 AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
26};
27use schemars::JsonSchema;
28use serde::{Deserialize, Serialize};
29use settings::Settings;
30use util::{ResultExt as _, TryFutureExt as _, maybe, post_inc};
31use uuid::Uuid;
32
33use crate::context::{AssistantContext, ContextId, format_context_as_string};
34use crate::thread_store::{
35 SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
36 SerializedToolUse,
37};
38use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState, USING_TOOL_MARKER};
39
40#[derive(Debug, Clone, Copy)]
41pub enum RequestKind {
42 Chat,
43 /// Used when summarizing a thread.
44 Summarize,
45}
46
47#[derive(
48 Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
49)]
50pub struct ThreadId(Arc<str>);
51
52impl ThreadId {
53 pub fn new() -> Self {
54 Self(Uuid::new_v4().to_string().into())
55 }
56}
57
58impl std::fmt::Display for ThreadId {
59 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
60 write!(f, "{}", self.0)
61 }
62}
63
64impl From<&str> for ThreadId {
65 fn from(value: &str) -> Self {
66 Self(value.into())
67 }
68}
69
70#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
71pub struct MessageId(pub(crate) usize);
72
73impl MessageId {
74 fn post_inc(&mut self) -> Self {
75 Self(post_inc(&mut self.0))
76 }
77}
78
79/// A message in a [`Thread`].
80#[derive(Debug, Clone)]
81pub struct Message {
82 pub id: MessageId,
83 pub role: Role,
84 pub segments: Vec<MessageSegment>,
85 pub context: String,
86}
87
88impl Message {
    /// Returns whether the message has meaningful content that should be displayed.
    ///
    /// The model sometimes runs a tool without producing any text, or produces only a
    /// marker segment ([`USING_TOOL_MARKER`]).
91 pub fn should_display_content(&self) -> bool {
92 self.segments.iter().all(|segment| segment.should_display())
93 }
94
95 pub fn push_thinking(&mut self, text: &str) {
96 if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
97 segment.push_str(text);
98 } else {
99 self.segments
100 .push(MessageSegment::Thinking(text.to_string()));
101 }
102 }
103
104 pub fn push_text(&mut self, text: &str) {
105 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
106 segment.push_str(text);
107 } else {
108 self.segments.push(MessageSegment::Text(text.to_string()));
109 }
110 }
111
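    /// Renders the message as plain text: the attached context first, then each segment
    /// in order, with thinking segments wrapped in `<think>` tags.
    ///
    /// Illustrative sketch (not compiled as a doctest; the message is constructed by hand):
    ///
    /// ```ignore
    /// let message = Message {
    ///     id: MessageId(0),
    ///     role: Role::User,
    ///     segments: vec![
    ///         MessageSegment::Thinking("planning".to_string()),
    ///         MessageSegment::Text("Hello".to_string()),
    ///     ],
    ///     context: "attached context\n".to_string(),
    /// };
    /// assert_eq!(
    ///     message.to_string(),
    ///     "attached context\n<think>planning</think>Hello"
    /// );
    /// ```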
112 pub fn to_string(&self) -> String {
113 let mut result = String::new();
114
115 if !self.context.is_empty() {
116 result.push_str(&self.context);
117 }
118
119 for segment in &self.segments {
120 match segment {
121 MessageSegment::Text(text) => result.push_str(text),
122 MessageSegment::Thinking(text) => {
123 result.push_str("<think>");
124 result.push_str(text);
125 result.push_str("</think>");
126 }
127 }
128 }
129
130 result
131 }
132}
133
134#[derive(Debug, Clone, PartialEq, Eq)]
135pub enum MessageSegment {
136 Text(String),
137 Thinking(String),
138}
139
140impl MessageSegment {
141 pub fn text_mut(&mut self) -> &mut String {
142 match self {
143 Self::Text(text) => text,
144 Self::Thinking(text) => text,
145 }
146 }
147
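    /// Returns whether this segment has meaningful text worth displaying.
    ///
    /// Illustrative sketch of the intended behavior (not compiled as a doctest):
    ///
    /// ```ignore
    /// assert!(MessageSegment::Text("Some real output".to_string()).should_display());
    /// assert!(!MessageSegment::Text(String::new()).should_display());
    /// assert!(!MessageSegment::Text(format!(" {USING_TOOL_MARKER} ")).should_display());
    /// ```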
    pub fn should_display(&self) -> bool {
        // We add USING_TOOL_MARKER when making a request that includes tool uses
        // without non-whitespace text around them, and this can cause the model
        // to mimic the pattern, so we consider those segments not displayable.
        match self {
            Self::Text(text) => !text.is_empty() && text.trim() != USING_TOOL_MARKER,
            Self::Thinking(text) => !text.is_empty() && text.trim() != USING_TOOL_MARKER,
        }
    }
157}
158
159#[derive(Debug, Clone, Serialize, Deserialize)]
160pub struct ProjectSnapshot {
161 pub worktree_snapshots: Vec<WorktreeSnapshot>,
162 pub unsaved_buffer_paths: Vec<String>,
163 pub timestamp: DateTime<Utc>,
164}
165
166#[derive(Debug, Clone, Serialize, Deserialize)]
167pub struct WorktreeSnapshot {
168 pub worktree_path: String,
169 pub git_state: Option<GitState>,
170}
171
172#[derive(Debug, Clone, Serialize, Deserialize)]
173pub struct GitState {
174 pub remote_url: Option<String>,
175 pub head_sha: Option<String>,
176 pub current_branch: Option<String>,
177 pub diff: Option<String>,
178}
179
180#[derive(Clone)]
181pub struct ThreadCheckpoint {
182 message_id: MessageId,
183 git_checkpoint: GitStoreCheckpoint,
184}
185
186#[derive(Copy, Clone, Debug)]
187pub enum ThreadFeedback {
188 Positive,
189 Negative,
190}
191
192pub enum LastRestoreCheckpoint {
193 Pending {
194 message_id: MessageId,
195 },
196 Error {
197 message_id: MessageId,
198 error: String,
199 },
200}
201
202impl LastRestoreCheckpoint {
203 pub fn message_id(&self) -> MessageId {
204 match self {
205 LastRestoreCheckpoint::Pending { message_id } => *message_id,
206 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
207 }
208 }
209}
210
211#[derive(Clone, Debug, Default, Serialize, Deserialize)]
212pub enum DetailedSummaryState {
213 #[default]
214 NotGenerated,
215 Generating {
216 message_id: MessageId,
217 },
218 Generated {
219 text: SharedString,
220 message_id: MessageId,
221 },
222}
223
224#[derive(Default)]
225pub struct TotalTokenUsage {
226 pub total: usize,
227 pub max: usize,
228 pub ratio: TokenUsageRatio,
229}
230
231#[derive(Default, PartialEq, Eq)]
232pub enum TokenUsageRatio {
233 #[default]
234 Normal,
235 Warning,
236 Exceeded,
237}
238
239/// A thread of conversation with the LLM.
240pub struct Thread {
241 id: ThreadId,
242 updated_at: DateTime<Utc>,
243 summary: Option<SharedString>,
244 pending_summary: Task<Option<()>>,
245 detailed_summary_state: DetailedSummaryState,
246 messages: Vec<Message>,
247 next_message_id: MessageId,
248 context: BTreeMap<ContextId, AssistantContext>,
249 context_by_message: HashMap<MessageId, Vec<ContextId>>,
250 system_prompt_context: Option<AssistantSystemPromptContext>,
251 checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
252 completion_count: usize,
253 pending_completions: Vec<PendingCompletion>,
254 project: Entity<Project>,
255 prompt_builder: Arc<PromptBuilder>,
256 tools: Arc<ToolWorkingSet>,
257 tool_use: ToolUseState,
258 action_log: Entity<ActionLog>,
259 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
260 pending_checkpoint: Option<ThreadCheckpoint>,
261 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
262 cumulative_token_usage: TokenUsage,
263 feedback: Option<ThreadFeedback>,
264}
265
266impl Thread {
267 pub fn new(
268 project: Entity<Project>,
269 tools: Arc<ToolWorkingSet>,
270 prompt_builder: Arc<PromptBuilder>,
271 cx: &mut Context<Self>,
272 ) -> Self {
273 Self {
274 id: ThreadId::new(),
275 updated_at: Utc::now(),
276 summary: None,
277 pending_summary: Task::ready(None),
278 detailed_summary_state: DetailedSummaryState::NotGenerated,
279 messages: Vec::new(),
280 next_message_id: MessageId(0),
281 context: BTreeMap::default(),
282 context_by_message: HashMap::default(),
283 system_prompt_context: None,
284 checkpoints_by_message: HashMap::default(),
285 completion_count: 0,
286 pending_completions: Vec::new(),
287 project: project.clone(),
288 prompt_builder,
289 tools: tools.clone(),
290 last_restore_checkpoint: None,
291 pending_checkpoint: None,
292 tool_use: ToolUseState::new(tools.clone()),
293 action_log: cx.new(|_| ActionLog::new(project.clone())),
294 initial_project_snapshot: {
295 let project_snapshot = Self::project_snapshot(project, cx);
296 cx.foreground_executor()
297 .spawn(async move { Some(project_snapshot.await) })
298 .shared()
299 },
300 cumulative_token_usage: TokenUsage::default(),
301 feedback: None,
302 }
303 }
304
305 pub fn deserialize(
306 id: ThreadId,
307 serialized: SerializedThread,
308 project: Entity<Project>,
309 tools: Arc<ToolWorkingSet>,
310 prompt_builder: Arc<PromptBuilder>,
311 cx: &mut Context<Self>,
312 ) -> Self {
313 let next_message_id = MessageId(
314 serialized
315 .messages
316 .last()
317 .map(|message| message.id.0 + 1)
318 .unwrap_or(0),
319 );
320 let tool_use =
321 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |_| true);
322
323 Self {
324 id,
325 updated_at: serialized.updated_at,
326 summary: Some(serialized.summary),
327 pending_summary: Task::ready(None),
328 detailed_summary_state: serialized.detailed_summary_state,
329 messages: serialized
330 .messages
331 .into_iter()
332 .map(|message| Message {
333 id: message.id,
334 role: message.role,
335 segments: message
336 .segments
337 .into_iter()
338 .map(|segment| match segment {
339 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
340 SerializedMessageSegment::Thinking { text } => {
341 MessageSegment::Thinking(text)
342 }
343 })
344 .collect(),
345 context: message.context,
346 })
347 .collect(),
348 next_message_id,
349 context: BTreeMap::default(),
350 context_by_message: HashMap::default(),
351 system_prompt_context: None,
352 checkpoints_by_message: HashMap::default(),
353 completion_count: 0,
354 pending_completions: Vec::new(),
355 last_restore_checkpoint: None,
356 pending_checkpoint: None,
357 project: project.clone(),
358 prompt_builder,
359 tools,
360 tool_use,
361 action_log: cx.new(|_| ActionLog::new(project)),
362 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
363 cumulative_token_usage: serialized.cumulative_token_usage,
364 feedback: None,
365 }
366 }
367
368 pub fn id(&self) -> &ThreadId {
369 &self.id
370 }
371
372 pub fn is_empty(&self) -> bool {
373 self.messages.is_empty()
374 }
375
376 pub fn updated_at(&self) -> DateTime<Utc> {
377 self.updated_at
378 }
379
380 pub fn touch_updated_at(&mut self) {
381 self.updated_at = Utc::now();
382 }
383
384 pub fn summary(&self) -> Option<SharedString> {
385 self.summary.clone()
386 }
387
388 pub const DEFAULT_SUMMARY: SharedString = SharedString::new_static("New Thread");
389
390 pub fn summary_or_default(&self) -> SharedString {
391 self.summary.clone().unwrap_or(Self::DEFAULT_SUMMARY)
392 }
393
394 pub fn set_summary(&mut self, new_summary: impl Into<SharedString>, cx: &mut Context<Self>) {
395 let Some(current_summary) = &self.summary else {
396 // Don't allow setting summary until generated
397 return;
398 };
399
400 let mut new_summary = new_summary.into();
401
402 if new_summary.is_empty() {
403 new_summary = Self::DEFAULT_SUMMARY;
404 }
405
406 if current_summary != &new_summary {
407 self.summary = Some(new_summary);
408 cx.emit(ThreadEvent::SummaryChanged);
409 }
410 }
411
412 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
413 self.latest_detailed_summary()
414 .unwrap_or_else(|| self.text().into())
415 }
416
417 fn latest_detailed_summary(&self) -> Option<SharedString> {
418 if let DetailedSummaryState::Generated { text, .. } = &self.detailed_summary_state {
419 Some(text.clone())
420 } else {
421 None
422 }
423 }
424
425 pub fn message(&self, id: MessageId) -> Option<&Message> {
426 self.messages.iter().find(|message| message.id == id)
427 }
428
429 pub fn messages(&self) -> impl Iterator<Item = &Message> {
430 self.messages.iter()
431 }
432
433 pub fn is_generating(&self) -> bool {
434 !self.pending_completions.is_empty() || !self.all_tools_finished()
435 }
436
437 pub fn tools(&self) -> &Arc<ToolWorkingSet> {
438 &self.tools
439 }
440
441 pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
442 self.tool_use
443 .pending_tool_uses()
444 .into_iter()
445 .find(|tool_use| &tool_use.id == id)
446 }
447
448 pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
449 self.tool_use
450 .pending_tool_uses()
451 .into_iter()
452 .filter(|tool_use| tool_use.status.needs_confirmation())
453 }
454
455 pub fn has_pending_tool_uses(&self) -> bool {
456 !self.tool_use.pending_tool_uses().is_empty()
457 }
458
459 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
460 self.checkpoints_by_message.get(&id).cloned()
461 }
462
463 pub fn restore_checkpoint(
464 &mut self,
465 checkpoint: ThreadCheckpoint,
466 cx: &mut Context<Self>,
467 ) -> Task<Result<()>> {
468 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
469 message_id: checkpoint.message_id,
470 });
471 cx.emit(ThreadEvent::CheckpointChanged);
472 cx.notify();
473
474 let project = self.project.read(cx);
475 let restore = project
476 .git_store()
477 .read(cx)
478 .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
479 cx.spawn(async move |this, cx| {
480 let result = restore.await;
481 this.update(cx, |this, cx| {
482 if let Err(err) = result.as_ref() {
483 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
484 message_id: checkpoint.message_id,
485 error: err.to_string(),
486 });
487 } else {
488 this.truncate(checkpoint.message_id, cx);
489 this.last_restore_checkpoint = None;
490 }
491 this.pending_checkpoint = None;
492 cx.emit(ThreadEvent::CheckpointChanged);
493 cx.notify();
494 })?;
495 result
496 })
497 }
498
499 fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
500 let pending_checkpoint = if self.is_generating() {
501 return;
502 } else if let Some(checkpoint) = self.pending_checkpoint.take() {
503 checkpoint
504 } else {
505 return;
506 };
507
508 let git_store = self.project.read(cx).git_store().clone();
509 let final_checkpoint = git_store.read(cx).checkpoint(cx);
510 cx.spawn(async move |this, cx| match final_checkpoint.await {
511 Ok(final_checkpoint) => {
512 let equal = git_store
513 .read_with(cx, |store, cx| {
514 store.compare_checkpoints(
515 pending_checkpoint.git_checkpoint.clone(),
516 final_checkpoint.clone(),
517 cx,
518 )
519 })?
520 .await
521 .unwrap_or(false);
522
523 if equal {
524 git_store
525 .read_with(cx, |store, cx| {
526 store.delete_checkpoint(pending_checkpoint.git_checkpoint, cx)
527 })?
528 .detach();
529 } else {
530 this.update(cx, |this, cx| {
531 this.insert_checkpoint(pending_checkpoint, cx)
532 })?;
533 }
534
535 git_store
536 .read_with(cx, |store, cx| {
537 store.delete_checkpoint(final_checkpoint, cx)
538 })?
539 .detach();
540
541 Ok(())
542 }
543 Err(_) => this.update(cx, |this, cx| {
544 this.insert_checkpoint(pending_checkpoint, cx)
545 }),
546 })
547 .detach();
548 }
549
550 fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
551 self.checkpoints_by_message
552 .insert(checkpoint.message_id, checkpoint);
553 cx.emit(ThreadEvent::CheckpointChanged);
554 cx.notify();
555 }
556
557 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
558 self.last_restore_checkpoint.as_ref()
559 }
560
561 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
562 let Some(message_ix) = self
563 .messages
564 .iter()
565 .rposition(|message| message.id == message_id)
566 else {
567 return;
568 };
569 for deleted_message in self.messages.drain(message_ix..) {
570 self.context_by_message.remove(&deleted_message.id);
571 self.checkpoints_by_message.remove(&deleted_message.id);
572 }
573 cx.notify();
574 }
575
576 pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AssistantContext> {
577 self.context_by_message
578 .get(&id)
579 .into_iter()
580 .flat_map(|context| {
581 context
582 .iter()
583 .filter_map(|context_id| self.context.get(&context_id))
584 })
585 }
586
587 /// Returns whether all of the tool uses have finished running.
588 pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // we've finished running all of the pending tools.
591 self.tool_use
592 .pending_tool_uses()
593 .iter()
594 .all(|tool_use| tool_use.status.is_error())
595 }
596
597 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
598 self.tool_use.tool_uses_for_message(id, cx)
599 }
600
601 pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
602 self.tool_use.tool_results_for_message(id)
603 }
604
605 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
606 self.tool_use.tool_result(id)
607 }
608
609 pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
610 self.tool_use.message_has_tool_results(message_id)
611 }
612
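    /// Inserts a user message, attaching only context that has not already been included
    /// in an earlier message, and records a pending checkpoint when a git checkpoint is
    /// provided.
    ///
    /// Illustrative sketch (not compiled as a doctest; `thread`, `context`, and `cx` are
    /// assumed to come from the surrounding GPUI app, as in the tests at the bottom of
    /// this file):
    ///
    /// ```ignore
    /// let first = thread.insert_user_message("Message 1", vec![context.clone()], None, cx);
    /// // The same context is already tracked, so the second message carries no new context.
    /// let second = thread.insert_user_message("Message 2", vec![context], None, cx);
    /// ```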
613 pub fn insert_user_message(
614 &mut self,
615 text: impl Into<String>,
616 context: Vec<AssistantContext>,
617 git_checkpoint: Option<GitStoreCheckpoint>,
618 cx: &mut Context<Self>,
619 ) -> MessageId {
620 let text = text.into();
621
622 let message_id = self.insert_message(Role::User, vec![MessageSegment::Text(text)], cx);
623
624 // Filter out contexts that have already been included in previous messages
625 let new_context: Vec<_> = context
626 .into_iter()
627 .filter(|ctx| !self.context.contains_key(&ctx.id()))
628 .collect();
629
630 if !new_context.is_empty() {
631 if let Some(context_string) = format_context_as_string(new_context.iter(), cx) {
632 if let Some(message) = self.messages.iter_mut().find(|m| m.id == message_id) {
633 message.context = context_string;
634 }
635 }
636
637 self.action_log.update(cx, |log, cx| {
638 // Track all buffers added as context
639 for ctx in &new_context {
640 match ctx {
641 AssistantContext::File(file_ctx) => {
642 log.buffer_added_as_context(file_ctx.context_buffer.buffer.clone(), cx);
643 }
644 AssistantContext::Directory(dir_ctx) => {
645 for context_buffer in &dir_ctx.context_buffers {
646 log.buffer_added_as_context(context_buffer.buffer.clone(), cx);
647 }
648 }
649 AssistantContext::Symbol(symbol_ctx) => {
650 log.buffer_added_as_context(
651 symbol_ctx.context_symbol.buffer.clone(),
652 cx,
653 );
654 }
655 AssistantContext::FetchedUrl(_) | AssistantContext::Thread(_) => {}
656 }
657 }
658 });
659 }
660
661 let context_ids = new_context
662 .iter()
663 .map(|context| context.id())
664 .collect::<Vec<_>>();
665 self.context.extend(
666 new_context
667 .into_iter()
668 .map(|context| (context.id(), context)),
669 );
670 self.context_by_message.insert(message_id, context_ids);
671
672 if let Some(git_checkpoint) = git_checkpoint {
673 self.pending_checkpoint = Some(ThreadCheckpoint {
674 message_id,
675 git_checkpoint,
676 });
677 }
678 message_id
679 }
680
681 pub fn insert_message(
682 &mut self,
683 role: Role,
684 segments: Vec<MessageSegment>,
685 cx: &mut Context<Self>,
686 ) -> MessageId {
687 let id = self.next_message_id.post_inc();
688 self.messages.push(Message {
689 id,
690 role,
691 segments,
692 context: String::new(),
693 });
694 self.touch_updated_at();
695 cx.emit(ThreadEvent::MessageAdded(id));
696 id
697 }
698
699 pub fn edit_message(
700 &mut self,
701 id: MessageId,
702 new_role: Role,
703 new_segments: Vec<MessageSegment>,
704 cx: &mut Context<Self>,
705 ) -> bool {
706 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
707 return false;
708 };
709 message.role = new_role;
710 message.segments = new_segments;
711 self.touch_updated_at();
712 cx.emit(ThreadEvent::MessageEdited(id));
713 true
714 }
715
716 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
717 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
718 return false;
719 };
720 self.messages.remove(index);
721 self.context_by_message.remove(&id);
722 self.touch_updated_at();
723 cx.emit(ThreadEvent::MessageDeleted(id));
724 true
725 }
726
727 /// Returns the representation of this [`Thread`] in a textual form.
728 ///
729 /// This is the representation we use when attaching a thread as context to another thread.
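    ///
    /// Illustrative sketch of the output shape (each role label on its own line, thinking
    /// segments wrapped in `<think>` tags):
    ///
    /// ```text
    /// User:
    /// What does this function do?
    /// Assistant:
    /// <think>reading the code</think>It parses the config file.
    /// ```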
730 pub fn text(&self) -> String {
731 let mut text = String::new();
732
733 for message in &self.messages {
734 text.push_str(match message.role {
735 language_model::Role::User => "User:",
736 language_model::Role::Assistant => "Assistant:",
737 language_model::Role::System => "System:",
738 });
739 text.push('\n');
740
741 for segment in &message.segments {
742 match segment {
743 MessageSegment::Text(content) => text.push_str(content),
744 MessageSegment::Thinking(content) => {
745 text.push_str(&format!("<think>{}</think>", content))
746 }
747 }
748 }
749 text.push('\n');
750 }
751
752 text
753 }
754
755 /// Serializes this thread into a format for storage or telemetry.
756 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
757 let initial_project_snapshot = self.initial_project_snapshot.clone();
758 cx.spawn(async move |this, cx| {
759 let initial_project_snapshot = initial_project_snapshot.await;
760 this.read_with(cx, |this, cx| SerializedThread {
761 version: SerializedThread::VERSION.to_string(),
762 summary: this.summary_or_default(),
763 updated_at: this.updated_at(),
764 messages: this
765 .messages()
766 .map(|message| SerializedMessage {
767 id: message.id,
768 role: message.role,
769 segments: message
770 .segments
771 .iter()
772 .map(|segment| match segment {
773 MessageSegment::Text(text) => {
774 SerializedMessageSegment::Text { text: text.clone() }
775 }
776 MessageSegment::Thinking(text) => {
777 SerializedMessageSegment::Thinking { text: text.clone() }
778 }
779 })
780 .collect(),
781 tool_uses: this
782 .tool_uses_for_message(message.id, cx)
783 .into_iter()
784 .map(|tool_use| SerializedToolUse {
785 id: tool_use.id,
786 name: tool_use.name,
787 input: tool_use.input,
788 })
789 .collect(),
790 tool_results: this
791 .tool_results_for_message(message.id)
792 .into_iter()
793 .map(|tool_result| SerializedToolResult {
794 tool_use_id: tool_result.tool_use_id.clone(),
795 is_error: tool_result.is_error,
796 content: tool_result.content.clone(),
797 })
798 .collect(),
799 context: message.context.clone(),
800 })
801 .collect(),
802 initial_project_snapshot,
803 cumulative_token_usage: this.cumulative_token_usage.clone(),
804 detailed_summary_state: this.detailed_summary_state.clone(),
805 })
806 })
807 }
808
809 pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
810 self.system_prompt_context = Some(context);
811 }
812
813 pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
814 &self.system_prompt_context
815 }
816
817 pub fn load_system_prompt_context(
818 &self,
819 cx: &App,
820 ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
821 let project = self.project.read(cx);
822 let tasks = project
823 .visible_worktrees(cx)
824 .map(|worktree| {
825 Self::load_worktree_info_for_system_prompt(
826 project.fs().clone(),
827 worktree.read(cx),
828 cx,
829 )
830 })
831 .collect::<Vec<_>>();
832
833 cx.spawn(async |_cx| {
834 let results = futures::future::join_all(tasks).await;
835 let mut first_err = None;
836 let worktrees = results
837 .into_iter()
838 .map(|(worktree, err)| {
839 if first_err.is_none() && err.is_some() {
840 first_err = err;
841 }
842 worktree
843 })
844 .collect::<Vec<_>>();
845 (AssistantSystemPromptContext::new(worktrees), first_err)
846 })
847 }
848
849 fn load_worktree_info_for_system_prompt(
850 fs: Arc<dyn Fs>,
851 worktree: &Worktree,
852 cx: &App,
853 ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
854 let root_name = worktree.root_name().into();
855 let abs_path = worktree.abs_path();
856
857 // Note that Cline supports `.clinerules` being a directory, but that is not currently
858 // supported. This doesn't seem to occur often in GitHub repositories.
859 const RULES_FILE_NAMES: [&'static str; 6] = [
860 ".rules",
861 ".cursorrules",
862 ".windsurfrules",
863 ".clinerules",
864 ".github/copilot-instructions.md",
865 "CLAUDE.md",
866 ];
867 let selected_rules_file = RULES_FILE_NAMES
868 .into_iter()
869 .filter_map(|name| {
870 worktree
871 .entry_for_path(name)
872 .filter(|entry| entry.is_file())
873 .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
874 })
875 .next();
876
877 if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
878 cx.spawn(async move |_| {
879 let rules_file_result = maybe!(async move {
880 let abs_rules_path = abs_rules_path?;
881 let text = fs.load(&abs_rules_path).await.with_context(|| {
882 format!("Failed to load assistant rules file {:?}", abs_rules_path)
883 })?;
884 anyhow::Ok(RulesFile {
885 rel_path: rel_rules_path,
886 abs_path: abs_rules_path.into(),
887 text: text.trim().to_string(),
888 })
889 })
890 .await;
891 let (rules_file, rules_file_error) = match rules_file_result {
892 Ok(rules_file) => (Some(rules_file), None),
893 Err(err) => (
894 None,
895 Some(ThreadError::Message {
896 header: "Error loading rules file".into(),
897 message: format!("{err}").into(),
898 }),
899 ),
900 };
901 let worktree_info = WorktreeInfoForSystemPrompt {
902 root_name,
903 abs_path,
904 rules_file,
905 };
906 (worktree_info, rules_file_error)
907 })
908 } else {
909 Task::ready((
910 WorktreeInfoForSystemPrompt {
911 root_name,
912 abs_path,
913 rules_file: None,
914 },
915 None,
916 ))
917 }
918 }
919
920 pub fn send_to_model(
921 &mut self,
922 model: Arc<dyn LanguageModel>,
923 request_kind: RequestKind,
924 cx: &mut Context<Self>,
925 ) {
926 let mut request = self.to_completion_request(request_kind, cx);
927 if model.supports_tools() {
928 request.tools = {
929 let mut tools = Vec::new();
930 tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
931 LanguageModelRequestTool {
932 name: tool.name(),
933 description: tool.description(),
934 input_schema: tool.input_schema(model.tool_input_format()),
935 }
936 }));
937
938 tools
939 };
940 }
941
942 self.stream_completion(request, model, cx);
943 }
944
945 pub fn used_tools_since_last_user_message(&self) -> bool {
946 for message in self.messages.iter().rev() {
947 if self.tool_use.message_has_tool_results(message.id) {
948 return true;
949 } else if message.role == Role::User {
950 return false;
951 }
952 }
953
954 false
955 }
956
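    /// Builds the [`LanguageModelRequest`] for this thread: an optional system prompt,
    /// one request message per thread message (with tool uses and results attached for
    /// [`RequestKind::Chat`]), and, when needed, a trailing user message describing stale
    /// files and pending diagnostics checks.
    ///
    /// Illustrative sketch (not compiled as a doctest):
    ///
    /// ```ignore
    /// let request = thread.to_completion_request(RequestKind::Chat, cx);
    /// for message in &request.messages {
    ///     println!("{:?}: {} content item(s)", message.role, message.content.len());
    /// }
    /// ```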
957 pub fn to_completion_request(
958 &self,
959 request_kind: RequestKind,
960 cx: &App,
961 ) -> LanguageModelRequest {
962 let mut request = LanguageModelRequest {
963 messages: vec![],
964 tools: Vec::new(),
965 stop: Vec::new(),
966 temperature: None,
967 };
968
969 if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
970 if let Some(system_prompt) = self
971 .prompt_builder
972 .generate_assistant_system_prompt(system_prompt_context)
973 .context("failed to generate assistant system prompt")
974 .log_err()
975 {
976 request.messages.push(LanguageModelRequestMessage {
977 role: Role::System,
978 content: vec![MessageContent::Text(system_prompt)],
979 cache: true,
980 });
981 }
982 } else {
983 log::error!("system_prompt_context not set.")
984 }
985
986 for message in &self.messages {
987 let mut request_message = LanguageModelRequestMessage {
988 role: message.role,
989 content: Vec::new(),
990 cache: false,
991 };
992
993 match request_kind {
994 RequestKind::Chat => {
995 self.tool_use
996 .attach_tool_results(message.id, &mut request_message);
997 }
998 RequestKind::Summarize => {
999 // We don't care about tool use during summarization.
1000 if self.tool_use.message_has_tool_results(message.id) {
1001 continue;
1002 }
1003 }
1004 }
1005
1006 if !message.segments.is_empty() {
1007 request_message
1008 .content
1009 .push(MessageContent::Text(message.to_string()));
1010 }
1011
1012 match request_kind {
1013 RequestKind::Chat => {
1014 self.tool_use
1015 .attach_tool_uses(message.id, &mut request_message);
1016 }
1017 RequestKind::Summarize => {
1018 // We don't care about tool use during summarization.
1019 }
1020 };
1021
1022 request.messages.push(request_message);
1023 }
1024
1025 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
1026 if let Some(last) = request.messages.last_mut() {
1027 last.cache = true;
1028 }
1029
1030 self.attached_tracked_files_state(&mut request.messages, cx);
1031
1032 request
1033 }
1034
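    /// Appends a trailing user message listing files that changed since the model last
    /// read them, plus a reminder to check project diagnostics after edits. Nothing is
    /// appended when there is no such state.
    ///
    /// Illustrative sketch of the stale-files portion (paths are made up for the example):
    ///
    /// ```text
    /// These files changed since last read:
    /// - src/main.rs
    /// - src/lib.rs
    /// ```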
1035 fn attached_tracked_files_state(
1036 &self,
1037 messages: &mut Vec<LanguageModelRequestMessage>,
1038 cx: &App,
1039 ) {
1040 const STALE_FILES_HEADER: &str = "These files changed since last read:";
1041
1042 let mut stale_message = String::new();
1043
1044 let action_log = self.action_log.read(cx);
1045
1046 for stale_file in action_log.stale_buffers(cx) {
1047 let Some(file) = stale_file.read(cx).file() else {
1048 continue;
1049 };
1050
1051 if stale_message.is_empty() {
1052 write!(&mut stale_message, "{}\n", STALE_FILES_HEADER).ok();
1053 }
1054
1055 writeln!(&mut stale_message, "- {}", file.path().display()).ok();
1056 }
1057
1058 let mut content = Vec::with_capacity(2);
1059
1060 if !stale_message.is_empty() {
1061 content.push(stale_message.into());
1062 }
1063
1064 if action_log.has_edited_files_since_project_diagnostics_check() {
1065 content.push(
1066 "\n\nWhen you're done making changes, make sure to check project diagnostics \
1067 and fix all errors AND warnings you introduced! \
1068 DO NOT mention you're going to do this until you're done."
1069 .into(),
1070 );
1071 }
1072
1073 if !content.is_empty() {
1074 let context_message = LanguageModelRequestMessage {
1075 role: Role::User,
1076 content,
1077 cache: false,
1078 };
1079
1080 messages.push(context_message);
1081 }
1082 }
1083
1084 pub fn stream_completion(
1085 &mut self,
1086 request: LanguageModelRequest,
1087 model: Arc<dyn LanguageModel>,
1088 cx: &mut Context<Self>,
1089 ) {
1090 let pending_completion_id = post_inc(&mut self.completion_count);
1091
1092 let task = cx.spawn(async move |thread, cx| {
1093 let stream = model.stream_completion(request, &cx);
1094 let initial_token_usage =
1095 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
1096 let stream_completion = async {
1097 let mut events = stream.await?;
1098 let mut stop_reason = StopReason::EndTurn;
1099 let mut current_token_usage = TokenUsage::default();
1100
1101 while let Some(event) = events.next().await {
1102 let event = event?;
1103
1104 thread.update(cx, |thread, cx| {
1105 match event {
1106 LanguageModelCompletionEvent::StartMessage { .. } => {
1107 thread.insert_message(
1108 Role::Assistant,
1109 vec![MessageSegment::Text(String::new())],
1110 cx,
1111 );
1112 }
1113 LanguageModelCompletionEvent::Stop(reason) => {
1114 stop_reason = reason;
1115 }
1116 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1117 thread.cumulative_token_usage =
1118 thread.cumulative_token_usage.clone() + token_usage.clone()
1119 - current_token_usage.clone();
1120 current_token_usage = token_usage;
1121 }
1122 LanguageModelCompletionEvent::Text(chunk) => {
1123 if let Some(last_message) = thread.messages.last_mut() {
1124 if last_message.role == Role::Assistant {
1125 last_message.push_text(&chunk);
1126 cx.emit(ThreadEvent::StreamedAssistantText(
1127 last_message.id,
1128 chunk,
1129 ));
1130 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1136 thread.insert_message(
1137 Role::Assistant,
1138 vec![MessageSegment::Text(chunk.to_string())],
1139 cx,
1140 );
1141 };
1142 }
1143 }
1144 LanguageModelCompletionEvent::Thinking(chunk) => {
1145 if let Some(last_message) = thread.messages.last_mut() {
1146 if last_message.role == Role::Assistant {
1147 last_message.push_thinking(&chunk);
1148 cx.emit(ThreadEvent::StreamedAssistantThinking(
1149 last_message.id,
1150 chunk,
1151 ));
1152 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1158 thread.insert_message(
1159 Role::Assistant,
1160 vec![MessageSegment::Thinking(chunk.to_string())],
1161 cx,
1162 );
1163 };
1164 }
1165 }
1166 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1167 let last_assistant_message_id = thread
1168 .messages
1169 .iter_mut()
1170 .rfind(|message| message.role == Role::Assistant)
1171 .map(|message| message.id)
1172 .unwrap_or_else(|| {
1173 thread.insert_message(Role::Assistant, vec![], cx)
1174 });
1175
1176 thread.tool_use.request_tool_use(
1177 last_assistant_message_id,
1178 tool_use,
1179 cx,
1180 );
1181 }
1182 }
1183
1184 thread.touch_updated_at();
1185 cx.emit(ThreadEvent::StreamedCompletion);
1186 cx.notify();
1187 })?;
1188
1189 smol::future::yield_now().await;
1190 }
1191
1192 thread.update(cx, |thread, cx| {
1193 thread
1194 .pending_completions
1195 .retain(|completion| completion.id != pending_completion_id);
1196
1197 if thread.summary.is_none() && thread.messages.len() >= 2 {
1198 thread.summarize(cx);
1199 }
1200 })?;
1201
1202 anyhow::Ok(stop_reason)
1203 };
1204
1205 let result = stream_completion.await;
1206
1207 thread
1208 .update(cx, |thread, cx| {
1209 thread.finalize_pending_checkpoint(cx);
1210 match result.as_ref() {
1211 Ok(stop_reason) => match stop_reason {
1212 StopReason::ToolUse => {
1213 cx.emit(ThreadEvent::UsePendingTools);
1214 }
1215 StopReason::EndTurn => {}
1216 StopReason::MaxTokens => {}
1217 },
1218 Err(error) => {
1219 if error.is::<PaymentRequiredError>() {
1220 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1221 } else if error.is::<MaxMonthlySpendReachedError>() {
1222 cx.emit(ThreadEvent::ShowError(
1223 ThreadError::MaxMonthlySpendReached,
1224 ));
1225 } else {
1226 let error_message = error
1227 .chain()
1228 .map(|err| err.to_string())
1229 .collect::<Vec<_>>()
1230 .join("\n");
1231 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1232 header: "Error interacting with language model".into(),
1233 message: SharedString::from(error_message.clone()),
1234 }));
1235 }
1236
1237 thread.cancel_last_completion(cx);
1238 }
1239 }
1240 cx.emit(ThreadEvent::DoneStreaming);
1241
1242 if let Ok(initial_usage) = initial_token_usage {
1243 let usage = thread.cumulative_token_usage.clone() - initial_usage;
1244
1245 telemetry::event!(
1246 "Assistant Thread Completion",
1247 thread_id = thread.id().to_string(),
1248 model = model.telemetry_id(),
1249 model_provider = model.provider_id().to_string(),
1250 input_tokens = usage.input_tokens,
1251 output_tokens = usage.output_tokens,
1252 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1253 cache_read_input_tokens = usage.cache_read_input_tokens,
1254 );
1255 }
1256 })
1257 .ok();
1258 });
1259
1260 self.pending_completions.push(PendingCompletion {
1261 id: pending_completion_id,
1262 _task: task,
1263 });
1264 }
1265
1266 pub fn summarize(&mut self, cx: &mut Context<Self>) {
1267 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
1268 return;
1269 };
1270
1271 if !model.provider.is_authenticated(cx) {
1272 return;
1273 }
1274
1275 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1276 request.messages.push(LanguageModelRequestMessage {
1277 role: Role::User,
1278 content: vec![
1279 "Generate a concise 3-7 word title for this conversation, omitting punctuation. \
1280 Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`. \
1281 If the conversation is about a specific subject, include it in the title. \
1282 Be descriptive. DO NOT speak in the first person."
1283 .into(),
1284 ],
1285 cache: false,
1286 });
1287
1288 self.pending_summary = cx.spawn(async move |this, cx| {
1289 async move {
1290 let stream = model.model.stream_completion_text(request, &cx);
1291 let mut messages = stream.await?;
1292
1293 let mut new_summary = String::new();
1294 while let Some(message) = messages.stream.next().await {
1295 let text = message?;
1296 let mut lines = text.lines();
1297 new_summary.extend(lines.next());
1298
1299 // Stop if the LLM generated multiple lines.
1300 if lines.next().is_some() {
1301 break;
1302 }
1303 }
1304
1305 this.update(cx, |this, cx| {
1306 if !new_summary.is_empty() {
1307 this.summary = Some(new_summary.into());
1308 }
1309
1310 cx.emit(ThreadEvent::SummaryGenerated);
1311 })?;
1312
1313 anyhow::Ok(())
1314 }
1315 .log_err()
1316 .await
1317 });
1318 }
1319
1320 pub fn generate_detailed_summary(&mut self, cx: &mut Context<Self>) -> Option<Task<()>> {
1321 let last_message_id = self.messages.last().map(|message| message.id)?;
1322
1323 match &self.detailed_summary_state {
1324 DetailedSummaryState::Generating { message_id, .. }
1325 | DetailedSummaryState::Generated { message_id, .. }
1326 if *message_id == last_message_id =>
1327 {
1328 // Already up-to-date
1329 return None;
1330 }
1331 _ => {}
1332 }
1333
1334 let ConfiguredModel { model, provider } =
1335 LanguageModelRegistry::read_global(cx).thread_summary_model()?;
1336
1337 if !provider.is_authenticated(cx) {
1338 return None;
1339 }
1340
1341 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1342
1343 request.messages.push(LanguageModelRequestMessage {
1344 role: Role::User,
1345 content: vec![
1346 "Generate a detailed summary of this conversation. Include:\n\
1347 1. A brief overview of what was discussed\n\
1348 2. Key facts or information discovered\n\
1349 3. Outcomes or conclusions reached\n\
1350 4. Any action items or next steps if any\n\
1351 Format it in Markdown with headings and bullet points."
1352 .into(),
1353 ],
1354 cache: false,
1355 });
1356
1357 let task = cx.spawn(async move |thread, cx| {
1358 let stream = model.stream_completion_text(request, &cx);
1359 let Some(mut messages) = stream.await.log_err() else {
1360 thread
1361 .update(cx, |this, _cx| {
1362 this.detailed_summary_state = DetailedSummaryState::NotGenerated;
1363 })
1364 .log_err();
1365
1366 return;
1367 };
1368
1369 let mut new_detailed_summary = String::new();
1370
1371 while let Some(chunk) = messages.stream.next().await {
1372 if let Some(chunk) = chunk.log_err() {
1373 new_detailed_summary.push_str(&chunk);
1374 }
1375 }
1376
1377 thread
1378 .update(cx, |this, _cx| {
1379 this.detailed_summary_state = DetailedSummaryState::Generated {
1380 text: new_detailed_summary.into(),
1381 message_id: last_message_id,
1382 };
1383 })
1384 .log_err();
1385 });
1386
1387 self.detailed_summary_state = DetailedSummaryState::Generating {
1388 message_id: last_message_id,
1389 };
1390
1391 Some(task)
1392 }
1393
1394 pub fn is_generating_detailed_summary(&self) -> bool {
1395 matches!(
1396 self.detailed_summary_state,
1397 DetailedSummaryState::Generating { .. }
1398 )
1399 }
1400
1401 pub fn use_pending_tools(
1402 &mut self,
1403 cx: &mut Context<Self>,
1404 ) -> impl IntoIterator<Item = PendingToolUse> + use<> {
1405 let request = self.to_completion_request(RequestKind::Chat, cx);
1406 let messages = Arc::new(request.messages);
1407 let pending_tool_uses = self
1408 .tool_use
1409 .pending_tool_uses()
1410 .into_iter()
1411 .filter(|tool_use| tool_use.status.is_idle())
1412 .cloned()
1413 .collect::<Vec<_>>();
1414
1415 for tool_use in pending_tool_uses.iter() {
1416 if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1417 if tool.needs_confirmation()
1418 && !AssistantSettings::get_global(cx).always_allow_tool_actions
1419 {
1420 self.tool_use.confirm_tool_use(
1421 tool_use.id.clone(),
1422 tool_use.ui_text.clone(),
1423 tool_use.input.clone(),
1424 messages.clone(),
1425 tool,
1426 );
1427 cx.emit(ThreadEvent::ToolConfirmationNeeded);
1428 } else {
1429 self.run_tool(
1430 tool_use.id.clone(),
1431 tool_use.ui_text.clone(),
1432 tool_use.input.clone(),
1433 &messages,
1434 tool,
1435 cx,
1436 );
1437 }
1438 }
1439 }
1440
1441 pending_tool_uses
1442 }
1443
1444 pub fn run_tool(
1445 &mut self,
1446 tool_use_id: LanguageModelToolUseId,
1447 ui_text: impl Into<SharedString>,
1448 input: serde_json::Value,
1449 messages: &[LanguageModelRequestMessage],
1450 tool: Arc<dyn Tool>,
1451 cx: &mut Context<Thread>,
1452 ) {
1453 let task = self.spawn_tool_use(tool_use_id.clone(), messages, input, tool, cx);
1454 self.tool_use
1455 .run_pending_tool(tool_use_id, ui_text.into(), task);
1456 }
1457
1458 fn spawn_tool_use(
1459 &mut self,
1460 tool_use_id: LanguageModelToolUseId,
1461 messages: &[LanguageModelRequestMessage],
1462 input: serde_json::Value,
1463 tool: Arc<dyn Tool>,
1464 cx: &mut Context<Thread>,
1465 ) -> Task<()> {
1466 let tool_name: Arc<str> = tool.name().into();
1467
1468 let run_tool = if self.tools.is_disabled(&tool.source(), &tool_name) {
1469 Task::ready(Err(anyhow!("tool is disabled: {tool_name}")))
1470 } else {
1471 tool.run(
1472 input,
1473 messages,
1474 self.project.clone(),
1475 self.action_log.clone(),
1476 cx,
1477 )
1478 };
1479
1480 cx.spawn({
1481 async move |thread: WeakEntity<Thread>, cx| {
1482 let output = run_tool.await;
1483
1484 thread
1485 .update(cx, |thread, cx| {
1486 let pending_tool_use = thread.tool_use.insert_tool_output(
1487 tool_use_id.clone(),
1488 tool_name,
1489 output,
1490 );
1491
1492 cx.emit(ThreadEvent::ToolFinished {
1493 tool_use_id,
1494 pending_tool_use,
1495 canceled: false,
1496 });
1497 })
1498 .ok();
1499 }
1500 })
1501 }
1502
1503 pub fn attach_tool_results(&mut self, cx: &mut Context<Self>) {
1504 // Insert a user message to contain the tool results.
1505 self.insert_user_message(
1506 // TODO: Sending up a user message without any content results in the model sending back
1507 // responses that also don't have any content. We currently don't handle this case well,
1508 // so for now we provide some text to keep the model on track.
1509 "Here are the tool results.",
1510 Vec::new(),
1511 None,
1512 cx,
1513 );
1514 }
1515
    /// Cancels the last pending completion, if any are pending.
1517 ///
1518 /// Returns whether a completion was canceled.
1519 pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
1520 let canceled = if self.pending_completions.pop().is_some() {
1521 true
1522 } else {
1523 let mut canceled = false;
1524 for pending_tool_use in self.tool_use.cancel_pending() {
1525 canceled = true;
1526 cx.emit(ThreadEvent::ToolFinished {
1527 tool_use_id: pending_tool_use.id.clone(),
1528 pending_tool_use: Some(pending_tool_use),
1529 canceled: true,
1530 });
1531 }
1532 canceled
1533 };
1534 self.finalize_pending_checkpoint(cx);
1535 canceled
1536 }
1537
1538 /// Returns the feedback given to the thread, if any.
1539 pub fn feedback(&self) -> Option<ThreadFeedback> {
1540 self.feedback
1541 }
1542
1543 /// Reports feedback about the thread and stores it in our telemetry backend.
1544 pub fn report_feedback(
1545 &mut self,
1546 feedback: ThreadFeedback,
1547 cx: &mut Context<Self>,
1548 ) -> Task<Result<()>> {
1549 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
1550 let serialized_thread = self.serialize(cx);
1551 let thread_id = self.id().clone();
1552 let client = self.project.read(cx).client();
1553 self.feedback = Some(feedback);
1554 cx.notify();
1555
1556 cx.background_spawn(async move {
1557 let final_project_snapshot = final_project_snapshot.await;
1558 let serialized_thread = serialized_thread.await?;
1559 let thread_data =
1560 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
1561
1562 let rating = match feedback {
1563 ThreadFeedback::Positive => "positive",
1564 ThreadFeedback::Negative => "negative",
1565 };
1566 telemetry::event!(
1567 "Assistant Thread Rated",
1568 rating,
1569 thread_id,
1570 thread_data,
1571 final_project_snapshot
1572 );
1573 client.telemetry().flush_events();
1574
1575 Ok(())
1576 })
1577 }
1578
    /// Creates a snapshot of the current project state, including git information and unsaved buffers.
1580 fn project_snapshot(
1581 project: Entity<Project>,
1582 cx: &mut Context<Self>,
1583 ) -> Task<Arc<ProjectSnapshot>> {
1584 let git_store = project.read(cx).git_store().clone();
1585 let worktree_snapshots: Vec<_> = project
1586 .read(cx)
1587 .visible_worktrees(cx)
1588 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
1589 .collect();
1590
1591 cx.spawn(async move |_, cx| {
1592 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
1593
1594 let mut unsaved_buffers = Vec::new();
1595 cx.update(|app_cx| {
1596 let buffer_store = project.read(app_cx).buffer_store();
1597 for buffer_handle in buffer_store.read(app_cx).buffers() {
1598 let buffer = buffer_handle.read(app_cx);
1599 if buffer.is_dirty() {
1600 if let Some(file) = buffer.file() {
1601 let path = file.path().to_string_lossy().to_string();
1602 unsaved_buffers.push(path);
1603 }
1604 }
1605 }
1606 })
1607 .ok();
1608
1609 Arc::new(ProjectSnapshot {
1610 worktree_snapshots,
1611 unsaved_buffer_paths: unsaved_buffers,
1612 timestamp: Utc::now(),
1613 })
1614 })
1615 }
1616
1617 fn worktree_snapshot(
1618 worktree: Entity<project::Worktree>,
1619 git_store: Entity<GitStore>,
1620 cx: &App,
1621 ) -> Task<WorktreeSnapshot> {
1622 cx.spawn(async move |cx| {
1623 // Get worktree path and snapshot
1624 let worktree_info = cx.update(|app_cx| {
1625 let worktree = worktree.read(app_cx);
1626 let path = worktree.abs_path().to_string_lossy().to_string();
1627 let snapshot = worktree.snapshot();
1628 (path, snapshot)
1629 });
1630
1631 let Ok((worktree_path, _snapshot)) = worktree_info else {
1632 return WorktreeSnapshot {
1633 worktree_path: String::new(),
1634 git_state: None,
1635 };
1636 };
1637
1638 let git_state = git_store
1639 .update(cx, |git_store, cx| {
1640 git_store
1641 .repositories()
1642 .values()
1643 .find(|repo| {
1644 repo.read(cx)
1645 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
1646 .is_some()
1647 })
1648 .cloned()
1649 })
1650 .ok()
1651 .flatten()
1652 .map(|repo| {
1653 repo.read_with(cx, |repo, _| {
1654 let current_branch =
1655 repo.branch.as_ref().map(|branch| branch.name.to_string());
1656 repo.send_job(|state, _| async move {
1657 let RepositoryState::Local { backend, .. } = state else {
1658 return GitState {
1659 remote_url: None,
1660 head_sha: None,
1661 current_branch,
1662 diff: None,
1663 };
1664 };
1665
1666 let remote_url = backend.remote_url("origin");
1667 let head_sha = backend.head_sha();
1668 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
1669
1670 GitState {
1671 remote_url,
1672 head_sha,
1673 current_branch,
1674 diff,
1675 }
1676 })
1677 })
1678 });
1679
1680 let git_state = match git_state {
1681 Some(git_state) => match git_state.ok() {
1682 Some(git_state) => git_state.await.ok(),
1683 None => None,
1684 },
1685 None => None,
1686 };
1687
1688 WorktreeSnapshot {
1689 worktree_path,
1690 git_state,
1691 }
1692 })
1693 }
1694
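    /// Renders the thread as Markdown: the summary as a `#` heading, one `##` section per
    /// message role, followed by that message's tool uses (as fenced JSON) and tool results.
    ///
    /// Illustrative sketch of the output shape (content is made up for the example):
    ///
    /// ```text
    /// # Thread summary
    ///
    /// ## User
    ///
    /// Please explain this code
    ///
    /// ## Assistant
    ///
    /// It prints a greeting.
    /// ```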
1695 pub fn to_markdown(&self, cx: &App) -> Result<String> {
1696 let mut markdown = Vec::new();
1697
1698 if let Some(summary) = self.summary() {
1699 writeln!(markdown, "# {summary}\n")?;
1700 };
1701
1702 for message in self.messages() {
1703 writeln!(
1704 markdown,
1705 "## {role}\n",
1706 role = match message.role {
1707 Role::User => "User",
1708 Role::Assistant => "Assistant",
1709 Role::System => "System",
1710 }
1711 )?;
1712
1713 if !message.context.is_empty() {
1714 writeln!(markdown, "{}", message.context)?;
1715 }
1716
1717 for segment in &message.segments {
1718 match segment {
1719 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
1720 MessageSegment::Thinking(text) => {
1721 writeln!(markdown, "<think>{}</think>\n", text)?
1722 }
1723 }
1724 }
1725
1726 for tool_use in self.tool_uses_for_message(message.id, cx) {
1727 writeln!(
1728 markdown,
1729 "**Use Tool: {} ({})**",
1730 tool_use.name, tool_use.id
1731 )?;
1732 writeln!(markdown, "```json")?;
1733 writeln!(
1734 markdown,
1735 "{}",
1736 serde_json::to_string_pretty(&tool_use.input)?
1737 )?;
1738 writeln!(markdown, "```")?;
1739 }
1740
1741 for tool_result in self.tool_results_for_message(message.id) {
1742 write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
1743 if tool_result.is_error {
1744 write!(markdown, " (Error)")?;
1745 }
1746
1747 writeln!(markdown, "**\n")?;
1748 writeln!(markdown, "{}", tool_result.content)?;
1749 }
1750 }
1751
1752 Ok(String::from_utf8_lossy(&markdown).to_string())
1753 }
1754
1755 pub fn keep_edits_in_range(
1756 &mut self,
1757 buffer: Entity<language::Buffer>,
1758 buffer_range: Range<language::Anchor>,
1759 cx: &mut Context<Self>,
1760 ) {
1761 self.action_log.update(cx, |action_log, cx| {
1762 action_log.keep_edits_in_range(buffer, buffer_range, cx)
1763 });
1764 }
1765
1766 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
1767 self.action_log
1768 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
1769 }
1770
1771 pub fn reject_edits_in_range(
1772 &mut self,
1773 buffer: Entity<language::Buffer>,
1774 buffer_range: Range<language::Anchor>,
1775 cx: &mut Context<Self>,
1776 ) -> Task<Result<()>> {
1777 self.action_log.update(cx, |action_log, cx| {
1778 action_log.reject_edits_in_range(buffer, buffer_range, cx)
1779 })
1780 }
1781
1782 pub fn action_log(&self) -> &Entity<ActionLog> {
1783 &self.action_log
1784 }
1785
1786 pub fn project(&self) -> &Entity<Project> {
1787 &self.project
1788 }
1789
1790 pub fn cumulative_token_usage(&self) -> TokenUsage {
1791 self.cumulative_token_usage.clone()
1792 }
1793
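    /// Computes this thread's cumulative token usage against the default model's maximum
    /// context size. The ratio is `Exceeded` once the total reaches the model's maximum and
    /// `Warning` once it crosses the warning threshold (0.8 by default; overridable via
    /// `ZED_THREAD_WARNING_THRESHOLD` in debug builds).
    ///
    /// Illustrative sketch of the thresholding (numbers chosen for the example):
    ///
    /// ```text
    /// max = 100_000, threshold = 0.8
    ///   total =  50_000  -> TokenUsageRatio::Normal    (0.50 < 0.80)
    ///   total =  85_000  -> TokenUsageRatio::Warning   (0.85 >= 0.80)
    ///   total = 100_000  -> TokenUsageRatio::Exceeded  (total >= max)
    /// ```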
1794 pub fn total_token_usage(&self, cx: &App) -> TotalTokenUsage {
1795 let model_registry = LanguageModelRegistry::read_global(cx);
1796 let Some(model) = model_registry.default_model() else {
1797 return TotalTokenUsage::default();
1798 };
1799
1800 let max = model.model.max_token_count();
1801
1802 #[cfg(debug_assertions)]
1803 let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
1804 .unwrap_or("0.8".to_string())
1805 .parse()
1806 .unwrap();
1807 #[cfg(not(debug_assertions))]
1808 let warning_threshold: f32 = 0.8;
1809
1810 let total = self.cumulative_token_usage.total_tokens() as usize;
1811
1812 let ratio = if total >= max {
1813 TokenUsageRatio::Exceeded
1814 } else if total as f32 / max as f32 >= warning_threshold {
1815 TokenUsageRatio::Warning
1816 } else {
1817 TokenUsageRatio::Normal
1818 };
1819
1820 TotalTokenUsage { total, max, ratio }
1821 }
1822
1823 pub fn deny_tool_use(
1824 &mut self,
1825 tool_use_id: LanguageModelToolUseId,
1826 tool_name: Arc<str>,
1827 cx: &mut Context<Self>,
1828 ) {
1829 let err = Err(anyhow::anyhow!(
1830 "Permission to run tool action denied by user"
1831 ));
1832
1833 self.tool_use
1834 .insert_tool_output(tool_use_id.clone(), tool_name, err);
1835
1836 cx.emit(ThreadEvent::ToolFinished {
1837 tool_use_id,
1838 pending_tool_use: None,
1839 canceled: true,
1840 });
1841 }
1842}
1843
1844#[derive(Debug, Clone)]
1845pub enum ThreadError {
1846 PaymentRequired,
1847 MaxMonthlySpendReached,
1848 Message {
1849 header: SharedString,
1850 message: SharedString,
1851 },
1852}
1853
1854#[derive(Debug, Clone)]
1855pub enum ThreadEvent {
1856 ShowError(ThreadError),
1857 StreamedCompletion,
1858 StreamedAssistantText(MessageId, String),
1859 StreamedAssistantThinking(MessageId, String),
1860 DoneStreaming,
1861 MessageAdded(MessageId),
1862 MessageEdited(MessageId),
1863 MessageDeleted(MessageId),
1864 SummaryGenerated,
1865 SummaryChanged,
1866 UsePendingTools,
1867 ToolFinished {
1868 #[allow(unused)]
1869 tool_use_id: LanguageModelToolUseId,
1870 /// The pending tool use that corresponds to this tool.
1871 pending_tool_use: Option<PendingToolUse>,
1872 /// Whether the tool was canceled by the user.
1873 canceled: bool,
1874 },
1875 CheckpointChanged,
1876 ToolConfirmationNeeded,
1877}
1878
1879impl EventEmitter<ThreadEvent> for Thread {}
1880
1881struct PendingCompletion {
1882 id: usize,
1883 _task: Task<()>,
1884}
1885
1886#[cfg(test)]
1887mod tests {
1888 use super::*;
1889 use crate::{ThreadStore, context_store::ContextStore, thread_store};
1890 use assistant_settings::AssistantSettings;
1891 use context_server::ContextServerSettings;
1892 use editor::EditorSettings;
1893 use gpui::TestAppContext;
1894 use project::{FakeFs, Project};
1895 use prompt_store::PromptBuilder;
1896 use serde_json::json;
1897 use settings::{Settings, SettingsStore};
1898 use std::sync::Arc;
1899 use theme::ThemeSettings;
1900 use util::path;
1901 use workspace::Workspace;
1902
1903 #[gpui::test]
1904 async fn test_message_with_context(cx: &mut TestAppContext) {
1905 init_test_settings(cx);
1906
1907 let project = create_test_project(
1908 cx,
1909 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
1910 )
1911 .await;
1912
1913 let (_workspace, _thread_store, thread, context_store) =
1914 setup_test_environment(cx, project.clone()).await;
1915
1916 add_file_to_context(&project, &context_store, "test/code.rs", cx)
1917 .await
1918 .unwrap();
1919
1920 let context =
1921 context_store.update(cx, |store, _| store.context().first().cloned().unwrap());
1922
1923 // Insert user message with context
1924 let message_id = thread.update(cx, |thread, cx| {
1925 thread.insert_user_message("Please explain this code", vec![context], None, cx)
1926 });
1927
1928 // Check content and context in message object
1929 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
1930
1931 // Use different path format strings based on platform for the test
1932 #[cfg(windows)]
1933 let path_part = r"test\code.rs";
1934 #[cfg(not(windows))]
1935 let path_part = "test/code.rs";
1936
1937 let expected_context = format!(
1938 r#"
1939<context>
1940The following items were attached by the user. You don't need to use other tools to read them.
1941
1942<files>
1943```rs {path_part}
1944fn main() {{
1945 println!("Hello, world!");
1946}}
1947```
1948</files>
1949</context>
1950"#
1951 );
1952
1953 assert_eq!(message.role, Role::User);
1954 assert_eq!(message.segments.len(), 1);
1955 assert_eq!(
1956 message.segments[0],
1957 MessageSegment::Text("Please explain this code".to_string())
1958 );
1959 assert_eq!(message.context, expected_context);
1960
1961 // Check message in request
1962 let request = thread.read_with(cx, |thread, cx| {
1963 thread.to_completion_request(RequestKind::Chat, cx)
1964 });
1965
1966 assert_eq!(request.messages.len(), 1);
1967 let expected_full_message = format!("{}Please explain this code", expected_context);
1968 assert_eq!(request.messages[0].string_contents(), expected_full_message);
1969 }
1970
    #[gpui::test]
    async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({
                "file1.rs": "fn function1() {}\n",
                "file2.rs": "fn function2() {}\n",
                "file3.rs": "fn function3() {}\n",
            }),
        )
        .await;

        let (_, _thread_store, thread, context_store) =
            setup_test_environment(cx, project.clone()).await;

        // Open files individually
        add_file_to_context(&project, &context_store, "test/file1.rs", cx)
            .await
            .unwrap();
        add_file_to_context(&project, &context_store, "test/file2.rs", cx)
            .await
            .unwrap();
        add_file_to_context(&project, &context_store, "test/file3.rs", cx)
            .await
            .unwrap();

        // Get the context objects
        let contexts = context_store.update(cx, |store, _| store.context().clone());
        assert_eq!(contexts.len(), 3);

        // First message with context 1
        let message1_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Message 1", vec![contexts[0].clone()], None, cx)
        });

        // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
        let message2_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message(
                "Message 2",
                vec![contexts[0].clone(), contexts[1].clone()],
                None,
                cx,
            )
        });

        // Third message with all three contexts (contexts 1 and 2 should be skipped)
        let message3_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message(
                "Message 3",
                vec![
                    contexts[0].clone(),
                    contexts[1].clone(),
                    contexts[2].clone(),
                ],
                None,
                cx,
            )
        });

        // Check what contexts are included in each message
        let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
            (
                thread.message(message1_id).unwrap().clone(),
                thread.message(message2_id).unwrap().clone(),
                thread.message(message3_id).unwrap().clone(),
            )
        });

        // First message should include context 1
        assert!(message1.context.contains("file1.rs"));

        // Second message should include only context 2 (not 1)
        assert!(!message2.context.contains("file1.rs"));
        assert!(message2.context.contains("file2.rs"));

        // Third message should include only context 3 (not 1 or 2)
        assert!(!message3.context.contains("file1.rs"));
        assert!(!message3.context.contains("file2.rs"));
        assert!(message3.context.contains("file3.rs"));

        // Check entire request to make sure all contexts are properly included
        let request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        // The request should contain all 3 messages
        assert_eq!(request.messages.len(), 3);

        // Check that the contexts are properly formatted in each message
        assert!(request.messages[0].string_contents().contains("file1.rs"));
        assert!(!request.messages[0].string_contents().contains("file2.rs"));
        assert!(!request.messages[0].string_contents().contains("file3.rs"));

        assert!(!request.messages[1].string_contents().contains("file1.rs"));
        assert!(request.messages[1].string_contents().contains("file2.rs"));
        assert!(!request.messages[1].string_contents().contains("file3.rs"));

        assert!(!request.messages[2].string_contents().contains("file1.rs"));
        assert!(!request.messages[2].string_contents().contains("file2.rs"));
        assert!(request.messages[2].string_contents().contains("file3.rs"));
    }

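    // Verifies that a message inserted with an empty context vector has an empty context
    // string and is sent to the model as plain text.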
    #[gpui::test]
    async fn test_message_without_files(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
        )
        .await;

        let (_, _thread_store, thread, _context_store) =
            setup_test_environment(cx, project.clone()).await;

        // Insert user message without any context (empty context vector)
        let message_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("What is the best way to learn Rust?", vec![], None, cx)
        });

        // Check content and context in message object
        let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());

        // Context should be empty when no files are included
        assert_eq!(message.role, Role::User);
        assert_eq!(message.segments.len(), 1);
        assert_eq!(
            message.segments[0],
            MessageSegment::Text("What is the best way to learn Rust?".to_string())
        );
        assert_eq!(message.context, "");

        // Check message in request
        let request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        assert_eq!(request.messages.len(), 1);
        assert_eq!(
            request.messages[0].string_contents(),
            "What is the best way to learn Rust?"
        );

        // Add second message, also without context
        let message2_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Are there any good books?", vec![], None, cx)
        });

        let message2 =
            thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
        assert_eq!(message2.context, "");

        // Check that both messages appear in the request
        let request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        assert_eq!(request.messages.len(), 2);
        assert_eq!(
            request.messages[0].string_contents(),
            "What is the best way to learn Rust?"
        );
        assert_eq!(
            request.messages[1].string_contents(),
            "Are there any good books?"
        );
    }

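    // Verifies that editing a buffer after it was attached as context causes a stale-buffer
    // notification to be appended as the final user message of the next request.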
    #[gpui::test]
    async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
        )
        .await;

        let (_workspace, _thread_store, thread, context_store) =
            setup_test_environment(cx, project.clone()).await;

        // Open buffer and add it to context
        let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
            .await
            .unwrap();

        let context =
            context_store.update(cx, |store, _| store.context().first().cloned().unwrap());

        // Insert user message with the buffer as context
        thread.update(cx, |thread, cx| {
            thread.insert_user_message("Explain this code", vec![context], None, cx)
        });

        // Create a request and check that it doesn't have a stale buffer warning yet
        let initial_request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        // Make sure we don't have a stale file warning yet
        let has_stale_warning = initial_request.messages.iter().any(|msg| {
            msg.string_contents()
                .contains("These files changed since last read:")
        });
        assert!(
            !has_stale_warning,
            "Should not have stale buffer warning before buffer is modified"
        );

        // Modify the buffer so it no longer matches the version that was attached as context.
        // The edit is inserted at an arbitrary offset; all that matters for this test is that
        // the buffer has changed since it was last read.
        buffer.update(cx, |buffer, cx| {
            buffer.edit(
                [(1..1, "\n    println!(\"Added a new line\");\n")],
                None,
                cx,
            );
        });

        // Insert another user message without context
        thread.update(cx, |thread, cx| {
            thread.insert_user_message("What does the code do now?", vec![], None, cx)
        });

        // Create a new request and check for the stale buffer warning
        let new_request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        // We should have a stale file warning as the last message
        let last_message = new_request
            .messages
            .last()
            .expect("Request should have messages");

        // The last message should be the stale buffer notification
        assert_eq!(last_message.role, Role::User);

        // Check the exact content of the message
        let expected_content = "These files changed since last read:\n- code.rs\n";
        assert_eq!(
            last_message.string_contents(),
            expected_content,
            "Last message should be exactly the stale buffer notification"
        );
    }

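    // Helper to register the settings and globals these tests depend on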
    fn init_test_settings(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = SettingsStore::test(cx);
            cx.set_global(settings_store);
            language::init(cx);
            Project::init_settings(cx);
            AssistantSettings::register(cx);
            thread_store::init(cx);
            workspace::init_settings(cx);
            ThemeSettings::register(cx);
            ContextServerSettings::register(cx);
            EditorSettings::register(cx);
        });
    }

    // Helper to create a test project with test files
    async fn create_test_project(
        cx: &mut TestAppContext,
        files: serde_json::Value,
    ) -> Entity<Project> {
        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(path!("/test"), files).await;
        Project::test(fs, [path!("/test").as_ref()], cx).await
    }

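    // Helper to build the workspace, thread store, thread, and context store the tests use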
    async fn setup_test_environment(
        cx: &mut TestAppContext,
        project: Entity<Project>,
    ) -> (
        Entity<Workspace>,
        Entity<ThreadStore>,
        Entity<Thread>,
        Entity<ContextStore>,
    ) {
        let (workspace, cx) =
            cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));

        let thread_store = cx.update(|_, cx| {
            ThreadStore::new(
                project.clone(),
                Arc::default(),
                Arc::new(PromptBuilder::new(None).unwrap()),
                cx,
            )
            .unwrap()
        });

        let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
        let context_store = cx.new(|_cx| ContextStore::new(workspace.downgrade(), None));

        (workspace, thread_store, thread, context_store)
    }

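    // Helper to open `path` in the project and add the resulting buffer to the context
    // store, returning the buffer so tests can modify it afterwards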
    async fn add_file_to_context(
        project: &Entity<Project>,
        context_store: &Entity<ContextStore>,
        path: &str,
        cx: &mut TestAppContext,
    ) -> Result<Entity<language::Buffer>> {
        let buffer_path = project
            .read_with(cx, |project, cx| project.find_project_path(path, cx))
            .unwrap();

        let buffer = project
            .update(cx, |project, cx| project.open_buffer(buffer_path, cx))
            .await
            .unwrap();

        context_store
            .update(cx, |store, cx| {
                store.add_file_from_buffer(buffer.clone(), cx)
            })
            .await?;

        Ok(buffer)
    }
}