1use std::fmt::Write as _;
2use std::io::Write;
3use std::ops::Range;
4use std::sync::Arc;
5
6use anyhow::{Context as _, Result, anyhow};
7use assistant_settings::AssistantSettings;
8use assistant_tool::{ActionLog, Tool, ToolWorkingSet};
9use chrono::{DateTime, Utc};
10use collections::{BTreeMap, HashMap};
11use fs::Fs;
12use futures::future::Shared;
13use futures::{FutureExt, StreamExt as _};
14use git::repository::DiffType;
15use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task, WeakEntity};
16use language_model::{
17 ConfiguredModel, LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry,
18 LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
19 LanguageModelToolResult, LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent,
20 PaymentRequiredError, Role, StopReason, TokenUsage,
21};
22use project::git_store::{GitStore, GitStoreCheckpoint, RepositoryState};
23use project::{Project, Worktree};
24use prompt_store::{
25 AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
26};
27use schemars::JsonSchema;
28use serde::{Deserialize, Serialize};
29use settings::Settings;
30use util::{ResultExt as _, TryFutureExt as _, maybe, post_inc};
31use uuid::Uuid;
32
33use crate::context::{AssistantContext, ContextId, format_context_as_string};
34use crate::thread_store::{
35 SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
36 SerializedToolUse,
37};
38use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState, USING_TOOL_MARKER};
39
40#[derive(Debug, Clone, Copy)]
41pub enum RequestKind {
42 Chat,
43 /// Used when summarizing a thread.
44 Summarize,
45}
46
47#[derive(
48 Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, JsonSchema,
49)]
50pub struct ThreadId(Arc<str>);
51
52impl ThreadId {
53 pub fn new() -> Self {
54 Self(Uuid::new_v4().to_string().into())
55 }
56}
57
58impl std::fmt::Display for ThreadId {
59 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
60 write!(f, "{}", self.0)
61 }
62}
63
64impl From<&str> for ThreadId {
65 fn from(value: &str) -> Self {
66 Self(value.into())
67 }
68}
69
70#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
71pub struct MessageId(pub(crate) usize);
72
73impl MessageId {
74 fn post_inc(&mut self) -> Self {
75 Self(post_inc(&mut self.0))
76 }
77}
78
79/// A message in a [`Thread`].
80#[derive(Debug, Clone)]
81pub struct Message {
82 pub id: MessageId,
83 pub role: Role,
84 pub segments: Vec<MessageSegment>,
85 pub context: String,
86}
87
88impl Message {
    /// Returns whether the message contains content that should be displayed.
    ///
    /// The model sometimes runs a tool without producing any text, or produces only a
    /// marker ([`USING_TOOL_MARKER`]); such messages should not be rendered.
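    ///
    /// A hedged sketch of the intended behavior (construction simplified; illustrative only):
    ///
    /// ```ignore
    /// let marker_only = Message {
    ///     id: MessageId(0),
    ///     role: Role::Assistant,
    ///     segments: vec![MessageSegment::Text(USING_TOOL_MARKER.to_string())],
    ///     context: String::new(),
    /// };
    /// assert!(!marker_only.should_display_content());
    ///
    /// let with_text = Message {
    ///     segments: vec![MessageSegment::Text("Here is the plan.".into())],
    ///     ..marker_only
    /// };
    /// assert!(with_text.should_display_content());
    /// ```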
91 pub fn should_display_content(&self) -> bool {
92 self.segments.iter().all(|segment| segment.should_display())
93 }
94
95 pub fn push_thinking(&mut self, text: &str) {
96 if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
97 segment.push_str(text);
98 } else {
99 self.segments
100 .push(MessageSegment::Thinking(text.to_string()));
101 }
102 }
103
104 pub fn push_text(&mut self, text: &str) {
105 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
106 segment.push_str(text);
107 } else {
108 self.segments.push(MessageSegment::Text(text.to_string()));
109 }
110 }
111
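    /// Concatenates the attached context (if any) with every segment, wrapping thinking
    /// segments in `<think>` tags. A hedged sketch of the output shape (illustrative only):
    ///
    /// ```ignore
    /// let mut message = Message {
    ///     id: MessageId(0),
    ///     role: Role::Assistant,
    ///     segments: Vec::new(),
    ///     context: String::new(),
    /// };
    /// message.push_thinking("weighing the options");
    /// message.push_text("Here is the answer.");
    /// assert_eq!(
    ///     message.to_string(),
    ///     "<think>weighing the options</think>Here is the answer."
    /// );
    /// ```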
112 pub fn to_string(&self) -> String {
113 let mut result = String::new();
114
115 if !self.context.is_empty() {
116 result.push_str(&self.context);
117 }
118
119 for segment in &self.segments {
120 match segment {
121 MessageSegment::Text(text) => result.push_str(text),
122 MessageSegment::Thinking(text) => {
123 result.push_str("<think>");
124 result.push_str(text);
125 result.push_str("</think>");
126 }
127 }
128 }
129
130 result
131 }
132}
133
134#[derive(Debug, Clone, PartialEq, Eq)]
135pub enum MessageSegment {
136 Text(String),
137 Thinking(String),
138}
139
140impl MessageSegment {
141 pub fn text_mut(&mut self) -> &mut String {
142 match self {
143 Self::Text(text) => text,
144 Self::Thinking(text) => text,
145 }
146 }
147
148 pub fn should_display(&self) -> bool {
149 // We add USING_TOOL_MARKER when making a request that includes tool uses
150 // without non-whitespace text around them, and this can cause the model
151 // to mimic the pattern, so we consider those segments not displayable.
        match self {
            Self::Text(text) => !text.is_empty() && text.trim() != USING_TOOL_MARKER,
            Self::Thinking(text) => !text.is_empty() && text.trim() != USING_TOOL_MARKER,
        }
156 }
157}
158
159#[derive(Debug, Clone, Serialize, Deserialize)]
160pub struct ProjectSnapshot {
161 pub worktree_snapshots: Vec<WorktreeSnapshot>,
162 pub unsaved_buffer_paths: Vec<String>,
163 pub timestamp: DateTime<Utc>,
164}
165
166#[derive(Debug, Clone, Serialize, Deserialize)]
167pub struct WorktreeSnapshot {
168 pub worktree_path: String,
169 pub git_state: Option<GitState>,
170}
171
172#[derive(Debug, Clone, Serialize, Deserialize)]
173pub struct GitState {
174 pub remote_url: Option<String>,
175 pub head_sha: Option<String>,
176 pub current_branch: Option<String>,
177 pub diff: Option<String>,
178}
179
180#[derive(Clone)]
181pub struct ThreadCheckpoint {
182 message_id: MessageId,
183 git_checkpoint: GitStoreCheckpoint,
184}
185
186#[derive(Copy, Clone, Debug)]
187pub enum ThreadFeedback {
188 Positive,
189 Negative,
190}
191
192pub enum LastRestoreCheckpoint {
193 Pending {
194 message_id: MessageId,
195 },
196 Error {
197 message_id: MessageId,
198 error: String,
199 },
200}
201
202impl LastRestoreCheckpoint {
203 pub fn message_id(&self) -> MessageId {
204 match self {
205 LastRestoreCheckpoint::Pending { message_id } => *message_id,
206 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
207 }
208 }
209}
210
211#[derive(Clone, Debug, Default, Serialize, Deserialize)]
212pub enum DetailedSummaryState {
213 #[default]
214 NotGenerated,
215 Generating {
216 message_id: MessageId,
217 },
218 Generated {
219 text: SharedString,
220 message_id: MessageId,
221 },
222}
223
224#[derive(Default)]
225pub struct TotalTokenUsage {
226 pub total: usize,
227 pub max: usize,
228 pub ratio: TokenUsageRatio,
229}
230
231#[derive(Default, PartialEq, Eq)]
232pub enum TokenUsageRatio {
233 #[default]
234 Normal,
235 Warning,
236 Exceeded,
237}
238
239/// A thread of conversation with the LLM.
240pub struct Thread {
241 id: ThreadId,
242 updated_at: DateTime<Utc>,
243 summary: Option<SharedString>,
244 pending_summary: Task<Option<()>>,
245 detailed_summary_state: DetailedSummaryState,
246 messages: Vec<Message>,
247 next_message_id: MessageId,
248 context: BTreeMap<ContextId, AssistantContext>,
249 context_by_message: HashMap<MessageId, Vec<ContextId>>,
250 system_prompt_context: Option<AssistantSystemPromptContext>,
251 checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
252 completion_count: usize,
253 pending_completions: Vec<PendingCompletion>,
254 project: Entity<Project>,
255 prompt_builder: Arc<PromptBuilder>,
256 tools: Arc<ToolWorkingSet>,
257 tool_use: ToolUseState,
258 action_log: Entity<ActionLog>,
259 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
260 pending_checkpoint: Option<ThreadCheckpoint>,
261 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
262 cumulative_token_usage: TokenUsage,
263 feedback: Option<ThreadFeedback>,
264}
265
266impl Thread {
267 pub fn new(
268 project: Entity<Project>,
269 tools: Arc<ToolWorkingSet>,
270 prompt_builder: Arc<PromptBuilder>,
271 cx: &mut Context<Self>,
272 ) -> Self {
273 Self {
274 id: ThreadId::new(),
275 updated_at: Utc::now(),
276 summary: None,
277 pending_summary: Task::ready(None),
278 detailed_summary_state: DetailedSummaryState::NotGenerated,
279 messages: Vec::new(),
280 next_message_id: MessageId(0),
281 context: BTreeMap::default(),
282 context_by_message: HashMap::default(),
283 system_prompt_context: None,
284 checkpoints_by_message: HashMap::default(),
285 completion_count: 0,
286 pending_completions: Vec::new(),
287 project: project.clone(),
288 prompt_builder,
289 tools: tools.clone(),
290 last_restore_checkpoint: None,
291 pending_checkpoint: None,
292 tool_use: ToolUseState::new(tools.clone()),
293 action_log: cx.new(|_| ActionLog::new(project.clone())),
294 initial_project_snapshot: {
295 let project_snapshot = Self::project_snapshot(project, cx);
296 cx.foreground_executor()
297 .spawn(async move { Some(project_snapshot.await) })
298 .shared()
299 },
300 cumulative_token_usage: TokenUsage::default(),
301 feedback: None,
302 }
303 }
304
305 pub fn deserialize(
306 id: ThreadId,
307 serialized: SerializedThread,
308 project: Entity<Project>,
309 tools: Arc<ToolWorkingSet>,
310 prompt_builder: Arc<PromptBuilder>,
311 cx: &mut Context<Self>,
312 ) -> Self {
313 let next_message_id = MessageId(
314 serialized
315 .messages
316 .last()
317 .map(|message| message.id.0 + 1)
318 .unwrap_or(0),
319 );
320 let tool_use =
321 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |_| true);
322
323 Self {
324 id,
325 updated_at: serialized.updated_at,
326 summary: Some(serialized.summary),
327 pending_summary: Task::ready(None),
328 detailed_summary_state: serialized.detailed_summary_state,
329 messages: serialized
330 .messages
331 .into_iter()
332 .map(|message| Message {
333 id: message.id,
334 role: message.role,
335 segments: message
336 .segments
337 .into_iter()
338 .map(|segment| match segment {
339 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
340 SerializedMessageSegment::Thinking { text } => {
341 MessageSegment::Thinking(text)
342 }
343 })
344 .collect(),
345 context: message.context,
346 })
347 .collect(),
348 next_message_id,
349 context: BTreeMap::default(),
350 context_by_message: HashMap::default(),
351 system_prompt_context: None,
352 checkpoints_by_message: HashMap::default(),
353 completion_count: 0,
354 pending_completions: Vec::new(),
355 last_restore_checkpoint: None,
356 pending_checkpoint: None,
357 project: project.clone(),
358 prompt_builder,
359 tools,
360 tool_use,
361 action_log: cx.new(|_| ActionLog::new(project)),
362 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
363 cumulative_token_usage: serialized.cumulative_token_usage,
364 feedback: None,
365 }
366 }
367
368 pub fn id(&self) -> &ThreadId {
369 &self.id
370 }
371
372 pub fn is_empty(&self) -> bool {
373 self.messages.is_empty()
374 }
375
376 pub fn updated_at(&self) -> DateTime<Utc> {
377 self.updated_at
378 }
379
380 pub fn touch_updated_at(&mut self) {
381 self.updated_at = Utc::now();
382 }
383
384 pub fn summary(&self) -> Option<SharedString> {
385 self.summary.clone()
386 }
387
388 pub const DEFAULT_SUMMARY: SharedString = SharedString::new_static("New Thread");
389
390 pub fn summary_or_default(&self) -> SharedString {
391 self.summary.clone().unwrap_or(Self::DEFAULT_SUMMARY)
392 }
393
394 pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
395 let summary = summary.into();
396 let old_summary = self.summary_or_default();
397
398 self.summary = if summary.is_empty() {
399 Some(Self::DEFAULT_SUMMARY)
400 } else {
401 Some(summary)
402 };
403
404 if Some(old_summary) != self.summary {
405 cx.emit(ThreadEvent::SummaryChanged);
406 }
407 }
408
409 pub fn latest_detailed_summary_or_text(&self) -> SharedString {
410 self.latest_detailed_summary()
411 .unwrap_or_else(|| self.text().into())
412 }
413
414 fn latest_detailed_summary(&self) -> Option<SharedString> {
415 if let DetailedSummaryState::Generated { text, .. } = &self.detailed_summary_state {
416 Some(text.clone())
417 } else {
418 None
419 }
420 }
421
422 pub fn message(&self, id: MessageId) -> Option<&Message> {
423 self.messages.iter().find(|message| message.id == id)
424 }
425
426 pub fn messages(&self) -> impl Iterator<Item = &Message> {
427 self.messages.iter()
428 }
429
430 pub fn is_generating(&self) -> bool {
431 !self.pending_completions.is_empty() || !self.all_tools_finished()
432 }
433
434 pub fn tools(&self) -> &Arc<ToolWorkingSet> {
435 &self.tools
436 }
437
438 pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
439 self.tool_use
440 .pending_tool_uses()
441 .into_iter()
442 .find(|tool_use| &tool_use.id == id)
443 }
444
445 pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
446 self.tool_use
447 .pending_tool_uses()
448 .into_iter()
449 .filter(|tool_use| tool_use.status.needs_confirmation())
450 }
451
452 pub fn has_pending_tool_uses(&self) -> bool {
453 !self.tool_use.pending_tool_uses().is_empty()
454 }
455
456 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
457 self.checkpoints_by_message.get(&id).cloned()
458 }
459
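    /// Restores the project to the given checkpoint's git state and, on success, truncates the
    /// thread back to the checkpoint's message. A hedged usage sketch (entity/context plumbing
    /// assumed; illustrative only):
    ///
    /// ```ignore
    /// if let Some(checkpoint) = thread.read(cx).checkpoint_for_message(message_id) {
    ///     thread
    ///         .update(cx, |thread, cx| thread.restore_checkpoint(checkpoint, cx))
    ///         .detach_and_log_err(cx);
    /// }
    /// ```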
460 pub fn restore_checkpoint(
461 &mut self,
462 checkpoint: ThreadCheckpoint,
463 cx: &mut Context<Self>,
464 ) -> Task<Result<()>> {
465 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
466 message_id: checkpoint.message_id,
467 });
468 cx.emit(ThreadEvent::CheckpointChanged);
469 cx.notify();
470
471 let project = self.project.read(cx);
472 let restore = project
473 .git_store()
474 .read(cx)
475 .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
476 cx.spawn(async move |this, cx| {
477 let result = restore.await;
478 this.update(cx, |this, cx| {
479 if let Err(err) = result.as_ref() {
480 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
481 message_id: checkpoint.message_id,
482 error: err.to_string(),
483 });
484 } else {
485 this.truncate(checkpoint.message_id, cx);
486 this.last_restore_checkpoint = None;
487 }
488 this.pending_checkpoint = None;
489 cx.emit(ThreadEvent::CheckpointChanged);
490 cx.notify();
491 })?;
492 result
493 })
494 }
495
496 fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
497 let pending_checkpoint = if self.is_generating() {
498 return;
499 } else if let Some(checkpoint) = self.pending_checkpoint.take() {
500 checkpoint
501 } else {
502 return;
503 };
504
505 let git_store = self.project.read(cx).git_store().clone();
506 let final_checkpoint = git_store.read(cx).checkpoint(cx);
507 cx.spawn(async move |this, cx| match final_checkpoint.await {
508 Ok(final_checkpoint) => {
509 let equal = git_store
510 .read_with(cx, |store, cx| {
511 store.compare_checkpoints(
512 pending_checkpoint.git_checkpoint.clone(),
513 final_checkpoint.clone(),
514 cx,
515 )
516 })?
517 .await
518 .unwrap_or(false);
519
520 if equal {
521 git_store
522 .read_with(cx, |store, cx| {
523 store.delete_checkpoint(pending_checkpoint.git_checkpoint, cx)
524 })?
525 .detach();
526 } else {
527 this.update(cx, |this, cx| {
528 this.insert_checkpoint(pending_checkpoint, cx)
529 })?;
530 }
531
532 git_store
533 .read_with(cx, |store, cx| {
534 store.delete_checkpoint(final_checkpoint, cx)
535 })?
536 .detach();
537
538 Ok(())
539 }
540 Err(_) => this.update(cx, |this, cx| {
541 this.insert_checkpoint(pending_checkpoint, cx)
542 }),
543 })
544 .detach();
545 }
546
547 fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
548 self.checkpoints_by_message
549 .insert(checkpoint.message_id, checkpoint);
550 cx.emit(ThreadEvent::CheckpointChanged);
551 cx.notify();
552 }
553
554 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
555 self.last_restore_checkpoint.as_ref()
556 }
557
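    /// Removes the message with the given ID and every message after it, along with any context
    /// and checkpoints recorded for the removed messages. A hedged sketch (illustrative only):
    ///
    /// ```ignore
    /// let first = thread.insert_message(Role::User, vec![MessageSegment::Text("a".into())], cx);
    /// let second = thread.insert_message(Role::Assistant, vec![MessageSegment::Text("b".into())], cx);
    /// thread.truncate(second, cx);
    /// assert!(thread.message(first).is_some());
    /// assert!(thread.message(second).is_none());
    /// ```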
558 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
559 let Some(message_ix) = self
560 .messages
561 .iter()
562 .rposition(|message| message.id == message_id)
563 else {
564 return;
565 };
566 for deleted_message in self.messages.drain(message_ix..) {
567 self.context_by_message.remove(&deleted_message.id);
568 self.checkpoints_by_message.remove(&deleted_message.id);
569 }
570 cx.notify();
571 }
572
573 pub fn context_for_message(&self, id: MessageId) -> impl Iterator<Item = &AssistantContext> {
574 self.context_by_message
575 .get(&id)
576 .into_iter()
577 .flat_map(|context| {
578 context
579 .iter()
                    .filter_map(|context_id| self.context.get(context_id))
581 })
582 }
583
584 /// Returns whether all of the tool uses have finished running.
585 pub fn all_tools_finished(&self) -> bool {
586 // If the only pending tool uses left are the ones with errors, then
587 // that means that we've finished running all of the pending tools.
588 self.tool_use
589 .pending_tool_uses()
590 .iter()
591 .all(|tool_use| tool_use.status.is_error())
592 }
593
594 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
595 self.tool_use.tool_uses_for_message(id, cx)
596 }
597
598 pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
599 self.tool_use.tool_results_for_message(id)
600 }
601
602 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
603 self.tool_use.tool_result(id)
604 }
605
606 pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
607 self.tool_use.message_has_tool_results(message_id)
608 }
609
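    /// Inserts a user message, attaching only context that has not already been included in an
    /// earlier message and recording a pending checkpoint when a git checkpoint is provided.
    /// A hedged usage sketch (`context` is assumed to be a previously gathered
    /// `Vec<AssistantContext>`; illustrative only):
    ///
    /// ```ignore
    /// let message_id = thread.update(cx, |thread, cx| {
    ///     thread.insert_user_message("Please explain this code", context, None, cx)
    /// });
    /// ```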
610 pub fn insert_user_message(
611 &mut self,
612 text: impl Into<String>,
613 context: Vec<AssistantContext>,
614 git_checkpoint: Option<GitStoreCheckpoint>,
615 cx: &mut Context<Self>,
616 ) -> MessageId {
617 let text = text.into();
618
619 let message_id = self.insert_message(Role::User, vec![MessageSegment::Text(text)], cx);
620
621 // Filter out contexts that have already been included in previous messages
622 let new_context: Vec<_> = context
623 .into_iter()
624 .filter(|ctx| !self.context.contains_key(&ctx.id()))
625 .collect();
626
627 if !new_context.is_empty() {
628 if let Some(context_string) = format_context_as_string(new_context.iter(), cx) {
629 if let Some(message) = self.messages.iter_mut().find(|m| m.id == message_id) {
630 message.context = context_string;
631 }
632 }
633
634 self.action_log.update(cx, |log, cx| {
635 // Track all buffers added as context
636 for ctx in &new_context {
637 match ctx {
638 AssistantContext::File(file_ctx) => {
639 log.buffer_added_as_context(file_ctx.context_buffer.buffer.clone(), cx);
640 }
641 AssistantContext::Directory(dir_ctx) => {
642 for context_buffer in &dir_ctx.context_buffers {
643 log.buffer_added_as_context(context_buffer.buffer.clone(), cx);
644 }
645 }
646 AssistantContext::Symbol(symbol_ctx) => {
647 log.buffer_added_as_context(
648 symbol_ctx.context_symbol.buffer.clone(),
649 cx,
650 );
651 }
652 AssistantContext::FetchedUrl(_) | AssistantContext::Thread(_) => {}
653 }
654 }
655 });
656 }
657
658 let context_ids = new_context
659 .iter()
660 .map(|context| context.id())
661 .collect::<Vec<_>>();
662 self.context.extend(
663 new_context
664 .into_iter()
665 .map(|context| (context.id(), context)),
666 );
667 self.context_by_message.insert(message_id, context_ids);
668
669 if let Some(git_checkpoint) = git_checkpoint {
670 self.pending_checkpoint = Some(ThreadCheckpoint {
671 message_id,
672 git_checkpoint,
673 });
674 }
675 message_id
676 }
677
678 pub fn insert_message(
679 &mut self,
680 role: Role,
681 segments: Vec<MessageSegment>,
682 cx: &mut Context<Self>,
683 ) -> MessageId {
684 let id = self.next_message_id.post_inc();
685 self.messages.push(Message {
686 id,
687 role,
688 segments,
689 context: String::new(),
690 });
691 self.touch_updated_at();
692 cx.emit(ThreadEvent::MessageAdded(id));
693 id
694 }
695
696 pub fn edit_message(
697 &mut self,
698 id: MessageId,
699 new_role: Role,
700 new_segments: Vec<MessageSegment>,
701 cx: &mut Context<Self>,
702 ) -> bool {
703 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
704 return false;
705 };
706 message.role = new_role;
707 message.segments = new_segments;
708 self.touch_updated_at();
709 cx.emit(ThreadEvent::MessageEdited(id));
710 true
711 }
712
713 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
714 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
715 return false;
716 };
717 self.messages.remove(index);
718 self.context_by_message.remove(&id);
719 self.touch_updated_at();
720 cx.emit(ThreadEvent::MessageDeleted(id));
721 true
722 }
723
724 /// Returns the representation of this [`Thread`] in a textual form.
725 ///
726 /// This is the representation we use when attaching a thread as context to another thread.
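    ///
    /// A hedged sketch of the output shape (illustrative only):
    ///
    /// ```ignore
    /// // User:
    /// // How do I parse JSON in Rust?
    /// // Assistant:
    /// // <think>recalling crates</think>Use the `serde_json` crate.
    /// let transcript = thread.text();
    /// assert!(transcript.starts_with("User:\n"));
    /// ```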
727 pub fn text(&self) -> String {
728 let mut text = String::new();
729
730 for message in &self.messages {
731 text.push_str(match message.role {
732 language_model::Role::User => "User:",
733 language_model::Role::Assistant => "Assistant:",
734 language_model::Role::System => "System:",
735 });
736 text.push('\n');
737
738 for segment in &message.segments {
739 match segment {
740 MessageSegment::Text(content) => text.push_str(content),
741 MessageSegment::Thinking(content) => {
742 text.push_str(&format!("<think>{}</think>", content))
743 }
744 }
745 }
746 text.push('\n');
747 }
748
749 text
750 }
751
752 /// Serializes this thread into a format for storage or telemetry.
753 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
754 let initial_project_snapshot = self.initial_project_snapshot.clone();
755 cx.spawn(async move |this, cx| {
756 let initial_project_snapshot = initial_project_snapshot.await;
757 this.read_with(cx, |this, cx| SerializedThread {
758 version: SerializedThread::VERSION.to_string(),
759 summary: this.summary_or_default(),
760 updated_at: this.updated_at(),
761 messages: this
762 .messages()
763 .map(|message| SerializedMessage {
764 id: message.id,
765 role: message.role,
766 segments: message
767 .segments
768 .iter()
769 .map(|segment| match segment {
770 MessageSegment::Text(text) => {
771 SerializedMessageSegment::Text { text: text.clone() }
772 }
773 MessageSegment::Thinking(text) => {
774 SerializedMessageSegment::Thinking { text: text.clone() }
775 }
776 })
777 .collect(),
778 tool_uses: this
779 .tool_uses_for_message(message.id, cx)
780 .into_iter()
781 .map(|tool_use| SerializedToolUse {
782 id: tool_use.id,
783 name: tool_use.name,
784 input: tool_use.input,
785 })
786 .collect(),
787 tool_results: this
788 .tool_results_for_message(message.id)
789 .into_iter()
790 .map(|tool_result| SerializedToolResult {
791 tool_use_id: tool_result.tool_use_id.clone(),
792 is_error: tool_result.is_error,
793 content: tool_result.content.clone(),
794 })
795 .collect(),
796 context: message.context.clone(),
797 })
798 .collect(),
799 initial_project_snapshot,
800 cumulative_token_usage: this.cumulative_token_usage.clone(),
801 detailed_summary_state: this.detailed_summary_state.clone(),
802 })
803 })
804 }
805
806 pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
807 self.system_prompt_context = Some(context);
808 }
809
810 pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
811 &self.system_prompt_context
812 }
813
814 pub fn load_system_prompt_context(
815 &self,
816 cx: &App,
817 ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
818 let project = self.project.read(cx);
819 let tasks = project
820 .visible_worktrees(cx)
821 .map(|worktree| {
822 Self::load_worktree_info_for_system_prompt(
823 project.fs().clone(),
824 worktree.read(cx),
825 cx,
826 )
827 })
828 .collect::<Vec<_>>();
829
830 cx.spawn(async |_cx| {
831 let results = futures::future::join_all(tasks).await;
832 let mut first_err = None;
833 let worktrees = results
834 .into_iter()
835 .map(|(worktree, err)| {
836 if first_err.is_none() && err.is_some() {
837 first_err = err;
838 }
839 worktree
840 })
841 .collect::<Vec<_>>();
842 (AssistantSystemPromptContext::new(worktrees), first_err)
843 })
844 }
845
846 fn load_worktree_info_for_system_prompt(
847 fs: Arc<dyn Fs>,
848 worktree: &Worktree,
849 cx: &App,
850 ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
851 let root_name = worktree.root_name().into();
852 let abs_path = worktree.abs_path();
853
854 // Note that Cline supports `.clinerules` being a directory, but that is not currently
855 // supported. This doesn't seem to occur often in GitHub repositories.
856 const RULES_FILE_NAMES: [&'static str; 6] = [
857 ".rules",
858 ".cursorrules",
859 ".windsurfrules",
860 ".clinerules",
861 ".github/copilot-instructions.md",
862 "CLAUDE.md",
863 ];
864 let selected_rules_file = RULES_FILE_NAMES
865 .into_iter()
866 .filter_map(|name| {
867 worktree
868 .entry_for_path(name)
869 .filter(|entry| entry.is_file())
870 .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
871 })
872 .next();
873
874 if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
875 cx.spawn(async move |_| {
876 let rules_file_result = maybe!(async move {
877 let abs_rules_path = abs_rules_path?;
878 let text = fs.load(&abs_rules_path).await.with_context(|| {
879 format!("Failed to load assistant rules file {:?}", abs_rules_path)
880 })?;
881 anyhow::Ok(RulesFile {
882 rel_path: rel_rules_path,
883 abs_path: abs_rules_path.into(),
884 text: text.trim().to_string(),
885 })
886 })
887 .await;
888 let (rules_file, rules_file_error) = match rules_file_result {
889 Ok(rules_file) => (Some(rules_file), None),
890 Err(err) => (
891 None,
892 Some(ThreadError::Message {
893 header: "Error loading rules file".into(),
894 message: format!("{err}").into(),
895 }),
896 ),
897 };
898 let worktree_info = WorktreeInfoForSystemPrompt {
899 root_name,
900 abs_path,
901 rules_file,
902 };
903 (worktree_info, rules_file_error)
904 })
905 } else {
906 Task::ready((
907 WorktreeInfoForSystemPrompt {
908 root_name,
909 abs_path,
910 rules_file: None,
911 },
912 None,
913 ))
914 }
915 }
916
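    /// Builds a completion request for the thread (attaching the enabled tools when the model
    /// supports tool use) and starts streaming the response. A hedged usage sketch (default-model
    /// lookup assumed; illustrative only):
    ///
    /// ```ignore
    /// if let Some(ConfiguredModel { model, .. }) =
    ///     LanguageModelRegistry::read_global(cx).default_model()
    /// {
    ///     thread.update(cx, |thread, cx| {
    ///         thread.send_to_model(model, RequestKind::Chat, cx);
    ///     });
    /// }
    /// ```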
917 pub fn send_to_model(
918 &mut self,
919 model: Arc<dyn LanguageModel>,
920 request_kind: RequestKind,
921 cx: &mut Context<Self>,
922 ) {
923 let mut request = self.to_completion_request(request_kind, cx);
924 if model.supports_tools() {
925 request.tools = {
926 let mut tools = Vec::new();
927 tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
928 LanguageModelRequestTool {
929 name: tool.name(),
930 description: tool.description(),
931 input_schema: tool.input_schema(model.tool_input_format()),
932 }
933 }));
934
935 tools
936 };
937 }
938
939 self.stream_completion(request, model, cx);
940 }
941
942 pub fn used_tools_since_last_user_message(&self) -> bool {
943 for message in self.messages.iter().rev() {
944 if self.tool_use.message_has_tool_results(message.id) {
945 return true;
946 } else if message.role == Role::User {
947 return false;
948 }
949 }
950
951 false
952 }
953
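    /// Assembles the request sent to the language model: the system prompt (when the system
    /// prompt context has been set), every message together with its tool uses and results (for
    /// [`RequestKind::Chat`]; summarization skips them), a prompt-cache marker on the last thread
    /// message, and a trailing note about stale or edited files when needed. A hedged sketch
    /// (`model` is assumed to be an `Arc<dyn LanguageModel>`; illustrative only):
    ///
    /// ```ignore
    /// let request = thread.to_completion_request(RequestKind::Chat, cx);
    /// thread.stream_completion(request, model, cx);
    /// ```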
954 pub fn to_completion_request(
955 &self,
956 request_kind: RequestKind,
957 cx: &App,
958 ) -> LanguageModelRequest {
959 let mut request = LanguageModelRequest {
960 messages: vec![],
961 tools: Vec::new(),
962 stop: Vec::new(),
963 temperature: None,
964 };
965
966 if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
967 if let Some(system_prompt) = self
968 .prompt_builder
969 .generate_assistant_system_prompt(system_prompt_context)
970 .context("failed to generate assistant system prompt")
971 .log_err()
972 {
973 request.messages.push(LanguageModelRequestMessage {
974 role: Role::System,
975 content: vec![MessageContent::Text(system_prompt)],
976 cache: true,
977 });
978 }
979 } else {
980 log::error!("system_prompt_context not set.")
981 }
982
983 for message in &self.messages {
984 let mut request_message = LanguageModelRequestMessage {
985 role: message.role,
986 content: Vec::new(),
987 cache: false,
988 };
989
990 match request_kind {
991 RequestKind::Chat => {
992 self.tool_use
993 .attach_tool_results(message.id, &mut request_message);
994 }
995 RequestKind::Summarize => {
996 // We don't care about tool use during summarization.
997 if self.tool_use.message_has_tool_results(message.id) {
998 continue;
999 }
1000 }
1001 }
1002
1003 if !message.segments.is_empty() {
1004 request_message
1005 .content
1006 .push(MessageContent::Text(message.to_string()));
1007 }
1008
1009 match request_kind {
1010 RequestKind::Chat => {
1011 self.tool_use
1012 .attach_tool_uses(message.id, &mut request_message);
1013 }
1014 RequestKind::Summarize => {
1015 // We don't care about tool use during summarization.
1016 }
1017 };
1018
1019 request.messages.push(request_message);
1020 }
1021
1022 // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
1023 if let Some(last) = request.messages.last_mut() {
1024 last.cache = true;
1025 }
1026
1027 self.attached_tracked_files_state(&mut request.messages, cx);
1028
1029 request
1030 }
1031
1032 fn attached_tracked_files_state(
1033 &self,
1034 messages: &mut Vec<LanguageModelRequestMessage>,
1035 cx: &App,
1036 ) {
1037 const STALE_FILES_HEADER: &str = "These files changed since last read:";
1038
1039 let mut stale_message = String::new();
1040
1041 let action_log = self.action_log.read(cx);
1042
1043 for stale_file in action_log.stale_buffers(cx) {
1044 let Some(file) = stale_file.read(cx).file() else {
1045 continue;
1046 };
1047
1048 if stale_message.is_empty() {
1049 write!(&mut stale_message, "{}\n", STALE_FILES_HEADER).ok();
1050 }
1051
1052 writeln!(&mut stale_message, "- {}", file.path().display()).ok();
1053 }
1054
1055 let mut content = Vec::with_capacity(2);
1056
1057 if !stale_message.is_empty() {
1058 content.push(stale_message.into());
1059 }
1060
1061 if action_log.has_edited_files_since_project_diagnostics_check() {
1062 content.push(
1063 "\n\nWhen you're done making changes, make sure to check project diagnostics \
1064 and fix all errors AND warnings you introduced! \
1065 DO NOT mention you're going to do this until you're done."
1066 .into(),
1067 );
1068 }
1069
1070 if !content.is_empty() {
1071 let context_message = LanguageModelRequestMessage {
1072 role: Role::User,
1073 content,
1074 cache: false,
1075 };
1076
1077 messages.push(context_message);
1078 }
1079 }
1080
1081 pub fn stream_completion(
1082 &mut self,
1083 request: LanguageModelRequest,
1084 model: Arc<dyn LanguageModel>,
1085 cx: &mut Context<Self>,
1086 ) {
1087 let pending_completion_id = post_inc(&mut self.completion_count);
1088
1089 let task = cx.spawn(async move |thread, cx| {
1090 let stream = model.stream_completion(request, &cx);
1091 let initial_token_usage =
1092 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
1093 let stream_completion = async {
1094 let mut events = stream.await?;
1095 let mut stop_reason = StopReason::EndTurn;
1096 let mut current_token_usage = TokenUsage::default();
1097
1098 while let Some(event) = events.next().await {
1099 let event = event?;
1100
1101 thread.update(cx, |thread, cx| {
1102 match event {
1103 LanguageModelCompletionEvent::StartMessage { .. } => {
1104 thread.insert_message(
1105 Role::Assistant,
1106 vec![MessageSegment::Text(String::new())],
1107 cx,
1108 );
1109 }
1110 LanguageModelCompletionEvent::Stop(reason) => {
1111 stop_reason = reason;
1112 }
1113 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
1114 thread.cumulative_token_usage =
1115 thread.cumulative_token_usage.clone() + token_usage.clone()
1116 - current_token_usage.clone();
1117 current_token_usage = token_usage;
1118 }
1119 LanguageModelCompletionEvent::Text(chunk) => {
1120 if let Some(last_message) = thread.messages.last_mut() {
1121 if last_message.role == Role::Assistant {
1122 last_message.push_text(&chunk);
1123 cx.emit(ThreadEvent::StreamedAssistantText(
1124 last_message.id,
1125 chunk,
1126 ));
1127 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // would result in duplicating the text of the chunk in the rendered Markdown.
1133 thread.insert_message(
1134 Role::Assistant,
1135 vec![MessageSegment::Text(chunk.to_string())],
1136 cx,
1137 );
1138 };
1139 }
1140 }
1141 LanguageModelCompletionEvent::Thinking(chunk) => {
1142 if let Some(last_message) = thread.messages.last_mut() {
1143 if last_message.role == Role::Assistant {
1144 last_message.push_thinking(&chunk);
1145 cx.emit(ThreadEvent::StreamedAssistantThinking(
1146 last_message.id,
1147 chunk,
1148 ));
1149 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // would result in duplicating the text of the chunk in the rendered Markdown.
1155 thread.insert_message(
1156 Role::Assistant,
1157 vec![MessageSegment::Thinking(chunk.to_string())],
1158 cx,
1159 );
1160 };
1161 }
1162 }
1163 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1164 let last_assistant_message_id = thread
1165 .messages
1166 .iter_mut()
1167 .rfind(|message| message.role == Role::Assistant)
1168 .map(|message| message.id)
1169 .unwrap_or_else(|| {
1170 thread.insert_message(Role::Assistant, vec![], cx)
1171 });
1172
1173 thread.tool_use.request_tool_use(
1174 last_assistant_message_id,
1175 tool_use,
1176 cx,
1177 );
1178 }
1179 }
1180
1181 thread.touch_updated_at();
1182 cx.emit(ThreadEvent::StreamedCompletion);
1183 cx.notify();
1184 })?;
1185
1186 smol::future::yield_now().await;
1187 }
1188
1189 thread.update(cx, |thread, cx| {
1190 thread
1191 .pending_completions
1192 .retain(|completion| completion.id != pending_completion_id);
1193
1194 if thread.summary.is_none() && thread.messages.len() >= 2 {
1195 thread.summarize(cx);
1196 }
1197 })?;
1198
1199 anyhow::Ok(stop_reason)
1200 };
1201
1202 let result = stream_completion.await;
1203
1204 thread
1205 .update(cx, |thread, cx| {
1206 thread.finalize_pending_checkpoint(cx);
1207 match result.as_ref() {
1208 Ok(stop_reason) => match stop_reason {
1209 StopReason::ToolUse => {
1210 cx.emit(ThreadEvent::UsePendingTools);
1211 }
1212 StopReason::EndTurn => {}
1213 StopReason::MaxTokens => {}
1214 },
1215 Err(error) => {
1216 if error.is::<PaymentRequiredError>() {
1217 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1218 } else if error.is::<MaxMonthlySpendReachedError>() {
1219 cx.emit(ThreadEvent::ShowError(
1220 ThreadError::MaxMonthlySpendReached,
1221 ));
1222 } else {
1223 let error_message = error
1224 .chain()
1225 .map(|err| err.to_string())
1226 .collect::<Vec<_>>()
1227 .join("\n");
1228 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1229 header: "Error interacting with language model".into(),
1230 message: SharedString::from(error_message.clone()),
1231 }));
1232 }
1233
1234 thread.cancel_last_completion(cx);
1235 }
1236 }
1237 cx.emit(ThreadEvent::DoneStreaming);
1238
1239 if let Ok(initial_usage) = initial_token_usage {
1240 let usage = thread.cumulative_token_usage.clone() - initial_usage;
1241
1242 telemetry::event!(
1243 "Assistant Thread Completion",
1244 thread_id = thread.id().to_string(),
1245 model = model.telemetry_id(),
1246 model_provider = model.provider_id().to_string(),
1247 input_tokens = usage.input_tokens,
1248 output_tokens = usage.output_tokens,
1249 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1250 cache_read_input_tokens = usage.cache_read_input_tokens,
1251 );
1252 }
1253 })
1254 .ok();
1255 });
1256
1257 self.pending_completions.push(PendingCompletion {
1258 id: pending_completion_id,
1259 _task: task,
1260 });
1261 }
1262
1263 pub fn summarize(&mut self, cx: &mut Context<Self>) {
1264 let Some(model) = LanguageModelRegistry::read_global(cx).thread_summary_model() else {
1265 return;
1266 };
1267
1268 if !model.provider.is_authenticated(cx) {
1269 return;
1270 }
1271
1272 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1273 request.messages.push(LanguageModelRequestMessage {
1274 role: Role::User,
1275 content: vec![
1276 "Generate a concise 3-7 word title for this conversation, omitting punctuation. \
1277 Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`. \
1278 If the conversation is about a specific subject, include it in the title. \
1279 Be descriptive. DO NOT speak in the first person."
1280 .into(),
1281 ],
1282 cache: false,
1283 });
1284
1285 self.pending_summary = cx.spawn(async move |this, cx| {
1286 async move {
1287 let stream = model.model.stream_completion_text(request, &cx);
1288 let mut messages = stream.await?;
1289
1290 let mut new_summary = String::new();
1291 while let Some(message) = messages.stream.next().await {
1292 let text = message?;
1293 let mut lines = text.lines();
1294 new_summary.extend(lines.next());
1295
1296 // Stop if the LLM generated multiple lines.
1297 if lines.next().is_some() {
1298 break;
1299 }
1300 }
1301
1302 this.update(cx, |this, cx| {
1303 if !new_summary.is_empty() {
1304 this.summary = Some(new_summary.into());
1305 }
1306
1307 cx.emit(ThreadEvent::SummaryGenerated);
1308 })?;
1309
1310 anyhow::Ok(())
1311 }
1312 .log_err()
1313 .await
1314 });
1315 }
1316
1317 pub fn generate_detailed_summary(&mut self, cx: &mut Context<Self>) -> Option<Task<()>> {
1318 let last_message_id = self.messages.last().map(|message| message.id)?;
1319
1320 match &self.detailed_summary_state {
1321 DetailedSummaryState::Generating { message_id, .. }
1322 | DetailedSummaryState::Generated { message_id, .. }
1323 if *message_id == last_message_id =>
1324 {
1325 // Already up-to-date
1326 return None;
1327 }
1328 _ => {}
1329 }
1330
1331 let ConfiguredModel { model, provider } =
1332 LanguageModelRegistry::read_global(cx).thread_summary_model()?;
1333
1334 if !provider.is_authenticated(cx) {
1335 return None;
1336 }
1337
1338 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1339
1340 request.messages.push(LanguageModelRequestMessage {
1341 role: Role::User,
1342 content: vec![
1343 "Generate a detailed summary of this conversation. Include:\n\
1344 1. A brief overview of what was discussed\n\
1345 2. Key facts or information discovered\n\
1346 3. Outcomes or conclusions reached\n\
1347 4. Any action items or next steps if any\n\
1348 Format it in Markdown with headings and bullet points."
1349 .into(),
1350 ],
1351 cache: false,
1352 });
1353
1354 let task = cx.spawn(async move |thread, cx| {
1355 let stream = model.stream_completion_text(request, &cx);
1356 let Some(mut messages) = stream.await.log_err() else {
1357 thread
1358 .update(cx, |this, _cx| {
1359 this.detailed_summary_state = DetailedSummaryState::NotGenerated;
1360 })
1361 .log_err();
1362
1363 return;
1364 };
1365
1366 let mut new_detailed_summary = String::new();
1367
1368 while let Some(chunk) = messages.stream.next().await {
1369 if let Some(chunk) = chunk.log_err() {
1370 new_detailed_summary.push_str(&chunk);
1371 }
1372 }
1373
1374 thread
1375 .update(cx, |this, _cx| {
1376 this.detailed_summary_state = DetailedSummaryState::Generated {
1377 text: new_detailed_summary.into(),
1378 message_id: last_message_id,
1379 };
1380 })
1381 .log_err();
1382 });
1383
1384 self.detailed_summary_state = DetailedSummaryState::Generating {
1385 message_id: last_message_id,
1386 };
1387
1388 Some(task)
1389 }
1390
1391 pub fn is_generating_detailed_summary(&self) -> bool {
1392 matches!(
1393 self.detailed_summary_state,
1394 DetailedSummaryState::Generating { .. }
1395 )
1396 }
1397
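    /// Starts every idle pending tool use: tools that require confirmation (and are not covered
    /// by `always_allow_tool_actions`) are queued for user confirmation, everything else runs
    /// immediately. A hedged usage sketch (illustrative only):
    ///
    /// ```ignore
    /// // In an event handler that received `ThreadEvent::UsePendingTools`:
    /// thread.update(cx, |thread, cx| {
    ///     thread.use_pending_tools(cx);
    /// });
    /// ```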
1398 pub fn use_pending_tools(
1399 &mut self,
1400 cx: &mut Context<Self>,
1401 ) -> impl IntoIterator<Item = PendingToolUse> + use<> {
1402 let request = self.to_completion_request(RequestKind::Chat, cx);
1403 let messages = Arc::new(request.messages);
1404 let pending_tool_uses = self
1405 .tool_use
1406 .pending_tool_uses()
1407 .into_iter()
1408 .filter(|tool_use| tool_use.status.is_idle())
1409 .cloned()
1410 .collect::<Vec<_>>();
1411
1412 for tool_use in pending_tool_uses.iter() {
1413 if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1414 if tool.needs_confirmation()
1415 && !AssistantSettings::get_global(cx).always_allow_tool_actions
1416 {
1417 self.tool_use.confirm_tool_use(
1418 tool_use.id.clone(),
1419 tool_use.ui_text.clone(),
1420 tool_use.input.clone(),
1421 messages.clone(),
1422 tool,
1423 );
1424 cx.emit(ThreadEvent::ToolConfirmationNeeded);
1425 } else {
1426 self.run_tool(
1427 tool_use.id.clone(),
1428 tool_use.ui_text.clone(),
1429 tool_use.input.clone(),
1430 &messages,
1431 tool,
1432 cx,
1433 );
1434 }
1435 }
1436 }
1437
1438 pending_tool_uses
1439 }
1440
1441 pub fn run_tool(
1442 &mut self,
1443 tool_use_id: LanguageModelToolUseId,
1444 ui_text: impl Into<SharedString>,
1445 input: serde_json::Value,
1446 messages: &[LanguageModelRequestMessage],
1447 tool: Arc<dyn Tool>,
1448 cx: &mut Context<Thread>,
1449 ) {
1450 let task = self.spawn_tool_use(tool_use_id.clone(), messages, input, tool, cx);
1451 self.tool_use
1452 .run_pending_tool(tool_use_id, ui_text.into(), task);
1453 }
1454
1455 fn spawn_tool_use(
1456 &mut self,
1457 tool_use_id: LanguageModelToolUseId,
1458 messages: &[LanguageModelRequestMessage],
1459 input: serde_json::Value,
1460 tool: Arc<dyn Tool>,
1461 cx: &mut Context<Thread>,
1462 ) -> Task<()> {
1463 let tool_name: Arc<str> = tool.name().into();
1464
1465 let run_tool = if self.tools.is_disabled(&tool.source(), &tool_name) {
1466 Task::ready(Err(anyhow!("tool is disabled: {tool_name}")))
1467 } else {
1468 tool.run(
1469 input,
1470 messages,
1471 self.project.clone(),
1472 self.action_log.clone(),
1473 cx,
1474 )
1475 };
1476
1477 cx.spawn({
1478 async move |thread: WeakEntity<Thread>, cx| {
1479 let output = run_tool.await;
1480
1481 thread
1482 .update(cx, |thread, cx| {
1483 let pending_tool_use = thread.tool_use.insert_tool_output(
1484 tool_use_id.clone(),
1485 tool_name,
1486 output,
1487 );
1488
1489 cx.emit(ThreadEvent::ToolFinished {
1490 tool_use_id,
1491 pending_tool_use,
1492 canceled: false,
1493 });
1494 })
1495 .ok();
1496 }
1497 })
1498 }
1499
1500 pub fn attach_tool_results(&mut self, cx: &mut Context<Self>) {
1501 // Insert a user message to contain the tool results.
1502 self.insert_user_message(
1503 // TODO: Sending up a user message without any content results in the model sending back
1504 // responses that also don't have any content. We currently don't handle this case well,
1505 // so for now we provide some text to keep the model on track.
1506 "Here are the tool results.",
1507 Vec::new(),
1508 None,
1509 cx,
1510 );
1511 }
1512
1513 /// Cancels the last pending completion, if there are any pending.
1514 ///
1515 /// Returns whether a completion was canceled.
1516 pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
1517 let canceled = if self.pending_completions.pop().is_some() {
1518 true
1519 } else {
1520 let mut canceled = false;
1521 for pending_tool_use in self.tool_use.cancel_pending() {
1522 canceled = true;
1523 cx.emit(ThreadEvent::ToolFinished {
1524 tool_use_id: pending_tool_use.id.clone(),
1525 pending_tool_use: Some(pending_tool_use),
1526 canceled: true,
1527 });
1528 }
1529 canceled
1530 };
1531 self.finalize_pending_checkpoint(cx);
1532 canceled
1533 }
1534
1535 /// Returns the feedback given to the thread, if any.
1536 pub fn feedback(&self) -> Option<ThreadFeedback> {
1537 self.feedback
1538 }
1539
1540 /// Reports feedback about the thread and stores it in our telemetry backend.
1541 pub fn report_feedback(
1542 &mut self,
1543 feedback: ThreadFeedback,
1544 cx: &mut Context<Self>,
1545 ) -> Task<Result<()>> {
1546 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
1547 let serialized_thread = self.serialize(cx);
1548 let thread_id = self.id().clone();
1549 let client = self.project.read(cx).client();
1550 self.feedback = Some(feedback);
1551 cx.notify();
1552
1553 cx.background_spawn(async move {
1554 let final_project_snapshot = final_project_snapshot.await;
1555 let serialized_thread = serialized_thread.await?;
1556 let thread_data =
1557 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
1558
1559 let rating = match feedback {
1560 ThreadFeedback::Positive => "positive",
1561 ThreadFeedback::Negative => "negative",
1562 };
1563 telemetry::event!(
1564 "Assistant Thread Rated",
1565 rating,
1566 thread_id,
1567 thread_data,
1568 final_project_snapshot
1569 );
1570 client.telemetry().flush_events();
1571
1572 Ok(())
1573 })
1574 }
1575
    /// Creates a snapshot of the current project state, including git information and unsaved buffers.
1577 fn project_snapshot(
1578 project: Entity<Project>,
1579 cx: &mut Context<Self>,
1580 ) -> Task<Arc<ProjectSnapshot>> {
1581 let git_store = project.read(cx).git_store().clone();
1582 let worktree_snapshots: Vec<_> = project
1583 .read(cx)
1584 .visible_worktrees(cx)
1585 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
1586 .collect();
1587
1588 cx.spawn(async move |_, cx| {
1589 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
1590
1591 let mut unsaved_buffers = Vec::new();
1592 cx.update(|app_cx| {
1593 let buffer_store = project.read(app_cx).buffer_store();
1594 for buffer_handle in buffer_store.read(app_cx).buffers() {
1595 let buffer = buffer_handle.read(app_cx);
1596 if buffer.is_dirty() {
1597 if let Some(file) = buffer.file() {
1598 let path = file.path().to_string_lossy().to_string();
1599 unsaved_buffers.push(path);
1600 }
1601 }
1602 }
1603 })
1604 .ok();
1605
1606 Arc::new(ProjectSnapshot {
1607 worktree_snapshots,
1608 unsaved_buffer_paths: unsaved_buffers,
1609 timestamp: Utc::now(),
1610 })
1611 })
1612 }
1613
1614 fn worktree_snapshot(
1615 worktree: Entity<project::Worktree>,
1616 git_store: Entity<GitStore>,
1617 cx: &App,
1618 ) -> Task<WorktreeSnapshot> {
1619 cx.spawn(async move |cx| {
1620 // Get worktree path and snapshot
1621 let worktree_info = cx.update(|app_cx| {
1622 let worktree = worktree.read(app_cx);
1623 let path = worktree.abs_path().to_string_lossy().to_string();
1624 let snapshot = worktree.snapshot();
1625 (path, snapshot)
1626 });
1627
1628 let Ok((worktree_path, _snapshot)) = worktree_info else {
1629 return WorktreeSnapshot {
1630 worktree_path: String::new(),
1631 git_state: None,
1632 };
1633 };
1634
1635 let git_state = git_store
1636 .update(cx, |git_store, cx| {
1637 git_store
1638 .repositories()
1639 .values()
1640 .find(|repo| {
1641 repo.read(cx)
1642 .abs_path_to_repo_path(&worktree.read(cx).abs_path())
1643 .is_some()
1644 })
1645 .cloned()
1646 })
1647 .ok()
1648 .flatten()
1649 .map(|repo| {
1650 repo.read_with(cx, |repo, _| {
1651 let current_branch =
1652 repo.branch.as_ref().map(|branch| branch.name.to_string());
1653 repo.send_job(|state, _| async move {
1654 let RepositoryState::Local { backend, .. } = state else {
1655 return GitState {
1656 remote_url: None,
1657 head_sha: None,
1658 current_branch,
1659 diff: None,
1660 };
1661 };
1662
1663 let remote_url = backend.remote_url("origin");
1664 let head_sha = backend.head_sha();
1665 let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
1666
1667 GitState {
1668 remote_url,
1669 head_sha,
1670 current_branch,
1671 diff,
1672 }
1673 })
1674 })
1675 });
1676
1677 let git_state = match git_state {
1678 Some(git_state) => match git_state.ok() {
1679 Some(git_state) => git_state.await.ok(),
1680 None => None,
1681 },
1682 None => None,
1683 };
1684
1685 WorktreeSnapshot {
1686 worktree_path,
1687 git_state,
1688 }
1689 })
1690 }
1691
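    /// Renders the whole thread as Markdown: the summary as a top-level heading, one `##` section
    /// per message, followed by that message's tool uses (as fenced JSON) and tool results.
    /// A hedged usage sketch (illustrative only):
    ///
    /// ```ignore
    /// let markdown = thread.read(cx).to_markdown(cx)?;
    /// std::fs::write("thread.md", markdown)?;
    /// ```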
1692 pub fn to_markdown(&self, cx: &App) -> Result<String> {
1693 let mut markdown = Vec::new();
1694
1695 if let Some(summary) = self.summary() {
1696 writeln!(markdown, "# {summary}\n")?;
1697 };
1698
1699 for message in self.messages() {
1700 writeln!(
1701 markdown,
1702 "## {role}\n",
1703 role = match message.role {
1704 Role::User => "User",
1705 Role::Assistant => "Assistant",
1706 Role::System => "System",
1707 }
1708 )?;
1709
1710 if !message.context.is_empty() {
1711 writeln!(markdown, "{}", message.context)?;
1712 }
1713
1714 for segment in &message.segments {
1715 match segment {
1716 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
1717 MessageSegment::Thinking(text) => {
1718 writeln!(markdown, "<think>{}</think>\n", text)?
1719 }
1720 }
1721 }
1722
1723 for tool_use in self.tool_uses_for_message(message.id, cx) {
1724 writeln!(
1725 markdown,
1726 "**Use Tool: {} ({})**",
1727 tool_use.name, tool_use.id
1728 )?;
1729 writeln!(markdown, "```json")?;
1730 writeln!(
1731 markdown,
1732 "{}",
1733 serde_json::to_string_pretty(&tool_use.input)?
1734 )?;
1735 writeln!(markdown, "```")?;
1736 }
1737
1738 for tool_result in self.tool_results_for_message(message.id) {
1739 write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
1740 if tool_result.is_error {
1741 write!(markdown, " (Error)")?;
1742 }
1743
1744 writeln!(markdown, "**\n")?;
1745 writeln!(markdown, "{}", tool_result.content)?;
1746 }
1747 }
1748
1749 Ok(String::from_utf8_lossy(&markdown).to_string())
1750 }
1751
1752 pub fn keep_edits_in_range(
1753 &mut self,
1754 buffer: Entity<language::Buffer>,
1755 buffer_range: Range<language::Anchor>,
1756 cx: &mut Context<Self>,
1757 ) {
1758 self.action_log.update(cx, |action_log, cx| {
1759 action_log.keep_edits_in_range(buffer, buffer_range, cx)
1760 });
1761 }
1762
1763 pub fn keep_all_edits(&mut self, cx: &mut Context<Self>) {
1764 self.action_log
1765 .update(cx, |action_log, cx| action_log.keep_all_edits(cx));
1766 }
1767
1768 pub fn reject_edits_in_range(
1769 &mut self,
1770 buffer: Entity<language::Buffer>,
1771 buffer_range: Range<language::Anchor>,
1772 cx: &mut Context<Self>,
1773 ) -> Task<Result<()>> {
1774 self.action_log.update(cx, |action_log, cx| {
1775 action_log.reject_edits_in_range(buffer, buffer_range, cx)
1776 })
1777 }
1778
1779 pub fn action_log(&self) -> &Entity<ActionLog> {
1780 &self.action_log
1781 }
1782
1783 pub fn project(&self) -> &Entity<Project> {
1784 &self.project
1785 }
1786
1787 pub fn cumulative_token_usage(&self) -> TokenUsage {
1788 self.cumulative_token_usage.clone()
1789 }
1790
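    /// Reports cumulative token usage against the default model's context window, flagging a
    /// warning once usage crosses the threshold (0.8 by default; overridable in debug builds via
    /// the `ZED_THREAD_WARNING_THRESHOLD` environment variable). A hedged sketch (illustrative
    /// only):
    ///
    /// ```ignore
    /// let usage = thread.total_token_usage(cx);
    /// if usage.ratio == TokenUsageRatio::Exceeded {
    ///     // e.g. stop sending new completions and surface an error to the user
    /// }
    /// ```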
1791 pub fn total_token_usage(&self, cx: &App) -> TotalTokenUsage {
1792 let model_registry = LanguageModelRegistry::read_global(cx);
1793 let Some(model) = model_registry.default_model() else {
1794 return TotalTokenUsage::default();
1795 };
1796
1797 let max = model.model.max_token_count();
1798
1799 #[cfg(debug_assertions)]
1800 let warning_threshold: f32 = std::env::var("ZED_THREAD_WARNING_THRESHOLD")
1801 .unwrap_or("0.8".to_string())
1802 .parse()
1803 .unwrap();
1804 #[cfg(not(debug_assertions))]
1805 let warning_threshold: f32 = 0.8;
1806
1807 let total = self.cumulative_token_usage.total_tokens() as usize;
1808
1809 let ratio = if total >= max {
1810 TokenUsageRatio::Exceeded
1811 } else if total as f32 / max as f32 >= warning_threshold {
1812 TokenUsageRatio::Warning
1813 } else {
1814 TokenUsageRatio::Normal
1815 };
1816
1817 TotalTokenUsage { total, max, ratio }
1818 }
1819
1820 pub fn deny_tool_use(
1821 &mut self,
1822 tool_use_id: LanguageModelToolUseId,
1823 tool_name: Arc<str>,
1824 cx: &mut Context<Self>,
1825 ) {
1826 let err = Err(anyhow::anyhow!(
1827 "Permission to run tool action denied by user"
1828 ));
1829
1830 self.tool_use
1831 .insert_tool_output(tool_use_id.clone(), tool_name, err);
1832
1833 cx.emit(ThreadEvent::ToolFinished {
1834 tool_use_id,
1835 pending_tool_use: None,
1836 canceled: true,
1837 });
1838 }
1839}
1840
1841#[derive(Debug, Clone)]
1842pub enum ThreadError {
1843 PaymentRequired,
1844 MaxMonthlySpendReached,
1845 Message {
1846 header: SharedString,
1847 message: SharedString,
1848 },
1849}
1850
1851#[derive(Debug, Clone)]
1852pub enum ThreadEvent {
1853 ShowError(ThreadError),
1854 StreamedCompletion,
1855 StreamedAssistantText(MessageId, String),
1856 StreamedAssistantThinking(MessageId, String),
1857 DoneStreaming,
1858 MessageAdded(MessageId),
1859 MessageEdited(MessageId),
1860 MessageDeleted(MessageId),
1861 SummaryGenerated,
1862 SummaryChanged,
1863 UsePendingTools,
1864 ToolFinished {
1865 #[allow(unused)]
1866 tool_use_id: LanguageModelToolUseId,
1867 /// The pending tool use that corresponds to this tool.
1868 pending_tool_use: Option<PendingToolUse>,
1869 /// Whether the tool was canceled by the user.
1870 canceled: bool,
1871 },
1872 CheckpointChanged,
1873 ToolConfirmationNeeded,
1874}
1875
1876impl EventEmitter<ThreadEvent> for Thread {}
1877
1878struct PendingCompletion {
1879 id: usize,
1880 _task: Task<()>,
1881}
1882
1883#[cfg(test)]
1884mod tests {
1885 use super::*;
1886 use crate::{ThreadStore, context_store::ContextStore, thread_store};
1887 use assistant_settings::AssistantSettings;
1888 use context_server::ContextServerSettings;
1889 use editor::EditorSettings;
1890 use gpui::TestAppContext;
1891 use project::{FakeFs, Project};
1892 use prompt_store::PromptBuilder;
1893 use serde_json::json;
1894 use settings::{Settings, SettingsStore};
1895 use std::sync::Arc;
1896 use theme::ThemeSettings;
1897 use util::path;
1898 use workspace::Workspace;
1899
1900 #[gpui::test]
1901 async fn test_message_with_context(cx: &mut TestAppContext) {
1902 init_test_settings(cx);
1903
1904 let project = create_test_project(
1905 cx,
1906 json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
1907 )
1908 .await;
1909
1910 let (_workspace, _thread_store, thread, context_store) =
1911 setup_test_environment(cx, project.clone()).await;
1912
1913 add_file_to_context(&project, &context_store, "test/code.rs", cx)
1914 .await
1915 .unwrap();
1916
1917 let context =
1918 context_store.update(cx, |store, _| store.context().first().cloned().unwrap());
1919
1920 // Insert user message with context
1921 let message_id = thread.update(cx, |thread, cx| {
1922 thread.insert_user_message("Please explain this code", vec![context], None, cx)
1923 });
1924
1925 // Check content and context in message object
1926 let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());
1927
1928 // Use different path format strings based on platform for the test
1929 #[cfg(windows)]
1930 let path_part = r"test\code.rs";
1931 #[cfg(not(windows))]
1932 let path_part = "test/code.rs";
1933
1934 let expected_context = format!(
1935 r#"
1936<context>
1937The following items were attached by the user. You don't need to use other tools to read them.
1938
1939<files>
1940```rs {path_part}
1941fn main() {{
1942 println!("Hello, world!");
1943}}
1944```
1945</files>
1946</context>
1947"#
1948 );
1949
1950 assert_eq!(message.role, Role::User);
1951 assert_eq!(message.segments.len(), 1);
1952 assert_eq!(
1953 message.segments[0],
1954 MessageSegment::Text("Please explain this code".to_string())
1955 );
1956 assert_eq!(message.context, expected_context);
1957
1958 // Check message in request
1959 let request = thread.read_with(cx, |thread, cx| {
1960 thread.to_completion_request(RequestKind::Chat, cx)
1961 });
1962
1963 assert_eq!(request.messages.len(), 1);
1964 let expected_full_message = format!("{}Please explain this code", expected_context);
1965 assert_eq!(request.messages[0].string_contents(), expected_full_message);
1966 }
1967
    #[gpui::test]
    async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({
                "file1.rs": "fn function1() {}\n",
                "file2.rs": "fn function2() {}\n",
                "file3.rs": "fn function3() {}\n",
            }),
        )
        .await;

        let (_, _thread_store, thread, context_store) =
            setup_test_environment(cx, project.clone()).await;

        // Open files individually
        add_file_to_context(&project, &context_store, "test/file1.rs", cx)
            .await
            .unwrap();
        add_file_to_context(&project, &context_store, "test/file2.rs", cx)
            .await
            .unwrap();
        add_file_to_context(&project, &context_store, "test/file3.rs", cx)
            .await
            .unwrap();

        // Get the context objects
        let contexts = context_store.update(cx, |store, _| store.context().clone());
        assert_eq!(contexts.len(), 3);

        // First message with context 1
        let message1_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Message 1", vec![contexts[0].clone()], None, cx)
        });

        // Second message with contexts 1 and 2 (context 1 should be skipped as it's already included)
        let message2_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message(
                "Message 2",
                vec![contexts[0].clone(), contexts[1].clone()],
                None,
                cx,
            )
        });

        // Third message with all three contexts (contexts 1 and 2 should be skipped)
        let message3_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message(
                "Message 3",
                vec![
                    contexts[0].clone(),
                    contexts[1].clone(),
                    contexts[2].clone(),
                ],
                None,
                cx,
            )
        });

        // Check what contexts are included in each message
        let (message1, message2, message3) = thread.read_with(cx, |thread, _| {
            (
                thread.message(message1_id).unwrap().clone(),
                thread.message(message2_id).unwrap().clone(),
                thread.message(message3_id).unwrap().clone(),
            )
        });

        // First message should include context 1
        assert!(message1.context.contains("file1.rs"));

        // Second message should include only context 2 (not 1)
        assert!(!message2.context.contains("file1.rs"));
        assert!(message2.context.contains("file2.rs"));

        // Third message should include only context 3 (not 1 or 2)
        assert!(!message3.context.contains("file1.rs"));
        assert!(!message3.context.contains("file2.rs"));
        assert!(message3.context.contains("file3.rs"));

        // Check entire request to make sure all contexts are properly included
        let request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        // The request should contain all 3 messages
        assert_eq!(request.messages.len(), 3);

        // Check that the contexts are properly formatted in each message
        assert!(request.messages[0].string_contents().contains("file1.rs"));
        assert!(!request.messages[0].string_contents().contains("file2.rs"));
        assert!(!request.messages[0].string_contents().contains("file3.rs"));

        assert!(!request.messages[1].string_contents().contains("file1.rs"));
        assert!(request.messages[1].string_contents().contains("file2.rs"));
        assert!(!request.messages[1].string_contents().contains("file3.rs"));

        assert!(!request.messages[2].string_contents().contains("file1.rs"));
        assert!(!request.messages[2].string_contents().contains("file2.rs"));
        assert!(request.messages[2].string_contents().contains("file3.rs"));
    }

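    // Messages inserted without any context should carry an empty context string and
    // be sent to the model as plain text.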
    #[gpui::test]
    async fn test_message_without_files(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
        )
        .await;

        let (_, _thread_store, thread, _context_store) =
            setup_test_environment(cx, project.clone()).await;

        // Insert user message without any context (empty context vector)
        let message_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("What is the best way to learn Rust?", vec![], None, cx)
        });

        // Check content and context in message object
        let message = thread.read_with(cx, |thread, _| thread.message(message_id).unwrap().clone());

        // Context should be empty when no files are included
        assert_eq!(message.role, Role::User);
        assert_eq!(message.segments.len(), 1);
        assert_eq!(
            message.segments[0],
            MessageSegment::Text("What is the best way to learn Rust?".to_string())
        );
        assert_eq!(message.context, "");

        // Check message in request
        let request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        assert_eq!(request.messages.len(), 1);
        assert_eq!(
            request.messages[0].string_contents(),
            "What is the best way to learn Rust?"
        );

        // Add second message, also without context
        let message2_id = thread.update(cx, |thread, cx| {
            thread.insert_user_message("Are there any good books?", vec![], None, cx)
        });

        let message2 =
            thread.read_with(cx, |thread, _| thread.message(message2_id).unwrap().clone());
        assert_eq!(message2.context, "");

        // Check that both messages appear in the request
        let request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        assert_eq!(request.messages.len(), 2);
        assert_eq!(
            request.messages[0].string_contents(),
            "What is the best way to learn Rust?"
        );
        assert_eq!(
            request.messages[1].string_contents(),
            "Are there any good books?"
        );
    }

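    // Editing a buffer after it was attached as context should make the next request
    // end with a stale-file notification listing the changed files.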
    #[gpui::test]
    async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
        init_test_settings(cx);

        let project = create_test_project(
            cx,
            json!({"code.rs": "fn main() {\n    println!(\"Hello, world!\");\n}"}),
        )
        .await;

        let (_workspace, _thread_store, thread, context_store) =
            setup_test_environment(cx, project.clone()).await;

        // Open buffer and add it to context
        let buffer = add_file_to_context(&project, &context_store, "test/code.rs", cx)
            .await
            .unwrap();

        let context =
            context_store.update(cx, |store, _| store.context().first().cloned().unwrap());

        // Insert user message with the buffer as context
        thread.update(cx, |thread, cx| {
            thread.insert_user_message("Explain this code", vec![context], None, cx)
        });

        // Create a request and check that it doesn't have a stale buffer warning yet
        let initial_request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        // Make sure we don't have a stale file warning yet
        let has_stale_warning = initial_request.messages.iter().any(|msg| {
            msg.string_contents()
                .contains("These files changed since last read:")
        });
        assert!(
            !has_stale_warning,
            "Should not have stale buffer warning before buffer is modified"
        );

        // Modify the buffer
        buffer.update(cx, |buffer, cx| {
            // Insert a new line near the start of the buffer so it no longer matches the attached context
            buffer.edit(
                [(1..1, "\n    println!(\"Added a new line\");\n")],
                None,
                cx,
            );
        });

        // Insert another user message without context
        thread.update(cx, |thread, cx| {
            thread.insert_user_message("What does the code do now?", vec![], None, cx)
        });

        // Create a new request and check for the stale buffer warning
        let new_request = thread.read_with(cx, |thread, cx| {
            thread.to_completion_request(RequestKind::Chat, cx)
        });

        // We should have a stale file warning as the last message
        let last_message = new_request
            .messages
            .last()
            .expect("Request should have messages");

        // The last message should be the stale buffer notification
        assert_eq!(last_message.role, Role::User);

        // Check the exact content of the message
        let expected_content = "These files changed since last read:\n- code.rs\n";
        assert_eq!(
            last_message.string_contents(),
            expected_content,
            "Last message should be exactly the stale buffer notification"
        );
    }

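    // Installs a test `SettingsStore` and registers the settings and globals these tests depend on.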
    fn init_test_settings(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = SettingsStore::test(cx);
            cx.set_global(settings_store);
            language::init(cx);
            Project::init_settings(cx);
            AssistantSettings::register(cx);
            thread_store::init(cx);
            workspace::init_settings(cx);
            ThemeSettings::register(cx);
            ContextServerSettings::register(cx);
            EditorSettings::register(cx);
        });
    }

    // Creates a project rooted at `/test`, backed by a `FakeFs` populated with the given file tree
    async fn create_test_project(
        cx: &mut TestAppContext,
        files: serde_json::Value,
    ) -> Entity<Project> {
        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(path!("/test"), files).await;
        Project::test(fs, [path!("/test").as_ref()], cx).await
    }

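    // Builds the workspace, thread store, thread, and context store shared by the tests above.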
    async fn setup_test_environment(
        cx: &mut TestAppContext,
        project: Entity<Project>,
    ) -> (
        Entity<Workspace>,
        Entity<ThreadStore>,
        Entity<Thread>,
        Entity<ContextStore>,
    ) {
        let (workspace, cx) =
            cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));

        let thread_store = cx.update(|_, cx| {
            ThreadStore::new(
                project.clone(),
                Arc::default(),
                Arc::new(PromptBuilder::new(None).unwrap()),
                cx,
            )
            .unwrap()
        });

        let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
        let context_store = cx.new(|_cx| ContextStore::new(workspace.downgrade(), None));

        (workspace, thread_store, thread, context_store)
    }

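    // Opens the buffer at `path` in the project and adds it to the context store,
    // returning the buffer so tests can modify it afterwards.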
    async fn add_file_to_context(
        project: &Entity<Project>,
        context_store: &Entity<ContextStore>,
        path: &str,
        cx: &mut TestAppContext,
    ) -> Result<Entity<language::Buffer>> {
        let buffer_path = project
            .read_with(cx, |project, cx| project.find_project_path(path, cx))
            .unwrap();

        let buffer = project
            .update(cx, |project, cx| project.open_buffer(buffer_path, cx))
            .await
            .unwrap();

        context_store
            .update(cx, |store, cx| {
                store.add_file_from_buffer(buffer.clone(), cx)
            })
            .await?;

        Ok(buffer)
    }
}