use std::fmt::Write as _;
use std::io::Write;
use std::sync::Arc;

use anyhow::{Context as _, Result};
use assistant_settings::AssistantSettings;
use assistant_tool::{ActionLog, Tool, ToolWorkingSet};
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use fs::Fs;
use futures::future::Shared;
use futures::{FutureExt, StreamExt as _};
use git;
use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task, WeakEntity};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
    LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
    Role, StopReason, TokenUsage,
};
use project::git_store::{GitStore, GitStoreCheckpoint};
use project::{Project, Worktree};
use prompt_store::{
    AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
};
use serde::{Deserialize, Serialize};
use settings::Settings;
use util::{maybe, post_inc, ResultExt as _, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
use crate::thread_store::{
    SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
    SerializedToolUse,
};
use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
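    /// Used for regular chat requests; tool uses and results are attached to the request.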
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
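///
/// A message is stored as ordered segments; when rendered to plain text, thinking
/// segments are wrapped in `<think>…</think>` tags (see [`Message::to_string`]).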
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub segments: Vec<MessageSegment>,
}

impl Message {
    pub fn push_thinking(&mut self, text: &str) {
        if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
            segment.push_str(text);
        } else {
            self.segments
                .push(MessageSegment::Thinking(text.to_string()));
        }
    }

    pub fn push_text(&mut self, text: &str) {
        if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
            segment.push_str(text);
        } else {
            self.segments.push(MessageSegment::Text(text.to_string()));
        }
    }

    pub fn to_string(&self) -> String {
        let mut result = String::new();
        for segment in &self.segments {
            match segment {
                MessageSegment::Text(text) => result.push_str(text),
                MessageSegment::Thinking(text) => {
                    result.push_str("<think>");
                    result.push_str(text);
                    result.push_str("</think>");
                }
            }
        }
        result
    }
}

#[derive(Debug, Clone)]
pub enum MessageSegment {
    Text(String),
    Thinking(String),
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectSnapshot {
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    pub unsaved_buffer_paths: Vec<String>,
    pub timestamp: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorktreeSnapshot {
    pub worktree_path: String,
    pub git_state: Option<GitState>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitState {
    pub remote_url: Option<String>,
    pub head_sha: Option<String>,
    pub current_branch: Option<String>,
    pub diff: Option<String>,
}

#[derive(Clone)]
pub struct ThreadCheckpoint {
    message_id: MessageId,
    git_checkpoint: GitStoreCheckpoint,
}

#[derive(Copy, Clone, Debug)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}

pub enum LastRestoreCheckpoint {
    Pending {
        message_id: MessageId,
    },
    Error {
        message_id: MessageId,
        error: String,
    },
}

impl LastRestoreCheckpoint {
    pub fn message_id(&self) -> MessageId {
        match self {
            LastRestoreCheckpoint::Pending { message_id } => *message_id,
            LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
        }
    }
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    system_prompt_context: Option<AssistantSystemPromptContext>,
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Arc<ToolWorkingSet>,
    tool_use: ToolUseState,
    action_log: Entity<ActionLog>,
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    pending_checkpoint: Option<ThreadCheckpoint>,
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    cumulative_token_usage: TokenUsage,
    feedback: Option<ThreadFeedback>,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            system_prompt_context: None,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            tool_use: ToolUseState::new(tools.clone()),
            action_log: cx.new(|_| ActionLog::new()),
            initial_project_snapshot: {
                let project_snapshot = Self::project_snapshot(project, cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            cumulative_token_usage: TokenUsage::default(),
            feedback: None,
        }
    }

    pub fn deserialize(
        id: ThreadId,
        serialized: SerializedThread,
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            serialized
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use =
            ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |_| true);

        Self {
            id,
            updated_at: serialized.updated_at,
            summary: Some(serialized.summary),
            pending_summary: Task::ready(None),
            messages: serialized
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    segments: message
                        .segments
                        .into_iter()
                        .map(|segment| match segment {
                            SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
                            SerializedMessageSegment::Thinking { text } => {
                                MessageSegment::Thinking(text)
                            }
                        })
                        .collect(),
                })
                .collect(),
            next_message_id,
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            system_prompt_context: None,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            project,
            prompt_builder,
            tools,
            tool_use,
            action_log: cx.new(|_| ActionLog::new()),
            initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
            cumulative_token_usage: serialized.cumulative_token_usage,
            feedback: None,
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_generating(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn pending_tool(&self, id: &LanguageModelToolUseId) -> Option<&PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .find(|tool_use| &tool_use.id == id)
    }

    pub fn tools_needing_confirmation(&self) -> impl Iterator<Item = &PendingToolUse> {
        self.tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.needs_confirmation())
    }

    pub fn has_pending_tool_uses(&self) -> bool {
        !self.tool_use.pending_tool_uses().is_empty()
    }

    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }

    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let project = self.project.read(cx);
        let restore = project
            .git_store()
            .read(cx)
            .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
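                    // On a successful restore, drop this message and everything after it,
                    // since the project has been rolled back to the state before they were sent.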
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                }
                this.pending_checkpoint = None;
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }

    fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
        let pending_checkpoint = if self.is_generating() {
            return;
        } else if let Some(checkpoint) = self.pending_checkpoint.take() {
            checkpoint
        } else {
            return;
        };

        let git_store = self.project.read(cx).git_store().clone();
        let final_checkpoint = git_store.read(cx).checkpoint(cx);
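        // Compare the checkpoint taken when the user message was sent against the repository
        // state now that generation has finished. If nothing changed, the pending checkpoint
        // is discarded; otherwise it is kept so the user can restore it later. The comparison
        // checkpoint itself is always deleted afterwards.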
        cx.spawn(async move |this, cx| match final_checkpoint.await {
            Ok(final_checkpoint) => {
                let equal = git_store
                    .read_with(cx, |store, cx| {
                        store.compare_checkpoints(
                            pending_checkpoint.git_checkpoint.clone(),
                            final_checkpoint.clone(),
                            cx,
                        )
                    })?
                    .await
                    .unwrap_or(false);

                if equal {
                    git_store
                        .read_with(cx, |store, cx| {
                            store.delete_checkpoint(pending_checkpoint.git_checkpoint, cx)
                        })?
                        .detach();
                } else {
                    this.update(cx, |this, cx| {
                        this.insert_checkpoint(pending_checkpoint, cx)
                    })?;
                }

                git_store
                    .read_with(cx, |store, cx| {
                        store.delete_checkpoint(final_checkpoint, cx)
                    })?
                    .detach();

                Ok(())
            }
            Err(_) => this.update(cx, |this, cx| {
                this.insert_checkpoint(pending_checkpoint, cx)
            }),
        })
        .detach();
    }

    fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
        self.checkpoints_by_message
            .insert(checkpoint.message_id, checkpoint);
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();
    }

    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }

    pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
        let Some(message_ix) = self
            .messages
            .iter()
            .rposition(|message| message.id == message_id)
        else {
            return;
        };
        for deleted_message in self.messages.drain(message_ix..) {
            self.context_by_message.remove(&deleted_message.id);
            self.checkpoints_by_message.remove(&deleted_message.id);
        }
        cx.notify();
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        self.tool_use
            .pending_tool_uses()
            .iter()
            .all(|tool_use| tool_use.status.is_error())
    }

    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, cx)
    }

    pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(id)
    }

    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }

    pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
        self.tool_use.message_has_tool_results(message_id)
    }

    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        git_checkpoint: Option<GitStoreCheckpoint>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let message_id =
            self.insert_message(Role::User, vec![MessageSegment::Text(text.into())], cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
        if let Some(git_checkpoint) = git_checkpoint {
            self.pending_checkpoint = Some(ThreadCheckpoint {
                message_id,
                git_checkpoint,
            });
        }
        message_id
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message { id, role, segments });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    pub fn edit_message(
        &mut self,
        id: MessageId,
        new_role: Role,
        new_segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> bool {
        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
            return false;
        };
        message.role = new_role;
        message.segments = new_segments;
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageEdited(id));
        true
    }

    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
            return false;
        };
        self.messages.remove(index);
        self.context_by_message.remove(&id);
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageDeleted(id));
        true
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
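    ///
    /// The output is roughly shaped like this (the contents are illustrative):
    ///
    /// ```text
    /// User:
    /// <user message text>
    /// Assistant:
    /// <think>optional thinking segment</think><assistant message text>
    /// ```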
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(content) => text.push_str(content),
                    MessageSegment::Thinking(content) => {
                        text.push_str(&format!("<think>{}</think>", content))
                    }
                }
            }
            text.push('\n');
        }

        text
    }

    /// Serializes this thread into a format for storage or telemetry.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary_or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking(text) => {
                                    SerializedMessageSegment::Thinking { text: text.clone() }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                            })
                            .collect(),
                    })
                    .collect(),
                initial_project_snapshot,
                cumulative_token_usage: this.cumulative_token_usage.clone(),
            })
        })
    }

    pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
        self.system_prompt_context = Some(context);
    }

    pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
        &self.system_prompt_context
    }

    pub fn load_system_prompt_context(
        &self,
        cx: &App,
    ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
        let project = self.project.read(cx);
        let tasks = project
            .visible_worktrees(cx)
            .map(|worktree| {
                Self::load_worktree_info_for_system_prompt(
                    project.fs().clone(),
                    worktree.read(cx),
                    cx,
                )
            })
            .collect::<Vec<_>>();

        cx.spawn(async |_cx| {
            let results = futures::future::join_all(tasks).await;
            let mut first_err = None;
            let worktrees = results
                .into_iter()
                .map(|(worktree, err)| {
                    if first_err.is_none() && err.is_some() {
                        first_err = err;
                    }
                    worktree
                })
                .collect::<Vec<_>>();
            (AssistantSystemPromptContext::new(worktrees), first_err)
        })
    }

    fn load_worktree_info_for_system_prompt(
        fs: Arc<dyn Fs>,
        worktree: &Worktree,
        cx: &App,
    ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
        let root_name = worktree.root_name().into();
        let abs_path = worktree.abs_path();

        // Note that Cline supports `.clinerules` being a directory, but that is not currently
        // supported. This doesn't seem to occur often in GitHub repositories.
        const RULES_FILE_NAMES: [&'static str; 6] = [
            ".rules",
            ".cursorrules",
            ".windsurfrules",
            ".clinerules",
            ".github/copilot-instructions.md",
            "CLAUDE.md",
        ];
        let selected_rules_file = RULES_FILE_NAMES
            .into_iter()
            .filter_map(|name| {
                worktree
                    .entry_for_path(name)
                    .filter(|entry| entry.is_file())
                    .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
            })
            .next();

        if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
            cx.spawn(async move |_| {
                let rules_file_result = maybe!(async move {
                    let abs_rules_path = abs_rules_path?;
                    let text = fs.load(&abs_rules_path).await.with_context(|| {
                        format!("Failed to load assistant rules file {:?}", abs_rules_path)
                    })?;
                    anyhow::Ok(RulesFile {
                        rel_path: rel_rules_path,
                        abs_path: abs_rules_path.into(),
                        text: text.trim().to_string(),
                    })
                })
                .await;
                let (rules_file, rules_file_error) = match rules_file_result {
                    Ok(rules_file) => (Some(rules_file), None),
                    Err(err) => (
                        None,
                        Some(ThreadError::Message {
                            header: "Error loading rules file".into(),
                            message: format!("{err}").into(),
                        }),
                    ),
                };
                let worktree_info = WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file,
                };
                (worktree_info, rules_file_error)
            })
        } else {
            Task::ready((
                WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file: None,
                },
                None,
            ))
        }
    }

    pub fn send_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        request_kind: RequestKind,
        cx: &mut Context<Self>,
    ) {
        let mut request = self.to_completion_request(request_kind, cx);
        request.tools = {
            let mut tools = Vec::new();
            tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
                LanguageModelRequestTool {
                    name: tool.name(),
                    description: tool.description(),
                    input_schema: tool.input_schema(),
                }
            }));

            tools
        };

        self.stream_completion(request, model, cx);
    }

    pub fn to_completion_request(
        &self,
        request_kind: RequestKind,
        cx: &App,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
            if let Some(system_prompt) = self
                .prompt_builder
                .generate_assistant_system_prompt(system_prompt_context)
                .context("failed to generate assistant system prompt")
                .log_err()
            {
                request.messages.push(LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text(system_prompt)],
                    cache: true,
                });
            }
        } else {
            log::error!("system_prompt_context not set.")
        }

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_results(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            }

            if !message.segments.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.to_string()));
            }

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            };

            request.messages.push(request_message);
        }

        // Set a cache breakpoint at the second-to-last message.
        // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
        let breakpoint_index = request.messages.len().saturating_sub(2);
        for (index, message) in request.messages.iter_mut().enumerate() {
            message.cache = index == breakpoint_index;
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        self.attached_tracked_files_state(&mut request.messages, cx);

        request
    }

    fn attached_tracked_files_state(
        &self,
        messages: &mut Vec<LanguageModelRequestMessage>,
        cx: &App,
    ) {
        const STALE_FILES_HEADER: &str = "These files changed since last read:";

        let mut stale_message = String::new();

        let action_log = self.action_log.read(cx);

        for stale_file in action_log.stale_buffers(cx) {
            let Some(file) = stale_file.read(cx).file() else {
                continue;
            };

            if stale_message.is_empty() {
                write!(&mut stale_message, "{}", STALE_FILES_HEADER).ok();
            }

            writeln!(&mut stale_message, "- {}", file.path().display()).ok();
        }

        let mut content = Vec::with_capacity(2);

        if !stale_message.is_empty() {
            content.push(stale_message.into());
        }

        if action_log.has_edited_files_since_project_diagnostics_check() {
            content.push(
                "When you're done making changes, make sure to check project diagnostics and fix all errors AND warnings you introduced!".into(),
            );
        }

        if !content.is_empty() {
            let context_message = LanguageModelRequestMessage {
                role: Role::User,
                content,
                cache: false,
            };

            messages.push(context_message);
        }
    }

    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(async move |thread, cx| {
            let stream = model.stream_completion(request, &cx);
            let initial_token_usage =
                thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;
                let mut current_token_usage = TokenUsage::default();

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(
                                    Role::Assistant,
                                    vec![MessageSegment::Text(String::new())],
                                    cx,
                                );
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
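                            // Usage updates appear to report running totals for this completion,
                            // so only the delta since the previous update is added to the
                            // thread's cumulative count.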
                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
                                thread.cumulative_token_usage =
                                    thread.cumulative_token_usage.clone() + token_usage.clone()
                                        - current_token_usage.clone();
                                current_token_usage = token_usage;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.push_text(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Text(chunk.to_string())],
                                            cx,
                                        );
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::Thinking(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.push_thinking(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantThinking(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Thinking(chunk.to_string())],
                                            cx,
                                        );
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    thread.tool_use.request_tool_use(
                                        last_assistant_message.id,
                                        tool_use,
                                        cx,
                                    );
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

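                    // Yield back to the executor between events so other tasks get a chance
                    // to run while the stream is still producing output.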
                    smol::future::yield_now().await;
                }

                thread.update(cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(cx, |thread, cx| {
                    thread.finalize_pending_checkpoint(cx);
                    match result.as_ref() {
                        Ok(stop_reason) => match stop_reason {
                            StopReason::ToolUse => {
                                cx.emit(ThreadEvent::UsePendingTools);
                            }
                            StopReason::EndTurn => {}
                            StopReason::MaxTokens => {}
                        },
                        Err(error) => {
                            if error.is::<PaymentRequiredError>() {
                                cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                            } else if error.is::<MaxMonthlySpendReachedError>() {
                                cx.emit(ThreadEvent::ShowError(
                                    ThreadError::MaxMonthlySpendReached,
                                ));
                            } else {
                                let error_message = error
                                    .chain()
                                    .map(|err| err.to_string())
                                    .collect::<Vec<_>>()
                                    .join("\n");
                                cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                                    header: "Error interacting with language model".into(),
                                    message: SharedString::from(error_message.clone()),
                                }));
                            }

                            thread.cancel_last_completion(cx);
                        }
                    }
                    cx.emit(ThreadEvent::DoneStreaming);

                    if let Ok(initial_usage) = initial_token_usage {
                        let usage = thread.cumulative_token_usage.clone() - initial_usage;

                        telemetry::event!(
                            "Assistant Thread Completion",
                            thread_id = thread.id().to_string(),
                            model = model.telemetry_id(),
                            model_provider = model.provider_id().to_string(),
                            input_tokens = usage.input_tokens,
                            output_tokens = usage.output_tokens,
                            cache_creation_input_tokens = usage.cache_creation_input_tokens,
                            cache_read_input_tokens = usage.cache_read_input_tokens,
                        );
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

    pub fn summarize(&mut self, cx: &mut Context<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Summarize, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(async move |this, cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
            .await
        });
    }

    pub fn use_pending_tools(
        &mut self,
        cx: &mut Context<Self>,
    ) -> impl IntoIterator<Item = PendingToolUse> {
        let request = self.to_completion_request(RequestKind::Chat, cx);
        let messages = Arc::new(request.messages);
        let pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for tool_use in pending_tool_uses.iter() {
            if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
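                // Tools that require confirmation are parked until the user approves them,
                // unless the `always_allow_tool_actions` setting bypasses confirmation.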
                if tool.needs_confirmation()
                    && !AssistantSettings::get_global(cx).always_allow_tool_actions
                {
                    self.tool_use.confirm_tool_use(
                        tool_use.id.clone(),
                        tool_use.ui_text.clone(),
                        tool_use.input.clone(),
                        messages.clone(),
                        tool,
                    );
                    cx.emit(ThreadEvent::ToolConfirmationNeeded);
                } else {
                    self.run_tool(
                        tool_use.id.clone(),
                        tool_use.ui_text.clone(),
                        tool_use.input.clone(),
                        &messages,
                        tool,
                        cx,
                    );
                }
            }
        }

        pending_tool_uses
    }

    pub fn run_tool(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        ui_text: impl Into<SharedString>,
        input: serde_json::Value,
        messages: &[LanguageModelRequestMessage],
        tool: Arc<dyn Tool>,
        cx: &mut Context<'_, Thread>,
    ) {
        let task = self.spawn_tool_use(tool_use_id.clone(), messages, input, tool, cx);
        self.tool_use
            .run_pending_tool(tool_use_id, ui_text.into(), task);
    }

    fn spawn_tool_use(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        messages: &[LanguageModelRequestMessage],
        input: serde_json::Value,
        tool: Arc<dyn Tool>,
        cx: &mut Context<Thread>,
    ) -> Task<()> {
        let run_tool = tool.run(
            input,
            messages,
            self.project.clone(),
            self.action_log.clone(),
            cx,
        );

        cx.spawn({
            async move |thread: WeakEntity<Thread>, cx| {
                let output = run_tool.await;

                thread
                    .update(cx, |thread, cx| {
                        let pending_tool_use = thread
                            .tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                            canceled: false,
                        });
                    })
                    .ok();
            }
        })
    }

    pub fn attach_tool_results(
        &mut self,
        updated_context: Vec<ContextSnapshot>,
        cx: &mut Context<Self>,
    ) {
        self.context.extend(
            updated_context
                .into_iter()
                .map(|context| (context.id, context)),
        );

        // Insert a user message to contain the tool results.
        self.insert_user_message(
            // TODO: Sending up a user message without any content results in the model sending back
            // responses that also don't have any content. We currently don't handle this case well,
            // so for now we provide some text to keep the model on track.
            "Here are the tool results.",
            Vec::new(),
            None,
            cx,
        );
    }

    /// Cancels the last pending completion, if there are any pending.
    ///
    /// Returns whether a completion was canceled.
    pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
        let canceled = if self.pending_completions.pop().is_some() {
            true
        } else {
            let mut canceled = false;
            for pending_tool_use in self.tool_use.cancel_pending() {
                canceled = true;
                cx.emit(ThreadEvent::ToolFinished {
                    tool_use_id: pending_tool_use.id.clone(),
                    pending_tool_use: Some(pending_tool_use),
                    canceled: true,
                });
            }
            canceled
        };
        self.finalize_pending_checkpoint(cx);
        canceled
    }

    /// Returns the feedback given to the thread, if any.
    pub fn feedback(&self) -> Option<ThreadFeedback> {
        self.feedback
    }

    /// Reports feedback about the thread and stores it in our telemetry backend.
    pub fn report_feedback(
        &mut self,
        feedback: ThreadFeedback,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
        let serialized_thread = self.serialize(cx);
        let thread_id = self.id().clone();
        let client = self.project.read(cx).client();
        self.feedback = Some(feedback);
        cx.notify();

        cx.background_spawn(async move {
            let final_project_snapshot = final_project_snapshot.await;
            let serialized_thread = serialized_thread.await?;
            let thread_data =
                serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);

            let rating = match feedback {
                ThreadFeedback::Positive => "positive",
                ThreadFeedback::Negative => "negative",
            };
            telemetry::event!(
                "Assistant Thread Rated",
                rating,
                thread_id,
                thread_data,
                final_project_snapshot
            );
            client.telemetry().flush_events();

            Ok(())
        })
    }

    /// Create a snapshot of the current project state including git information and unsaved buffers.
    fn project_snapshot(
        project: Entity<Project>,
        cx: &mut Context<Self>,
    ) -> Task<Arc<ProjectSnapshot>> {
        let git_store = project.read(cx).git_store().clone();
        let worktree_snapshots: Vec<_> = project
            .read(cx)
            .visible_worktrees(cx)
            .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
            .collect();

        cx.spawn(async move |_, cx| {
            let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;

            let mut unsaved_buffers = Vec::new();
            cx.update(|app_cx| {
                let buffer_store = project.read(app_cx).buffer_store();
                for buffer_handle in buffer_store.read(app_cx).buffers() {
                    let buffer = buffer_handle.read(app_cx);
                    if buffer.is_dirty() {
                        if let Some(file) = buffer.file() {
                            let path = file.path().to_string_lossy().to_string();
                            unsaved_buffers.push(path);
                        }
                    }
                }
            })
            .ok();

            Arc::new(ProjectSnapshot {
                worktree_snapshots,
                unsaved_buffer_paths: unsaved_buffers,
                timestamp: Utc::now(),
            })
        })
    }

    fn worktree_snapshot(
        worktree: Entity<project::Worktree>,
        git_store: Entity<GitStore>,
        cx: &App,
    ) -> Task<WorktreeSnapshot> {
        cx.spawn(async move |cx| {
            // Get worktree path and snapshot
            let worktree_info = cx.update(|app_cx| {
                let worktree = worktree.read(app_cx);
                let path = worktree.abs_path().to_string_lossy().to_string();
                let snapshot = worktree.snapshot();
                (path, snapshot)
            });

            let Ok((worktree_path, snapshot)) = worktree_info else {
                return WorktreeSnapshot {
                    worktree_path: String::new(),
                    git_state: None,
                };
            };

            let repo_info = git_store
                .update(cx, |git_store, cx| {
                    git_store
                        .repositories()
                        .values()
                        .find(|repo| repo.read(cx).worktree_id == Some(snapshot.id()))
                        .and_then(|repo| {
                            let repo = repo.read(cx);
                            Some((repo.branch().cloned(), repo.local_repository()?))
                        })
                })
                .ok()
                .flatten();

            // Extract git information
            let git_state = match repo_info {
                None => None,
                Some((branch, repo)) => {
                    let current_branch = branch.map(|branch| branch.name.to_string());
                    let remote_url = repo.remote_url("origin");
                    let head_sha = repo.head_sha();

                    // Get diff asynchronously
                    let diff = repo
                        .diff(git::repository::DiffType::HeadToWorktree)
                        .await
                        .ok();

                    Some(GitState {
                        remote_url,
                        head_sha,
                        current_branch,
                        diff,
                    })
                }
            };

            WorktreeSnapshot {
                worktree_path,
                git_state,
            }
        })
    }

    pub fn to_markdown(&self, cx: &App) -> Result<String> {
        let mut markdown = Vec::new();

        if let Some(summary) = self.summary() {
            writeln!(markdown, "# {summary}\n")?;
        };

        for message in self.messages() {
            writeln!(
                markdown,
                "## {role}\n",
                role = match message.role {
                    Role::User => "User",
                    Role::Assistant => "Assistant",
                    Role::System => "System",
                }
            )?;
            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
                    MessageSegment::Thinking(text) => {
                        writeln!(markdown, "<think>{}</think>\n", text)?
                    }
                }
            }

            for tool_use in self.tool_uses_for_message(message.id, cx) {
                writeln!(
                    markdown,
                    "**Use Tool: {} ({})**",
                    tool_use.name, tool_use.id
                )?;
                writeln!(markdown, "```json")?;
                writeln!(
                    markdown,
                    "{}",
                    serde_json::to_string_pretty(&tool_use.input)?
                )?;
                writeln!(markdown, "```")?;
            }

            for tool_result in self.tool_results_for_message(message.id) {
                write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
                if tool_result.is_error {
                    write!(markdown, " (Error)")?;
                }

                writeln!(markdown, "**\n")?;
                writeln!(markdown, "{}", tool_result.content)?;
            }
        }

        Ok(String::from_utf8_lossy(&markdown).to_string())
    }

    pub fn action_log(&self) -> &Entity<ActionLog> {
        &self.action_log
    }

    pub fn project(&self) -> &Entity<Project> {
        &self.project
    }

    pub fn cumulative_token_usage(&self) -> TokenUsage {
        self.cumulative_token_usage.clone()
    }

    pub fn deny_tool_use(&mut self, tool_use_id: LanguageModelToolUseId, cx: &mut Context<Self>) {
        let err = Err(anyhow::anyhow!(
            "Permission to run tool action denied by user"
        ));

        self.tool_use.insert_tool_output(tool_use_id.clone(), err);

        cx.emit(ThreadEvent::ToolFinished {
            tool_use_id,
            pending_tool_use: None,
            canceled: true,
        });
    }
}

#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message {
        header: SharedString,
        message: SharedString,
    },
}

#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    StreamedAssistantThinking(MessageId, String),
    DoneStreaming,
    MessageAdded(MessageId),
    MessageEdited(MessageId),
    MessageDeleted(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
        /// Whether the tool was canceled by the user.
        canceled: bool,
    },
    CheckpointChanged,
    ToolConfirmationNeeded,
}

impl EventEmitter<ThreadEvent> for Thread {}

struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}