use std::fmt::Write as _;
use std::io::Write;
use std::mem;
use std::sync::Arc;

use anyhow::{Context as _, Result};
use assistant_tool::{ActionLog, ToolWorkingSet};
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use fs::Fs;
use futures::future::Shared;
use futures::{FutureExt, StreamExt as _};
use git;
use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
    LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
    Role, StopReason, TokenUsage,
};
use project::git_store::{GitStore, GitStoreCheckpoint};
use project::{Project, Worktree};
use prompt_store::{
    AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
};
use scripting_tool::{ScriptingSession, ScriptingTool};
use serde::{Deserialize, Serialize};
use util::{maybe, post_inc, ResultExt as _, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
use crate::thread_store::{
    SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
    SerializedToolUse,
};
use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub segments: Vec<MessageSegment>,
}

impl Message {
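    /// Appends `text` to the trailing [`MessageSegment::Thinking`] segment,
    /// starting a new thinking segment if the last segment is not one.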
    pub fn push_thinking(&mut self, text: &str) {
        if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
            segment.push_str(text);
        } else {
            self.segments
                .push(MessageSegment::Thinking(text.to_string()));
        }
    }

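    /// Appends `text` to the trailing [`MessageSegment::Text`] segment,
    /// starting a new text segment if the last segment is not one.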
    pub fn push_text(&mut self, text: &str) {
        if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
            segment.push_str(text);
        } else {
            self.segments.push(MessageSegment::Text(text.to_string()));
        }
    }

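    /// Concatenates all segments into a single string, wrapping thinking
    /// segments in `<think>…</think>` tags.
    ///
    /// A rough sketch of the behavior (illustrative only; `Message` values are
    /// constructed internally by the [`Thread`]):
    ///
    /// ```ignore
    /// // segments: [Text("Hello "), Thinking("weighing options"), Text("world")]
    /// // to_string() => "Hello <think>weighing options</think>world"
    /// ```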
    pub fn to_string(&self) -> String {
        let mut result = String::new();
        for segment in &self.segments {
            match segment {
                MessageSegment::Text(text) => result.push_str(text),
                MessageSegment::Thinking(text) => {
                    result.push_str("<think>");
                    result.push_str(text);
                    result.push_str("</think>");
                }
            }
        }
        result
    }
}

#[derive(Debug, Clone)]
pub enum MessageSegment {
    Text(String),
    Thinking(String),
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectSnapshot {
    pub worktree_snapshots: Vec<WorktreeSnapshot>,
    pub unsaved_buffer_paths: Vec<String>,
    pub timestamp: DateTime<Utc>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorktreeSnapshot {
    pub worktree_path: String,
    pub git_state: Option<GitState>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitState {
    pub remote_url: Option<String>,
    pub head_sha: Option<String>,
    pub current_branch: Option<String>,
    pub diff: Option<String>,
}

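/// Associates a [`GitStoreCheckpoint`] of the project with the message that
/// was about to be sent when the checkpoint was captured, so the project can
/// later be restored to that state via [`Thread::restore_checkpoint`].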
#[derive(Clone)]
pub struct ThreadCheckpoint {
    message_id: MessageId,
    git_checkpoint: GitStoreCheckpoint,
}

#[derive(Copy, Clone, Debug)]
pub enum ThreadFeedback {
    Positive,
    Negative,
}

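/// The state of the most recent checkpoint restore: either still pending, or
/// failed with an error.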
pub enum LastRestoreCheckpoint {
    Pending {
        message_id: MessageId,
    },
    Error {
        message_id: MessageId,
        error: String,
    },
}

impl LastRestoreCheckpoint {
    pub fn message_id(&self) -> MessageId {
        match self {
            LastRestoreCheckpoint::Pending { message_id } => *message_id,
            LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
        }
    }
}

/// A thread of conversation with the LLM.
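///
/// A minimal usage sketch, assuming an `Entity<Thread>` named `thread`, a
/// configured `model`, and a `cx` of the appropriate context type (the exact
/// setup lives elsewhere in the crate):
///
/// ```ignore
/// thread.update(cx, |thread, cx| {
///     thread.insert_user_message("Explain this function", Vec::new(), cx);
///     thread.send_to_model(model.clone(), RequestKind::Chat, cx);
/// });
/// // Progress is reported through the `ThreadEvent`s emitted by the thread.
/// ```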
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    system_prompt_context: Option<AssistantSystemPromptContext>,
    checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Arc<ToolWorkingSet>,
    tool_use: ToolUseState,
    action_log: Entity<ActionLog>,
    last_restore_checkpoint: Option<LastRestoreCheckpoint>,
    pending_checkpoint: Option<Task<Result<ThreadCheckpoint>>>,
    checkpoint_on_next_user_message: bool,
    scripting_session: Entity<ScriptingSession>,
    scripting_tool_use: ToolUseState,
    initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
    cumulative_token_usage: TokenUsage,
    feedback: Option<ThreadFeedback>,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            system_prompt_context: None,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project: project.clone(),
            prompt_builder,
            tools: tools.clone(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            checkpoint_on_next_user_message: true,
            tool_use: ToolUseState::new(tools.clone()),
            scripting_session: cx.new(|cx| ScriptingSession::new(project.clone(), cx)),
            scripting_tool_use: ToolUseState::new(tools),
            action_log: cx.new(|_| ActionLog::new()),
            initial_project_snapshot: {
                let project_snapshot = Self::project_snapshot(project, cx);
                cx.foreground_executor()
                    .spawn(async move { Some(project_snapshot.await) })
                    .shared()
            },
            cumulative_token_usage: TokenUsage::default(),
            feedback: None,
        }
    }

    pub fn deserialize(
        id: ThreadId,
        serialized: SerializedThread,
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            serialized
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use =
            ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |name| {
                name != ScriptingTool::NAME
            });
        let scripting_tool_use =
            ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |name| {
                name == ScriptingTool::NAME
            });
        let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));

        Self {
            id,
            updated_at: serialized.updated_at,
            summary: Some(serialized.summary),
            pending_summary: Task::ready(None),
            messages: serialized
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    segments: message
                        .segments
                        .into_iter()
                        .map(|segment| match segment {
                            SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
                            SerializedMessageSegment::Thinking { text } => {
                                MessageSegment::Thinking(text)
                            }
                        })
                        .collect(),
                })
                .collect(),
            next_message_id,
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            system_prompt_context: None,
            checkpoints_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            last_restore_checkpoint: None,
            pending_checkpoint: None,
            checkpoint_on_next_user_message: true,
            project,
            prompt_builder,
            tools,
            tool_use,
            action_log: cx.new(|_| ActionLog::new()),
            scripting_session,
            scripting_tool_use,
            initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
            // TODO: persist token usage?
            cumulative_token_usage: TokenUsage::default(),
            feedback: None,
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_generating(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
        self.checkpoints_by_message.get(&id).cloned()
    }

    pub fn restore_checkpoint(
        &mut self,
        checkpoint: ThreadCheckpoint,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
            message_id: checkpoint.message_id,
        });
        cx.emit(ThreadEvent::CheckpointChanged);
        cx.notify();

        let project = self.project.read(cx);
        let restore = project
            .git_store()
            .read(cx)
            .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
        cx.spawn(async move |this, cx| {
            let result = restore.await;
            this.update(cx, |this, cx| {
                if let Err(err) = result.as_ref() {
                    this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
                        message_id: checkpoint.message_id,
                        error: err.to_string(),
                    });
                } else {
                    this.truncate(checkpoint.message_id, cx);
                    this.last_restore_checkpoint = None;
                    this.pending_checkpoint = Some(Task::ready(Ok(ThreadCheckpoint {
                        message_id: this.next_message_id,
                        git_checkpoint: checkpoint.git_checkpoint,
                    })));
                }
                cx.emit(ThreadEvent::CheckpointChanged);
                cx.notify();
            })?;
            result
        })
    }

    fn checkpoint(&mut self, cx: &mut Context<Self>) {
        if self.is_generating() {
            return;
        }

        let git_store = self.project.read(cx).git_store().clone();
        let new_checkpoint = git_store.read(cx).checkpoint(cx);
        let old_checkpoint = self.pending_checkpoint.take();
        let next_user_message_id = self.next_message_id;
        self.pending_checkpoint = Some(cx.spawn(async move |this, cx| {
            let new_checkpoint = new_checkpoint.await?;

            if let Some(old_checkpoint) = old_checkpoint {
                if let Ok(old_checkpoint) = old_checkpoint.await {
                    let equal = git_store
                        .read_with(cx, |store, cx| {
                            store.compare_checkpoints(
                                old_checkpoint.git_checkpoint.clone(),
                                new_checkpoint.clone(),
                                cx,
                            )
                        })?
                        .await;

                    if equal.ok() != Some(true) {
                        this.update(cx, |this, cx| {
                            this.checkpoints_by_message
                                .insert(old_checkpoint.message_id, old_checkpoint);
                            cx.emit(ThreadEvent::CheckpointChanged);
                            cx.notify();
                        })?;
                    }
                }
            }

            Ok(ThreadCheckpoint {
                message_id: next_user_message_id,
                git_checkpoint: new_checkpoint,
            })
        }));
    }

    pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
        self.last_restore_checkpoint.as_ref()
    }

    pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
        let Some(message_ix) = self
            .messages
            .iter()
            .rposition(|message| message.id == message_id)
        else {
            return;
        };
        for deleted_message in self.messages.drain(message_ix..) {
            self.context_by_message.remove(&deleted_message.id);
            self.checkpoints_by_message.remove(&deleted_message.id);
        }
        cx.notify();
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .into_iter()
                .filter_map(|context_id| self.context.get(&context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        let mut all_pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .chain(self.scripting_tool_use.pending_tool_uses());

        // If the only pending tool uses left are the ones with errors, then
        // that means that we've finished running all of the pending tools.
        all_pending_tool_uses.all(|tool_use| tool_use.status.is_error())
    }

    pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id, cx)
    }

    pub fn scripting_tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
        self.scripting_tool_use.tool_uses_for_message(id, cx)
    }

    pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(id)
    }

    pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
        self.tool_use.tool_result(id)
    }

    pub fn scripting_tool_results_for_message(
        &self,
        id: MessageId,
    ) -> Vec<&LanguageModelToolResult> {
        self.scripting_tool_use.tool_results_for_message(id)
    }

    pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
        self.tool_use.message_has_tool_results(message_id)
    }

    pub fn message_has_scripting_tool_results(&self, message_id: MessageId) -> bool {
        self.scripting_tool_use.message_has_tool_results(message_id)
    }

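    /// Inserts a new user message along with its attached context.
    ///
    /// The first time this is called on a thread (see
    /// `checkpoint_on_next_user_message`), a project checkpoint is captured
    /// before the message is inserted.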
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        if mem::take(&mut self.checkpoint_on_next_user_message) {
            self.checkpoint(cx);
        }

        let message_id =
            self.insert_message(Role::User, vec![MessageSegment::Text(text.into())], cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
        message_id
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message { id, role, segments });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    pub fn edit_message(
        &mut self,
        id: MessageId,
        new_role: Role,
        new_segments: Vec<MessageSegment>,
        cx: &mut Context<Self>,
    ) -> bool {
        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
            return false;
        };
        message.role = new_role;
        message.segments = new_segments;
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageEdited(id));
        true
    }

    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
            return false;
        };
        self.messages.remove(index);
        self.context_by_message.remove(&id);
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageDeleted(id));
        true
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
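    ///
    /// A short exchange renders roughly as follows (illustrative):
    ///
    /// ```text
    /// User:
    /// What is 2 + 2?
    /// Assistant:
    /// <think>simple arithmetic</think>4
    /// ```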
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(content) => text.push_str(content),
                    MessageSegment::Thinking(content) => {
                        text.push_str(&format!("<think>{}</think>", content))
                    }
                }
            }
            text.push('\n');
        }

        text
    }

    /// Serializes this thread into a format for storage or telemetry.
    pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
        let initial_project_snapshot = self.initial_project_snapshot.clone();
        cx.spawn(async move |this, cx| {
            let initial_project_snapshot = initial_project_snapshot.await;
            this.read_with(cx, |this, cx| SerializedThread {
                version: SerializedThread::VERSION.to_string(),
                summary: this.summary_or_default(),
                updated_at: this.updated_at(),
                messages: this
                    .messages()
                    .map(|message| SerializedMessage {
                        id: message.id,
                        role: message.role,
                        segments: message
                            .segments
                            .iter()
                            .map(|segment| match segment {
                                MessageSegment::Text(text) => {
                                    SerializedMessageSegment::Text { text: text.clone() }
                                }
                                MessageSegment::Thinking(text) => {
                                    SerializedMessageSegment::Thinking { text: text.clone() }
                                }
                            })
                            .collect(),
                        tool_uses: this
                            .tool_uses_for_message(message.id, cx)
                            .into_iter()
                            .chain(this.scripting_tool_uses_for_message(message.id, cx))
                            .map(|tool_use| SerializedToolUse {
                                id: tool_use.id,
                                name: tool_use.name,
                                input: tool_use.input,
                            })
                            .collect(),
                        tool_results: this
                            .tool_results_for_message(message.id)
                            .into_iter()
                            .chain(this.scripting_tool_results_for_message(message.id))
                            .map(|tool_result| SerializedToolResult {
                                tool_use_id: tool_result.tool_use_id.clone(),
                                is_error: tool_result.is_error,
                                content: tool_result.content.clone(),
                            })
                            .collect(),
                    })
                    .collect(),
                initial_project_snapshot,
            })
        })
    }

    pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
        self.system_prompt_context = Some(context);
    }

    pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
        &self.system_prompt_context
    }

    pub fn load_system_prompt_context(
        &self,
        cx: &App,
    ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
        let project = self.project.read(cx);
        let tasks = project
            .visible_worktrees(cx)
            .map(|worktree| {
                Self::load_worktree_info_for_system_prompt(
                    project.fs().clone(),
                    worktree.read(cx),
                    cx,
                )
            })
            .collect::<Vec<_>>();

        cx.spawn(async |_cx| {
            let results = futures::future::join_all(tasks).await;
            let mut first_err = None;
            let worktrees = results
                .into_iter()
                .map(|(worktree, err)| {
                    if first_err.is_none() && err.is_some() {
                        first_err = err;
                    }
                    worktree
                })
                .collect::<Vec<_>>();
            (AssistantSystemPromptContext::new(worktrees), first_err)
        })
    }

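    /// Loads the worktree's root name and absolute path, along with the first
    /// matching rules file from `RULES_FILE_NAMES` (if any). A failure to load
    /// the rules file is reported as a [`ThreadError`] rather than an `Err`.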
    fn load_worktree_info_for_system_prompt(
        fs: Arc<dyn Fs>,
        worktree: &Worktree,
        cx: &App,
    ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
        let root_name = worktree.root_name().into();
        let abs_path = worktree.abs_path();

        // Note that Cline supports `.clinerules` being a directory, but that is not currently
        // supported. This doesn't seem to occur often in GitHub repositories.
        const RULES_FILE_NAMES: [&'static str; 5] = [
            ".rules",
            ".cursorrules",
            ".windsurfrules",
            ".clinerules",
            "CLAUDE.md",
        ];
        let selected_rules_file = RULES_FILE_NAMES
            .into_iter()
            .filter_map(|name| {
                worktree
                    .entry_for_path(name)
                    .filter(|entry| entry.is_file())
                    .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
            })
            .next();

        if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
            cx.spawn(async move |_| {
                let rules_file_result = maybe!(async move {
                    let abs_rules_path = abs_rules_path?;
                    let text = fs.load(&abs_rules_path).await.with_context(|| {
                        format!("Failed to load assistant rules file {:?}", abs_rules_path)
                    })?;
                    anyhow::Ok(RulesFile {
                        rel_path: rel_rules_path,
                        abs_path: abs_rules_path.into(),
                        text: text.trim().to_string(),
                    })
                })
                .await;
                let (rules_file, rules_file_error) = match rules_file_result {
                    Ok(rules_file) => (Some(rules_file), None),
                    Err(err) => (
                        None,
                        Some(ThreadError::Message {
                            header: "Error loading rules file".into(),
                            message: format!("{err}").into(),
                        }),
                    ),
                };
                let worktree_info = WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file,
                };
                (worktree_info, rules_file_error)
            })
        } else {
            Task::ready((
                WorktreeInfoForSystemPrompt {
                    root_name,
                    abs_path,
                    rules_file: None,
                },
                None,
            ))
        }
    }

    pub fn send_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        request_kind: RequestKind,
        cx: &mut Context<Self>,
    ) {
        let mut request = self.to_completion_request(request_kind, cx);
        request.tools = {
            let mut tools = Vec::new();

            if self.tools.is_scripting_tool_enabled() {
                tools.push(LanguageModelRequestTool {
                    name: ScriptingTool::NAME.into(),
                    description: ScriptingTool::DESCRIPTION.into(),
                    input_schema: ScriptingTool::input_schema(),
                });
            }

            tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
                LanguageModelRequestTool {
                    name: tool.name(),
                    description: tool.description(),
                    input_schema: tool.input_schema(),
                }
            }));

            tools
        };

        self.stream_completion(request, model, cx);
    }

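    /// Builds the [`LanguageModelRequest`] for this thread: the system prompt
    /// (when available), each message together with its tool uses and results
    /// (for [`RequestKind::Chat`]), a trailing user message carrying any
    /// referenced context, and a note listing files that changed since they
    /// were last read.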
    pub fn to_completion_request(
        &self,
        request_kind: RequestKind,
        cx: &App,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
            if let Some(system_prompt) = self
                .prompt_builder
                .generate_assistant_system_prompt(system_prompt_context)
                .context("failed to generate assistant system prompt")
                .log_err()
            {
                request.messages.push(LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text(system_prompt)],
                    cache: true,
                });
            }
        } else {
            log::error!("system_prompt_context not set.")
        }

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_results(message.id, &mut request_message);
                    self.scripting_tool_use
                        .attach_tool_results(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            }

            if !message.segments.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.to_string()));
            }

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                    self.scripting_tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            };

            request.messages.push(request_message);
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        self.attach_stale_files(&mut request.messages, cx);

        request
    }

    fn attach_stale_files(&self, messages: &mut Vec<LanguageModelRequestMessage>, cx: &App) {
        const STALE_FILES_HEADER: &str = "These files changed since last read:";

        let mut stale_message = String::new();

        for stale_file in self.action_log.read(cx).stale_buffers(cx) {
            let Some(file) = stale_file.read(cx).file() else {
                continue;
            };

            if stale_message.is_empty() {
                write!(&mut stale_message, "{}", STALE_FILES_HEADER).ok();
            }

            writeln!(&mut stale_message, "- {}", file.path().display()).ok();
        }

        if !stale_message.is_empty() {
            let context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: vec![stale_message.into()],
                cache: false,
            };

            messages.push(context_message);
        }
    }

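    /// Streams a completion for `request` from `model`, appending text and
    /// thinking chunks to the trailing assistant message (inserting one if
    /// needed), recording tool uses, and emitting [`ThreadEvent`]s as the
    /// stream progresses. When the stream finishes, a checkpoint is taken and,
    /// if the thread has at least two messages and no summary yet, a summary
    /// is requested.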
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(async move |thread, cx| {
            let stream = model.stream_completion(request, &cx);
            let initial_token_usage =
                thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;
                let mut current_token_usage = TokenUsage::default();

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(
                                    Role::Assistant,
                                    vec![MessageSegment::Text(String::new())],
                                    cx,
                                );
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
                                thread.cumulative_token_usage =
                                    thread.cumulative_token_usage.clone() + token_usage.clone()
                                        - current_token_usage.clone();
                                current_token_usage = token_usage;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.push_text(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Text(chunk.to_string())],
                                            cx,
                                        );
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::Thinking(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.push_thinking(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantThinking(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(
                                            Role::Assistant,
                                            vec![MessageSegment::Thinking(chunk.to_string())],
                                            cx,
                                        );
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    if tool_use.name.as_ref() == ScriptingTool::NAME {
                                        thread.scripting_tool_use.request_tool_use(
                                            last_assistant_message.id,
                                            tool_use,
                                            cx,
                                        );
                                    } else {
                                        thread.tool_use.request_tool_use(
                                            last_assistant_message.id,
                                            tool_use,
                                            cx,
                                        );
                                    }
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(cx, |thread, cx| {
                    thread.checkpoint(cx);
                    match result.as_ref() {
                        Ok(stop_reason) => match stop_reason {
                            StopReason::ToolUse => {
                                cx.emit(ThreadEvent::UsePendingTools);
                            }
                            StopReason::EndTurn => {}
                            StopReason::MaxTokens => {}
                        },
                        Err(error) => {
                            if error.is::<PaymentRequiredError>() {
                                cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                            } else if error.is::<MaxMonthlySpendReachedError>() {
                                cx.emit(ThreadEvent::ShowError(
                                    ThreadError::MaxMonthlySpendReached,
                                ));
                            } else {
                                let error_message = error
                                    .chain()
                                    .map(|err| err.to_string())
                                    .collect::<Vec<_>>()
                                    .join("\n");
                                cx.emit(ThreadEvent::ShowError(ThreadError::Message {
                                    header: "Error interacting with language model".into(),
                                    message: SharedString::from(error_message.clone()),
                                }));
                            }

                            thread.cancel_last_completion(cx);
                        }
                    }
                    cx.emit(ThreadEvent::DoneStreaming);

                    if let Ok(initial_usage) = initial_token_usage {
                        let usage = thread.cumulative_token_usage.clone() - initial_usage;

                        telemetry::event!(
                            "Assistant Thread Completion",
                            thread_id = thread.id().to_string(),
                            model = model.telemetry_id(),
                            model_provider = model.provider_id().to_string(),
                            input_tokens = usage.input_tokens,
                            output_tokens = usage.output_tokens,
                            cache_creation_input_tokens = usage.cache_creation_input_tokens,
                            cache_read_input_tokens = usage.cache_read_input_tokens,
                        );
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

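    /// Asks the active model for a short (3-7 word) title for the thread,
    /// keeping only the first line of the response. Does nothing if no
    /// provider or model is configured, or if the provider is not
    /// authenticated.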
    pub fn summarize(&mut self, cx: &mut Context<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Summarize, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble or prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(async move |this, cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
            .await
        });
    }

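    /// Runs every pending tool use that is still idle (regular tools directly,
    /// the scripting tool through the [`ScriptingSession`]) and returns the
    /// tool uses that were started.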
    pub fn use_pending_tools(
        &mut self,
        cx: &mut Context<Self>,
    ) -> impl IntoIterator<Item = PendingToolUse> {
        let request = self.to_completion_request(RequestKind::Chat, cx);
        let pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for tool_use in pending_tool_uses.iter() {
            if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
                let task = tool.run(
                    tool_use.input.clone(),
                    &request.messages,
                    self.project.clone(),
                    self.action_log.clone(),
                    cx,
                );

                self.insert_tool_output(
                    tool_use.id.clone(),
                    tool_use.ui_text.clone().into(),
                    task,
                    cx,
                );
            }
        }

        let pending_scripting_tool_uses = self
            .scripting_tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for scripting_tool_use in pending_scripting_tool_uses.iter() {
            let task = match ScriptingTool::deserialize_input(scripting_tool_use.input.clone()) {
                Err(err) => Task::ready(Err(err.into())),
                Ok(input) => {
                    let (script_id, script_task) =
                        self.scripting_session.update(cx, move |session, cx| {
                            session.run_script(input.lua_script, cx)
                        });

                    let session = self.scripting_session.clone();
                    cx.spawn(async move |_, cx| {
                        script_task.await;

                        let message = session.read_with(cx, |session, _cx| {
                            // Using an id to look up the script's output may seem roundabout
                            // (why not just return it from the task?), but it lets us report
                            // the script's state later while it is still running.
                            session
                                .get(script_id)
                                .output_message_for_llm()
                                .expect("Script shouldn't still be running")
                        })?;

                        Ok(message)
                    })
                }
            };

            let ui_text: SharedString = scripting_tool_use.name.clone().into();

            self.insert_scripting_tool_output(scripting_tool_use.id.clone(), ui_text, task, cx);
        }

        pending_tool_uses
            .into_iter()
            .chain(pending_scripting_tool_uses)
    }

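    /// Registers `output` as the pending result for `tool_use_id`, emitting
    /// [`ThreadEvent::ToolFinished`] once the task resolves.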
    pub fn insert_tool_output(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        ui_text: SharedString,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn({
            let tool_use_id = tool_use_id.clone();
            async move |thread, cx| {
                let output = output.await;
                thread
                    .update(cx, |thread, cx| {
                        let pending_tool_use = thread
                            .tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                            canceled: false,
                        });
                    })
                    .ok();
            }
        });

        self.tool_use
            .run_pending_tool(tool_use_id, ui_text, insert_output_task);
    }

    pub fn insert_scripting_tool_output(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        ui_text: SharedString,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn({
            let tool_use_id = tool_use_id.clone();
            async move |thread, cx| {
                let output = output.await;
                thread
                    .update(cx, |thread, cx| {
                        let pending_tool_use = thread
                            .scripting_tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                            canceled: false,
                        });
                    })
                    .ok();
            }
        });

        self.scripting_tool_use
            .run_pending_tool(tool_use_id, ui_text, insert_output_task);
    }

    pub fn attach_tool_results(
        &mut self,
        updated_context: Vec<ContextSnapshot>,
        cx: &mut Context<Self>,
    ) {
        self.context.extend(
            updated_context
                .into_iter()
                .map(|context| (context.id, context)),
        );

        // Insert a user message to contain the tool results.
        self.insert_user_message(
            // TODO: Sending up a user message without any content results in the model sending back
            // responses that also don't have any content. We currently don't handle this case well,
            // so for now we provide some text to keep the model on track.
            "Here are the tool results.",
            Vec::new(),
            cx,
        );
    }

    /// Cancels the last pending completion, if there are any pending.
    ///
    /// Returns whether a completion was canceled.
    pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
        let canceled = if self.pending_completions.pop().is_some() {
            true
        } else {
            let mut canceled = false;
            for pending_tool_use in self.tool_use.cancel_pending() {
                canceled = true;
                cx.emit(ThreadEvent::ToolFinished {
                    tool_use_id: pending_tool_use.id.clone(),
                    pending_tool_use: Some(pending_tool_use),
                    canceled: true,
                });
            }
            canceled
        };
        self.checkpoint(cx);
        canceled
    }

    /// Returns the feedback given to the thread, if any.
    pub fn feedback(&self) -> Option<ThreadFeedback> {
        self.feedback
    }

    /// Reports feedback about the thread and stores it in our telemetry backend.
    pub fn report_feedback(
        &mut self,
        feedback: ThreadFeedback,
        cx: &mut Context<Self>,
    ) -> Task<Result<()>> {
        let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
        let serialized_thread = self.serialize(cx);
        let thread_id = self.id().clone();
        let client = self.project.read(cx).client();
        self.feedback = Some(feedback);
        cx.notify();

        cx.background_spawn(async move {
            let final_project_snapshot = final_project_snapshot.await;
            let serialized_thread = serialized_thread.await?;
            let thread_data =
                serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);

            let rating = match feedback {
                ThreadFeedback::Positive => "positive",
                ThreadFeedback::Negative => "negative",
            };
            telemetry::event!(
                "Assistant Thread Rated",
                rating,
                thread_id,
                thread_data,
                final_project_snapshot
            );
            client.telemetry().flush_events();

            Ok(())
        })
    }

    /// Create a snapshot of the current project state including git information and unsaved buffers.
    fn project_snapshot(
        project: Entity<Project>,
        cx: &mut Context<Self>,
    ) -> Task<Arc<ProjectSnapshot>> {
        let git_store = project.read(cx).git_store().clone();
        let worktree_snapshots: Vec<_> = project
            .read(cx)
            .visible_worktrees(cx)
            .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
            .collect();

        cx.spawn(async move |_, cx| {
            let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;

            let mut unsaved_buffers = Vec::new();
            cx.update(|app_cx| {
                let buffer_store = project.read(app_cx).buffer_store();
                for buffer_handle in buffer_store.read(app_cx).buffers() {
                    let buffer = buffer_handle.read(app_cx);
                    if buffer.is_dirty() {
                        if let Some(file) = buffer.file() {
                            let path = file.path().to_string_lossy().to_string();
                            unsaved_buffers.push(path);
                        }
                    }
                }
            })
            .ok();

            Arc::new(ProjectSnapshot {
                worktree_snapshots,
                unsaved_buffer_paths: unsaved_buffers,
                timestamp: Utc::now(),
            })
        })
    }

    fn worktree_snapshot(
        worktree: Entity<project::Worktree>,
        git_store: Entity<GitStore>,
        cx: &App,
    ) -> Task<WorktreeSnapshot> {
        cx.spawn(async move |cx| {
            // Get worktree path and snapshot
            let worktree_info = cx.update(|app_cx| {
                let worktree = worktree.read(app_cx);
                let path = worktree.abs_path().to_string_lossy().to_string();
                let snapshot = worktree.snapshot();
                (path, snapshot)
            });

            let Ok((worktree_path, snapshot)) = worktree_info else {
                return WorktreeSnapshot {
                    worktree_path: String::new(),
                    git_state: None,
                };
            };

            let repo_info = git_store
                .update(cx, |git_store, cx| {
                    git_store
                        .repositories()
                        .values()
                        .find(|repo| repo.read(cx).worktree_id == snapshot.id())
                        .and_then(|repo| {
                            let repo = repo.read(cx);
                            Some((repo.branch().cloned(), repo.local_repository()?))
                        })
                })
                .ok()
                .flatten();

            // Extract git information
            let git_state = match repo_info {
                None => None,
                Some((branch, repo)) => {
                    let current_branch = branch.map(|branch| branch.name.to_string());
                    let remote_url = repo.remote_url("origin");
                    let head_sha = repo.head_sha();

                    // Get diff asynchronously
                    let diff = repo
                        .diff(git::repository::DiffType::HeadToWorktree, cx.clone())
                        .await
                        .ok();

                    Some(GitState {
                        remote_url,
                        head_sha,
                        current_branch,
                        diff,
                    })
                }
            };

            WorktreeSnapshot {
                worktree_path,
                git_state,
            }
        })
    }

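    /// Renders the thread as Markdown: the summary as a top-level heading,
    /// each message under a `## Role` heading, followed by any tool uses (with
    /// their JSON input in a fenced block) and tool results.
    ///
    /// A rough sketch of the output, with a hypothetical tool name and id
    /// (illustrative only):
    ///
    /// ```text
    /// # Fix flaky test
    ///
    /// ## User
    ///
    /// The test fails intermittently.
    ///
    /// ## Assistant
    ///
    /// **Use Tool: some_tool (toolu_123)**
    /// ```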
    pub fn to_markdown(&self, cx: &App) -> Result<String> {
        let mut markdown = Vec::new();

        if let Some(summary) = self.summary() {
            writeln!(markdown, "# {summary}\n")?;
        };

        for message in self.messages() {
            writeln!(
                markdown,
                "## {role}\n",
                role = match message.role {
                    Role::User => "User",
                    Role::Assistant => "Assistant",
                    Role::System => "System",
                }
            )?;
            for segment in &message.segments {
                match segment {
                    MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
                    MessageSegment::Thinking(text) => {
                        writeln!(markdown, "<think>{}</think>\n", text)?
                    }
                }
            }

            for tool_use in self.tool_uses_for_message(message.id, cx) {
                writeln!(
                    markdown,
                    "**Use Tool: {} ({})**",
                    tool_use.name, tool_use.id
                )?;
                writeln!(markdown, "```json")?;
                writeln!(
                    markdown,
                    "{}",
                    serde_json::to_string_pretty(&tool_use.input)?
                )?;
                writeln!(markdown, "```")?;
            }

            for tool_result in self.tool_results_for_message(message.id) {
                write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
                if tool_result.is_error {
                    write!(markdown, " (Error)")?;
                }

                writeln!(markdown, "**\n")?;
                writeln!(markdown, "{}", tool_result.content)?;
            }
        }

        Ok(String::from_utf8_lossy(&markdown).to_string())
    }

    pub fn action_log(&self) -> &Entity<ActionLog> {
        &self.action_log
    }

    pub fn project(&self) -> &Entity<Project> {
        &self.project
    }

    pub fn cumulative_token_usage(&self) -> TokenUsage {
        self.cumulative_token_usage.clone()
    }
}

#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message {
        header: SharedString,
        message: SharedString,
    },
}

#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    StreamedAssistantThinking(MessageId, String),
    DoneStreaming,
    MessageAdded(MessageId),
    MessageEdited(MessageId),
    MessageDeleted(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
        /// Whether the tool was canceled by the user.
        canceled: bool,
    },
    CheckpointChanged,
}

impl EventEmitter<ThreadEvent> for Thread {}

struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}