1use std::fmt::Write as _;
2use std::io::Write;
3use std::sync::Arc;
4
5use anyhow::{Context as _, Result};
6use assistant_tool::{ActionLog, ToolWorkingSet};
7use chrono::{DateTime, Utc};
8use collections::{BTreeMap, HashMap, HashSet};
9use fs::Fs;
10use futures::future::Shared;
11use futures::{FutureExt, StreamExt as _};
12use git;
13use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task};
14use language_model::{
15 LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
16 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
17 LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
18 Role, StopReason, TokenUsage,
19};
20use project::git_store::{GitStore, GitStoreCheckpoint};
21use project::{Project, Worktree};
22use prompt_store::{
23 AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
24};
25use scripting_tool::{ScriptingSession, ScriptingTool};
26use serde::{Deserialize, Serialize};
27use util::{maybe, post_inc, ResultExt as _, TryFutureExt as _};
28use uuid::Uuid;
29
30use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
31use crate::thread_store::{
32 SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
33 SerializedToolUse,
34};
35use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};
36
37#[derive(Debug, Clone, Copy)]
38pub enum RequestKind {
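    /// Used when chatting with the model.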
39 Chat,
40 /// Used when summarizing a thread.
41 Summarize,
42}
43
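/// A unique identifier for a [`Thread`].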
44#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
45pub struct ThreadId(Arc<str>);
46
47impl ThreadId {
48 pub fn new() -> Self {
49 Self(Uuid::new_v4().to_string().into())
50 }
51}
52
53impl std::fmt::Display for ThreadId {
54 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
55 write!(f, "{}", self.0)
56 }
57}
58
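/// A unique identifier for a [`Message`] within a [`Thread`].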
59#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
60pub struct MessageId(pub(crate) usize);
61
62impl MessageId {
63 fn post_inc(&mut self) -> Self {
64 Self(post_inc(&mut self.0))
65 }
66}
67
68/// A message in a [`Thread`].
69#[derive(Debug, Clone)]
70pub struct Message {
71 pub id: MessageId,
72 pub role: Role,
73 pub segments: Vec<MessageSegment>,
74}
75
76impl Message {
77 pub fn push_thinking(&mut self, text: &str) {
78 if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
79 segment.push_str(text);
80 } else {
81 self.segments
82 .push(MessageSegment::Thinking(text.to_string()));
83 }
84 }
85
86 pub fn push_text(&mut self, text: &str) {
87 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
88 segment.push_str(text);
89 } else {
90 self.segments.push(MessageSegment::Text(text.to_string()));
91 }
92 }
93
94 pub fn to_string(&self) -> String {
95 let mut result = String::new();
96 for segment in &self.segments {
97 match segment {
98 MessageSegment::Text(text) => result.push_str(text),
99 MessageSegment::Thinking(text) => {
100 result.push_str("<think>");
101 result.push_str(text);
102 result.push_str("</think>");
103 }
104 }
105 }
106 result
107 }
108}
109
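/// A segment of a [`Message`]: either plain text or the model's thinking.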
110#[derive(Debug, Clone)]
111pub enum MessageSegment {
112 Text(String),
113 Thinking(String),
114}
115
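/// A snapshot of the project's worktrees and unsaved buffers at a point in time.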
116#[derive(Debug, Clone, Serialize, Deserialize)]
117pub struct ProjectSnapshot {
118 pub worktree_snapshots: Vec<WorktreeSnapshot>,
119 pub unsaved_buffer_paths: Vec<String>,
120 pub timestamp: DateTime<Utc>,
121}
122
123#[derive(Debug, Clone, Serialize, Deserialize)]
124pub struct WorktreeSnapshot {
125 pub worktree_path: String,
126 pub git_state: Option<GitState>,
127}
128
129#[derive(Debug, Clone, Serialize, Deserialize)]
130pub struct GitState {
131 pub remote_url: Option<String>,
132 pub head_sha: Option<String>,
133 pub current_branch: Option<String>,
134 pub diff: Option<String>,
135}
136
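/// A git checkpoint recorded when a user message was sent, used to restore the
/// project to its state at that point in the conversation.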
137#[derive(Clone)]
138pub struct ThreadCheckpoint {
139 message_id: MessageId,
140 git_checkpoint: GitStoreCheckpoint,
141}
142
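/// Feedback given by the user on a thread, reported to our telemetry backend.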
143#[derive(Copy, Clone, Debug)]
144pub enum ThreadFeedback {
145 Positive,
146 Negative,
147}
148
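/// The status of the most recent attempt to restore a checkpoint.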
149pub enum LastRestoreCheckpoint {
150 Pending {
151 message_id: MessageId,
152 },
153 Error {
154 message_id: MessageId,
155 error: String,
156 },
157}
158
159impl LastRestoreCheckpoint {
160 pub fn message_id(&self) -> MessageId {
161 match self {
162 LastRestoreCheckpoint::Pending { message_id } => *message_id,
163 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
164 }
165 }
166}
167
168/// A thread of conversation with the LLM.
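///
/// A typical flow looks roughly like the sketch below (illustrative only; how
/// `project`, `tools`, `prompt_builder`, `model`, and `cx` are obtained is elided):
///
/// ```ignore
/// let thread = cx.new(|cx| Thread::new(project, tools, prompt_builder, cx));
/// thread.update(cx, |thread, cx| {
///     thread.insert_user_message("Hello!", Vec::new(), None, cx);
///     thread.send_to_model(model, RequestKind::Chat, cx);
/// });
/// ```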
169pub struct Thread {
170 id: ThreadId,
171 updated_at: DateTime<Utc>,
172 summary: Option<SharedString>,
173 pending_summary: Task<Option<()>>,
174 messages: Vec<Message>,
175 next_message_id: MessageId,
176 context: BTreeMap<ContextId, ContextSnapshot>,
177 context_by_message: HashMap<MessageId, Vec<ContextId>>,
178 system_prompt_context: Option<AssistantSystemPromptContext>,
179 checkpoints_by_message: HashMap<MessageId, ThreadCheckpoint>,
180 completion_count: usize,
181 pending_completions: Vec<PendingCompletion>,
182 project: Entity<Project>,
183 prompt_builder: Arc<PromptBuilder>,
184 tools: Arc<ToolWorkingSet>,
185 tool_use: ToolUseState,
186 action_log: Entity<ActionLog>,
187 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
188 pending_checkpoint: Option<ThreadCheckpoint>,
189 scripting_session: Entity<ScriptingSession>,
190 scripting_tool_use: ToolUseState,
191 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
192 cumulative_token_usage: TokenUsage,
193 feedback: Option<ThreadFeedback>,
194}
195
196impl Thread {
197 pub fn new(
198 project: Entity<Project>,
199 tools: Arc<ToolWorkingSet>,
200 prompt_builder: Arc<PromptBuilder>,
201 cx: &mut Context<Self>,
202 ) -> Self {
203 Self {
204 id: ThreadId::new(),
205 updated_at: Utc::now(),
206 summary: None,
207 pending_summary: Task::ready(None),
208 messages: Vec::new(),
209 next_message_id: MessageId(0),
210 context: BTreeMap::default(),
211 context_by_message: HashMap::default(),
212 system_prompt_context: None,
213 checkpoints_by_message: HashMap::default(),
214 completion_count: 0,
215 pending_completions: Vec::new(),
216 project: project.clone(),
217 prompt_builder,
218 tools: tools.clone(),
219 last_restore_checkpoint: None,
220 pending_checkpoint: None,
221 tool_use: ToolUseState::new(tools.clone()),
222 scripting_session: cx.new(|cx| ScriptingSession::new(project.clone(), cx)),
223 scripting_tool_use: ToolUseState::new(tools),
224 action_log: cx.new(|_| ActionLog::new()),
225 initial_project_snapshot: {
226 let project_snapshot = Self::project_snapshot(project, cx);
227 cx.foreground_executor()
228 .spawn(async move { Some(project_snapshot.await) })
229 .shared()
230 },
231 cumulative_token_usage: TokenUsage::default(),
232 feedback: None,
233 }
234 }
235
236 pub fn deserialize(
237 id: ThreadId,
238 serialized: SerializedThread,
239 project: Entity<Project>,
240 tools: Arc<ToolWorkingSet>,
241 prompt_builder: Arc<PromptBuilder>,
242 cx: &mut Context<Self>,
243 ) -> Self {
244 let next_message_id = MessageId(
245 serialized
246 .messages
247 .last()
248 .map(|message| message.id.0 + 1)
249 .unwrap_or(0),
250 );
251 let tool_use =
252 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |name| {
253 name != ScriptingTool::NAME
254 });
255 let scripting_tool_use =
256 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |name| {
257 name == ScriptingTool::NAME
258 });
259 let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));
260
261 Self {
262 id,
263 updated_at: serialized.updated_at,
264 summary: Some(serialized.summary),
265 pending_summary: Task::ready(None),
266 messages: serialized
267 .messages
268 .into_iter()
269 .map(|message| Message {
270 id: message.id,
271 role: message.role,
272 segments: message
273 .segments
274 .into_iter()
275 .map(|segment| match segment {
276 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
277 SerializedMessageSegment::Thinking { text } => {
278 MessageSegment::Thinking(text)
279 }
280 })
281 .collect(),
282 })
283 .collect(),
284 next_message_id,
285 context: BTreeMap::default(),
286 context_by_message: HashMap::default(),
287 system_prompt_context: None,
288 checkpoints_by_message: HashMap::default(),
289 completion_count: 0,
290 pending_completions: Vec::new(),
291 last_restore_checkpoint: None,
292 pending_checkpoint: None,
293 project,
294 prompt_builder,
295 tools,
296 tool_use,
297 action_log: cx.new(|_| ActionLog::new()),
298 scripting_session,
299 scripting_tool_use,
300 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
301 // TODO: persist token usage?
302 cumulative_token_usage: TokenUsage::default(),
303 feedback: None,
304 }
305 }
306
307 pub fn id(&self) -> &ThreadId {
308 &self.id
309 }
310
311 pub fn is_empty(&self) -> bool {
312 self.messages.is_empty()
313 }
314
315 pub fn updated_at(&self) -> DateTime<Utc> {
316 self.updated_at
317 }
318
319 pub fn touch_updated_at(&mut self) {
320 self.updated_at = Utc::now();
321 }
322
323 pub fn summary(&self) -> Option<SharedString> {
324 self.summary.clone()
325 }
326
327 pub fn summary_or_default(&self) -> SharedString {
328 const DEFAULT: SharedString = SharedString::new_static("New Thread");
329 self.summary.clone().unwrap_or(DEFAULT)
330 }
331
332 pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
333 self.summary = Some(summary.into());
334 cx.emit(ThreadEvent::SummaryChanged);
335 }
336
337 pub fn message(&self, id: MessageId) -> Option<&Message> {
338 self.messages.iter().find(|message| message.id == id)
339 }
340
341 pub fn messages(&self) -> impl Iterator<Item = &Message> {
342 self.messages.iter()
343 }
344
345 pub fn is_generating(&self) -> bool {
346 !self.pending_completions.is_empty() || !self.all_tools_finished()
347 }
348
349 pub fn tools(&self) -> &Arc<ToolWorkingSet> {
350 &self.tools
351 }
352
353 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
354 self.checkpoints_by_message.get(&id).cloned()
355 }
356
357 pub fn restore_checkpoint(
358 &mut self,
359 checkpoint: ThreadCheckpoint,
360 cx: &mut Context<Self>,
361 ) -> Task<Result<()>> {
362 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
363 message_id: checkpoint.message_id,
364 });
365 cx.emit(ThreadEvent::CheckpointChanged);
366 cx.notify();
367
368 let project = self.project.read(cx);
369 let restore = project
370 .git_store()
371 .read(cx)
372 .restore_checkpoint(checkpoint.git_checkpoint.clone(), cx);
373 cx.spawn(async move |this, cx| {
374 let result = restore.await;
375 this.update(cx, |this, cx| {
376 if let Err(err) = result.as_ref() {
377 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
378 message_id: checkpoint.message_id,
379 error: err.to_string(),
380 });
381 } else {
382 this.truncate(checkpoint.message_id, cx);
383 this.last_restore_checkpoint = None;
384 }
385 this.pending_checkpoint = None;
386 cx.emit(ThreadEvent::CheckpointChanged);
387 cx.notify();
388 })?;
389 result
390 })
391 }
392
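    /// Finalizes the checkpoint taken when the last user message was sent: if
    /// the project is unchanged since then, the pending checkpoint is deleted;
    /// otherwise it is recorded as the checkpoint for that message.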
393 fn finalize_pending_checkpoint(&mut self, cx: &mut Context<Self>) {
394 let pending_checkpoint = if self.is_generating() {
395 return;
396 } else if let Some(checkpoint) = self.pending_checkpoint.take() {
397 checkpoint
398 } else {
399 return;
400 };
401
402 let git_store = self.project.read(cx).git_store().clone();
403 let final_checkpoint = git_store.read(cx).checkpoint(cx);
404 cx.spawn(async move |this, cx| match final_checkpoint.await {
405 Ok(final_checkpoint) => {
406 let equal = git_store
407 .read_with(cx, |store, cx| {
408 store.compare_checkpoints(
409 pending_checkpoint.git_checkpoint.clone(),
410 final_checkpoint.clone(),
411 cx,
412 )
413 })?
414 .await
415 .unwrap_or(false);
416
417 if equal {
418 git_store
419 .read_with(cx, |store, cx| {
420 store.delete_checkpoint(pending_checkpoint.git_checkpoint, cx)
421 })?
422 .detach();
423 } else {
424 this.update(cx, |this, cx| {
425 this.insert_checkpoint(pending_checkpoint, cx)
426 })?;
427 }
428
429 git_store
430 .read_with(cx, |store, cx| {
431 store.delete_checkpoint(final_checkpoint, cx)
432 })?
433 .detach();
434
435 Ok(())
436 }
437 Err(_) => this.update(cx, |this, cx| {
438 this.insert_checkpoint(pending_checkpoint, cx)
439 }),
440 })
441 .detach();
442 }
443
444 fn insert_checkpoint(&mut self, checkpoint: ThreadCheckpoint, cx: &mut Context<Self>) {
445 self.checkpoints_by_message
446 .insert(checkpoint.message_id, checkpoint);
447 cx.emit(ThreadEvent::CheckpointChanged);
448 cx.notify();
449 }
450
451 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
452 self.last_restore_checkpoint.as_ref()
453 }
454
455 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
456 let Some(message_ix) = self
457 .messages
458 .iter()
459 .rposition(|message| message.id == message_id)
460 else {
461 return;
462 };
463 for deleted_message in self.messages.drain(message_ix..) {
464 self.context_by_message.remove(&deleted_message.id);
465 self.checkpoints_by_message.remove(&deleted_message.id);
466 }
467 cx.notify();
468 }
469
470 pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
471 let context = self.context_by_message.get(&id)?;
472 Some(
473 context
474 .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
476 .cloned()
477 .collect::<Vec<_>>(),
478 )
479 }
480
481 /// Returns whether all of the tool uses have finished running.
482 pub fn all_tools_finished(&self) -> bool {
483 let mut all_pending_tool_uses = self
484 .tool_use
485 .pending_tool_uses()
486 .into_iter()
487 .chain(self.scripting_tool_use.pending_tool_uses());
488
        // If the only pending tool uses left are the ones with errors, then
        // we've finished running all of the pending tools.
491 all_pending_tool_uses.all(|tool_use| tool_use.status.is_error())
492 }
493
494 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
495 self.tool_use.tool_uses_for_message(id, cx)
496 }
497
498 pub fn scripting_tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
499 self.scripting_tool_use.tool_uses_for_message(id, cx)
500 }
501
502 pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
503 self.tool_use.tool_results_for_message(id)
504 }
505
506 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
507 self.tool_use.tool_result(id)
508 }
509
510 pub fn scripting_tool_results_for_message(
511 &self,
512 id: MessageId,
513 ) -> Vec<&LanguageModelToolResult> {
514 self.scripting_tool_use.tool_results_for_message(id)
515 }
516
517 pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
518 self.tool_use.message_has_tool_results(message_id)
519 }
520
521 pub fn message_has_scripting_tool_results(&self, message_id: MessageId) -> bool {
522 self.scripting_tool_use.message_has_tool_results(message_id)
523 }
524
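    /// Inserts a new user message with the given text and context, optionally
    /// recording a pending git checkpoint to associate with it.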
525 pub fn insert_user_message(
526 &mut self,
527 text: impl Into<String>,
528 context: Vec<ContextSnapshot>,
529 git_checkpoint: Option<GitStoreCheckpoint>,
530 cx: &mut Context<Self>,
531 ) -> MessageId {
532 let message_id =
533 self.insert_message(Role::User, vec![MessageSegment::Text(text.into())], cx);
534 let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
535 self.context
536 .extend(context.into_iter().map(|context| (context.id, context)));
537 self.context_by_message.insert(message_id, context_ids);
538 if let Some(git_checkpoint) = git_checkpoint {
539 self.pending_checkpoint = Some(ThreadCheckpoint {
540 message_id,
541 git_checkpoint,
542 });
543 }
544 message_id
545 }
546
547 pub fn insert_message(
548 &mut self,
549 role: Role,
550 segments: Vec<MessageSegment>,
551 cx: &mut Context<Self>,
552 ) -> MessageId {
553 let id = self.next_message_id.post_inc();
554 self.messages.push(Message { id, role, segments });
555 self.touch_updated_at();
556 cx.emit(ThreadEvent::MessageAdded(id));
557 id
558 }
559
560 pub fn edit_message(
561 &mut self,
562 id: MessageId,
563 new_role: Role,
564 new_segments: Vec<MessageSegment>,
565 cx: &mut Context<Self>,
566 ) -> bool {
567 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
568 return false;
569 };
570 message.role = new_role;
571 message.segments = new_segments;
572 self.touch_updated_at();
573 cx.emit(ThreadEvent::MessageEdited(id));
574 true
575 }
576
577 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
578 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
579 return false;
580 };
581 self.messages.remove(index);
582 self.context_by_message.remove(&id);
583 self.touch_updated_at();
584 cx.emit(ThreadEvent::MessageDeleted(id));
585 true
586 }
587
588 /// Returns the representation of this [`Thread`] in a textual form.
589 ///
590 /// This is the representation we use when attaching a thread as context to another thread.
591 pub fn text(&self) -> String {
592 let mut text = String::new();
593
594 for message in &self.messages {
595 text.push_str(match message.role {
596 language_model::Role::User => "User:",
597 language_model::Role::Assistant => "Assistant:",
598 language_model::Role::System => "System:",
599 });
600 text.push('\n');
601
602 for segment in &message.segments {
603 match segment {
604 MessageSegment::Text(content) => text.push_str(content),
605 MessageSegment::Thinking(content) => {
606 text.push_str(&format!("<think>{}</think>", content))
607 }
608 }
609 }
610 text.push('\n');
611 }
612
613 text
614 }
615
616 /// Serializes this thread into a format for storage or telemetry.
617 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
618 let initial_project_snapshot = self.initial_project_snapshot.clone();
619 cx.spawn(async move |this, cx| {
620 let initial_project_snapshot = initial_project_snapshot.await;
621 this.read_with(cx, |this, cx| SerializedThread {
622 version: SerializedThread::VERSION.to_string(),
623 summary: this.summary_or_default(),
624 updated_at: this.updated_at(),
625 messages: this
626 .messages()
627 .map(|message| SerializedMessage {
628 id: message.id,
629 role: message.role,
630 segments: message
631 .segments
632 .iter()
633 .map(|segment| match segment {
634 MessageSegment::Text(text) => {
635 SerializedMessageSegment::Text { text: text.clone() }
636 }
637 MessageSegment::Thinking(text) => {
638 SerializedMessageSegment::Thinking { text: text.clone() }
639 }
640 })
641 .collect(),
642 tool_uses: this
643 .tool_uses_for_message(message.id, cx)
644 .into_iter()
645 .chain(this.scripting_tool_uses_for_message(message.id, cx))
646 .map(|tool_use| SerializedToolUse {
647 id: tool_use.id,
648 name: tool_use.name,
649 input: tool_use.input,
650 })
651 .collect(),
652 tool_results: this
653 .tool_results_for_message(message.id)
654 .into_iter()
655 .chain(this.scripting_tool_results_for_message(message.id))
656 .map(|tool_result| SerializedToolResult {
657 tool_use_id: tool_result.tool_use_id.clone(),
658 is_error: tool_result.is_error,
659 content: tool_result.content.clone(),
660 })
661 .collect(),
662 })
663 .collect(),
664 initial_project_snapshot,
665 })
666 })
667 }
668
669 pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
670 self.system_prompt_context = Some(context);
671 }
672
673 pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
674 &self.system_prompt_context
675 }
676
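    /// Collects, for each visible worktree, the information (including any
    /// rules file) needed to build the assistant's system prompt, along with
    /// the first error encountered while loading rules files.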
677 pub fn load_system_prompt_context(
678 &self,
679 cx: &App,
680 ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
681 let project = self.project.read(cx);
682 let tasks = project
683 .visible_worktrees(cx)
684 .map(|worktree| {
685 Self::load_worktree_info_for_system_prompt(
686 project.fs().clone(),
687 worktree.read(cx),
688 cx,
689 )
690 })
691 .collect::<Vec<_>>();
692
693 cx.spawn(async |_cx| {
694 let results = futures::future::join_all(tasks).await;
695 let mut first_err = None;
696 let worktrees = results
697 .into_iter()
698 .map(|(worktree, err)| {
699 if first_err.is_none() && err.is_some() {
700 first_err = err;
701 }
702 worktree
703 })
704 .collect::<Vec<_>>();
705 (AssistantSystemPromptContext::new(worktrees), first_err)
706 })
707 }
708
709 fn load_worktree_info_for_system_prompt(
710 fs: Arc<dyn Fs>,
711 worktree: &Worktree,
712 cx: &App,
713 ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
714 let root_name = worktree.root_name().into();
715 let abs_path = worktree.abs_path();
716
        // Note that Cline supports `.clinerules` being a directory, but we don't
        // currently support that. It doesn't seem to occur often in GitHub repositories.
719 const RULES_FILE_NAMES: [&'static str; 5] = [
720 ".rules",
721 ".cursorrules",
722 ".windsurfrules",
723 ".clinerules",
724 "CLAUDE.md",
725 ];
726 let selected_rules_file = RULES_FILE_NAMES
727 .into_iter()
728 .filter_map(|name| {
729 worktree
730 .entry_for_path(name)
731 .filter(|entry| entry.is_file())
732 .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
733 })
734 .next();
735
736 if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
737 cx.spawn(async move |_| {
738 let rules_file_result = maybe!(async move {
739 let abs_rules_path = abs_rules_path?;
740 let text = fs.load(&abs_rules_path).await.with_context(|| {
741 format!("Failed to load assistant rules file {:?}", abs_rules_path)
742 })?;
743 anyhow::Ok(RulesFile {
744 rel_path: rel_rules_path,
745 abs_path: abs_rules_path.into(),
746 text: text.trim().to_string(),
747 })
748 })
749 .await;
750 let (rules_file, rules_file_error) = match rules_file_result {
751 Ok(rules_file) => (Some(rules_file), None),
752 Err(err) => (
753 None,
754 Some(ThreadError::Message {
755 header: "Error loading rules file".into(),
756 message: format!("{err}").into(),
757 }),
758 ),
759 };
760 let worktree_info = WorktreeInfoForSystemPrompt {
761 root_name,
762 abs_path,
763 rules_file,
764 };
765 (worktree_info, rules_file_error)
766 })
767 } else {
768 Task::ready((
769 WorktreeInfoForSystemPrompt {
770 root_name,
771 abs_path,
772 rules_file: None,
773 },
774 None,
775 ))
776 }
777 }
778
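    /// Sends the thread to the given model, advertising the enabled tools
    /// (including the scripting tool, when enabled) in the request.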
779 pub fn send_to_model(
780 &mut self,
781 model: Arc<dyn LanguageModel>,
782 request_kind: RequestKind,
783 cx: &mut Context<Self>,
784 ) {
785 let mut request = self.to_completion_request(request_kind, cx);
786 request.tools = {
787 let mut tools = Vec::new();
788
789 if self.tools.is_scripting_tool_enabled() {
790 tools.push(LanguageModelRequestTool {
791 name: ScriptingTool::NAME.into(),
792 description: ScriptingTool::DESCRIPTION.into(),
793 input_schema: ScriptingTool::input_schema(),
794 });
795 }
796
797 tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
798 LanguageModelRequestTool {
799 name: tool.name(),
800 description: tool.description(),
801 input_schema: tool.input_schema(),
802 }
803 }));
804
805 tools
806 };
807
808 self.stream_completion(request, model, cx);
809 }
810
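    /// Builds a [`LanguageModelRequest`] from the system prompt, the thread's
    /// messages (with tool uses and results for chat requests), any attached
    /// context, and stale files.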
811 pub fn to_completion_request(
812 &self,
813 request_kind: RequestKind,
814 cx: &App,
815 ) -> LanguageModelRequest {
816 let mut request = LanguageModelRequest {
817 messages: vec![],
818 tools: Vec::new(),
819 stop: Vec::new(),
820 temperature: None,
821 };
822
823 if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
824 if let Some(system_prompt) = self
825 .prompt_builder
826 .generate_assistant_system_prompt(system_prompt_context)
827 .context("failed to generate assistant system prompt")
828 .log_err()
829 {
830 request.messages.push(LanguageModelRequestMessage {
831 role: Role::System,
832 content: vec![MessageContent::Text(system_prompt)],
833 cache: true,
834 });
835 }
836 } else {
837 log::error!("system_prompt_context not set.")
838 }
839
840 let mut referenced_context_ids = HashSet::default();
841
842 for message in &self.messages {
843 if let Some(context_ids) = self.context_by_message.get(&message.id) {
844 referenced_context_ids.extend(context_ids);
845 }
846
847 let mut request_message = LanguageModelRequestMessage {
848 role: message.role,
849 content: Vec::new(),
850 cache: false,
851 };
852
853 match request_kind {
854 RequestKind::Chat => {
855 self.tool_use
856 .attach_tool_results(message.id, &mut request_message);
857 self.scripting_tool_use
858 .attach_tool_results(message.id, &mut request_message);
859 }
860 RequestKind::Summarize => {
861 // We don't care about tool use during summarization.
862 }
863 }
864
865 if !message.segments.is_empty() {
866 request_message
867 .content
868 .push(MessageContent::Text(message.to_string()));
869 }
870
871 match request_kind {
872 RequestKind::Chat => {
873 self.tool_use
874 .attach_tool_uses(message.id, &mut request_message);
875 self.scripting_tool_use
876 .attach_tool_uses(message.id, &mut request_message);
877 }
878 RequestKind::Summarize => {
879 // We don't care about tool use during summarization.
880 }
881 };
882
883 request.messages.push(request_message);
884 }
885
886 if !referenced_context_ids.is_empty() {
887 let mut context_message = LanguageModelRequestMessage {
888 role: Role::User,
889 content: Vec::new(),
890 cache: false,
891 };
892
893 let referenced_context = referenced_context_ids
894 .into_iter()
895 .filter_map(|context_id| self.context.get(context_id))
896 .cloned();
897 attach_context_to_message(&mut context_message, referenced_context);
898
899 request.messages.push(context_message);
900 }
901
902 self.attach_stale_files(&mut request.messages, cx);
903
904 request
905 }
906
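    /// Appends a user message listing any buffers that have changed since the
    /// model last read them.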
907 fn attach_stale_files(&self, messages: &mut Vec<LanguageModelRequestMessage>, cx: &App) {
908 const STALE_FILES_HEADER: &str = "These files changed since last read:";
909
910 let mut stale_message = String::new();
911
912 for stale_file in self.action_log.read(cx).stale_buffers(cx) {
913 let Some(file) = stale_file.read(cx).file() else {
914 continue;
915 };
916
917 if stale_message.is_empty() {
                writeln!(&mut stale_message, "{}", STALE_FILES_HEADER).ok();
919 }
920
921 writeln!(&mut stale_message, "- {}", file.path().display()).ok();
922 }
923
924 if !stale_message.is_empty() {
925 let context_message = LanguageModelRequestMessage {
926 role: Role::User,
927 content: vec![stale_message.into()],
928 cache: false,
929 };
930
931 messages.push(context_message);
932 }
933 }
934
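    /// Streams a completion from the model, applying text, thinking, and
    /// tool-use events to the thread as they arrive.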
935 pub fn stream_completion(
936 &mut self,
937 request: LanguageModelRequest,
938 model: Arc<dyn LanguageModel>,
939 cx: &mut Context<Self>,
940 ) {
941 let pending_completion_id = post_inc(&mut self.completion_count);
942
943 let task = cx.spawn(async move |thread, cx| {
944 let stream = model.stream_completion(request, &cx);
945 let initial_token_usage =
946 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
947 let stream_completion = async {
948 let mut events = stream.await?;
949 let mut stop_reason = StopReason::EndTurn;
950 let mut current_token_usage = TokenUsage::default();
951
952 while let Some(event) = events.next().await {
953 let event = event?;
954
955 thread.update(cx, |thread, cx| {
956 match event {
957 LanguageModelCompletionEvent::StartMessage { .. } => {
958 thread.insert_message(
959 Role::Assistant,
960 vec![MessageSegment::Text(String::new())],
961 cx,
962 );
963 }
964 LanguageModelCompletionEvent::Stop(reason) => {
965 stop_reason = reason;
966 }
967 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
968 thread.cumulative_token_usage =
969 thread.cumulative_token_usage.clone() + token_usage.clone()
970 - current_token_usage.clone();
971 current_token_usage = token_usage;
972 }
973 LanguageModelCompletionEvent::Text(chunk) => {
974 if let Some(last_message) = thread.messages.last_mut() {
975 if last_message.role == Role::Assistant {
976 last_message.push_text(&chunk);
977 cx.emit(ThreadEvent::StreamedAssistantText(
978 last_message.id,
979 chunk,
980 ));
981 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
987 thread.insert_message(
988 Role::Assistant,
989 vec![MessageSegment::Text(chunk.to_string())],
990 cx,
991 );
992 };
993 }
994 }
995 LanguageModelCompletionEvent::Thinking(chunk) => {
996 if let Some(last_message) = thread.messages.last_mut() {
997 if last_message.role == Role::Assistant {
998 last_message.push_thinking(&chunk);
999 cx.emit(ThreadEvent::StreamedAssistantThinking(
1000 last_message.id,
1001 chunk,
1002 ));
1003 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
1009 thread.insert_message(
1010 Role::Assistant,
1011 vec![MessageSegment::Thinking(chunk.to_string())],
1012 cx,
1013 );
1014 };
1015 }
1016 }
1017 LanguageModelCompletionEvent::ToolUse(tool_use) => {
1018 if let Some(last_assistant_message) = thread
1019 .messages
1020 .iter()
1021 .rfind(|message| message.role == Role::Assistant)
1022 {
1023 if tool_use.name.as_ref() == ScriptingTool::NAME {
1024 thread.scripting_tool_use.request_tool_use(
1025 last_assistant_message.id,
1026 tool_use,
1027 cx,
1028 );
1029 } else {
1030 thread.tool_use.request_tool_use(
1031 last_assistant_message.id,
1032 tool_use,
1033 cx,
1034 );
1035 }
1036 }
1037 }
1038 }
1039
1040 thread.touch_updated_at();
1041 cx.emit(ThreadEvent::StreamedCompletion);
1042 cx.notify();
1043 })?;
1044
1045 smol::future::yield_now().await;
1046 }
1047
1048 thread.update(cx, |thread, cx| {
1049 thread
1050 .pending_completions
1051 .retain(|completion| completion.id != pending_completion_id);
1052
1053 if thread.summary.is_none() && thread.messages.len() >= 2 {
1054 thread.summarize(cx);
1055 }
1056 })?;
1057
1058 anyhow::Ok(stop_reason)
1059 };
1060
1061 let result = stream_completion.await;
1062
1063 thread
1064 .update(cx, |thread, cx| {
1065 thread.finalize_pending_checkpoint(cx);
1066 match result.as_ref() {
1067 Ok(stop_reason) => match stop_reason {
1068 StopReason::ToolUse => {
1069 cx.emit(ThreadEvent::UsePendingTools);
1070 }
1071 StopReason::EndTurn => {}
1072 StopReason::MaxTokens => {}
1073 },
1074 Err(error) => {
1075 if error.is::<PaymentRequiredError>() {
1076 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1077 } else if error.is::<MaxMonthlySpendReachedError>() {
1078 cx.emit(ThreadEvent::ShowError(
1079 ThreadError::MaxMonthlySpendReached,
1080 ));
1081 } else {
1082 let error_message = error
1083 .chain()
1084 .map(|err| err.to_string())
1085 .collect::<Vec<_>>()
1086 .join("\n");
1087 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1088 header: "Error interacting with language model".into(),
1089 message: SharedString::from(error_message.clone()),
1090 }));
1091 }
1092
1093 thread.cancel_last_completion(cx);
1094 }
1095 }
1096 cx.emit(ThreadEvent::DoneStreaming);
1097
1098 if let Ok(initial_usage) = initial_token_usage {
1099 let usage = thread.cumulative_token_usage.clone() - initial_usage;
1100
1101 telemetry::event!(
1102 "Assistant Thread Completion",
1103 thread_id = thread.id().to_string(),
1104 model = model.telemetry_id(),
1105 model_provider = model.provider_id().to_string(),
1106 input_tokens = usage.input_tokens,
1107 output_tokens = usage.output_tokens,
1108 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1109 cache_read_input_tokens = usage.cache_read_input_tokens,
1110 );
1111 }
1112 })
1113 .ok();
1114 });
1115
1116 self.pending_completions.push(PendingCompletion {
1117 id: pending_completion_id,
1118 _task: task,
1119 });
1120 }
1121
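    /// Asks the active model to generate a short title for the thread, if a
    /// provider is configured and authenticated.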
1122 pub fn summarize(&mut self, cx: &mut Context<Self>) {
1123 let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
1124 return;
1125 };
1126 let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
1127 return;
1128 };
1129
1130 if !provider.is_authenticated(cx) {
1131 return;
1132 }
1133
1134 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1135 request.messages.push(LanguageModelRequestMessage {
1136 role: Role::User,
1137 content: vec![
1138 "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
1139 .into(),
1140 ],
1141 cache: false,
1142 });
1143
1144 self.pending_summary = cx.spawn(async move |this, cx| {
1145 async move {
1146 let stream = model.stream_completion_text(request, &cx);
1147 let mut messages = stream.await?;
1148
1149 let mut new_summary = String::new();
1150 while let Some(message) = messages.stream.next().await {
1151 let text = message?;
1152 let mut lines = text.lines();
1153 new_summary.extend(lines.next());
1154
1155 // Stop if the LLM generated multiple lines.
1156 if lines.next().is_some() {
1157 break;
1158 }
1159 }
1160
1161 this.update(cx, |this, cx| {
1162 if !new_summary.is_empty() {
1163 this.summary = Some(new_summary.into());
1164 }
1165
1166 cx.emit(ThreadEvent::SummaryChanged);
1167 })?;
1168
1169 anyhow::Ok(())
1170 }
1171 .log_err()
1172 .await
1173 });
1174 }
1175
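    /// Starts running every idle pending tool use, including scripting tool
    /// uses, and returns the tool uses that were started.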
1176 pub fn use_pending_tools(
1177 &mut self,
1178 cx: &mut Context<Self>,
1179 ) -> impl IntoIterator<Item = PendingToolUse> {
1180 let request = self.to_completion_request(RequestKind::Chat, cx);
1181 let pending_tool_uses = self
1182 .tool_use
1183 .pending_tool_uses()
1184 .into_iter()
1185 .filter(|tool_use| tool_use.status.is_idle())
1186 .cloned()
1187 .collect::<Vec<_>>();
1188
1189 for tool_use in pending_tool_uses.iter() {
1190 if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1191 let task = tool.run(
1192 tool_use.input.clone(),
1193 &request.messages,
1194 self.project.clone(),
1195 self.action_log.clone(),
1196 cx,
1197 );
1198
1199 self.insert_tool_output(
1200 tool_use.id.clone(),
1201 tool_use.ui_text.clone().into(),
1202 task,
1203 cx,
1204 );
1205 }
1206 }
1207
1208 let pending_scripting_tool_uses = self
1209 .scripting_tool_use
1210 .pending_tool_uses()
1211 .into_iter()
1212 .filter(|tool_use| tool_use.status.is_idle())
1213 .cloned()
1214 .collect::<Vec<_>>();
1215
1216 for scripting_tool_use in pending_scripting_tool_uses.iter() {
1217 let task = match ScriptingTool::deserialize_input(scripting_tool_use.input.clone()) {
1218 Err(err) => Task::ready(Err(err.into())),
1219 Ok(input) => {
1220 let (script_id, script_task) =
1221 self.scripting_session.update(cx, move |session, cx| {
1222 session.run_script(input.lua_script, cx)
1223 });
1224
1225 let session = self.scripting_session.clone();
1226 cx.spawn(async move |_, cx| {
1227 script_task.await;
1228
1229 let message = session.read_with(cx, |session, _cx| {
                            // Using an id to get the script output may seem roundabout: why not
                            // just include the output in the Task's result? We do it this way
                            // because we'll later want to report the script's state as it runs.
1233 session
1234 .get(script_id)
1235 .output_message_for_llm()
1236 .expect("Script shouldn't still be running")
1237 })?;
1238
1239 Ok(message)
1240 })
1241 }
1242 };
1243
1244 let ui_text: SharedString = scripting_tool_use.name.clone().into();
1245
1246 self.insert_scripting_tool_output(scripting_tool_use.id.clone(), ui_text, task, cx);
1247 }
1248
1249 pending_tool_uses
1250 .into_iter()
1251 .chain(pending_scripting_tool_uses)
1252 }
1253
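    /// Runs the given output task for a pending tool use and emits
    /// [`ThreadEvent::ToolFinished`] when it completes.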
1254 pub fn insert_tool_output(
1255 &mut self,
1256 tool_use_id: LanguageModelToolUseId,
1257 ui_text: SharedString,
1258 output: Task<Result<String>>,
1259 cx: &mut Context<Self>,
1260 ) {
1261 let insert_output_task = cx.spawn({
1262 let tool_use_id = tool_use_id.clone();
1263 async move |thread, cx| {
1264 let output = output.await;
1265 thread
1266 .update(cx, |thread, cx| {
1267 let pending_tool_use = thread
1268 .tool_use
1269 .insert_tool_output(tool_use_id.clone(), output);
1270
1271 cx.emit(ThreadEvent::ToolFinished {
1272 tool_use_id,
1273 pending_tool_use,
1274 canceled: false,
1275 });
1276 })
1277 .ok();
1278 }
1279 });
1280
1281 self.tool_use
1282 .run_pending_tool(tool_use_id, ui_text, insert_output_task);
1283 }
1284
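    /// Like [`Self::insert_tool_output`], but for output from the scripting tool.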
1285 pub fn insert_scripting_tool_output(
1286 &mut self,
1287 tool_use_id: LanguageModelToolUseId,
1288 ui_text: SharedString,
1289 output: Task<Result<String>>,
1290 cx: &mut Context<Self>,
1291 ) {
1292 let insert_output_task = cx.spawn({
1293 let tool_use_id = tool_use_id.clone();
1294 async move |thread, cx| {
1295 let output = output.await;
1296 thread
1297 .update(cx, |thread, cx| {
1298 let pending_tool_use = thread
1299 .scripting_tool_use
1300 .insert_tool_output(tool_use_id.clone(), output);
1301
1302 cx.emit(ThreadEvent::ToolFinished {
1303 tool_use_id,
1304 pending_tool_use,
1305 canceled: false,
1306 });
1307 })
1308 .ok();
1309 }
1310 });
1311
1312 self.scripting_tool_use
1313 .run_pending_tool(tool_use_id, ui_text, insert_output_task);
1314 }
1315
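    /// Extends the thread's context with `updated_context` and inserts a user
    /// message to carry the tool results back to the model.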
1316 pub fn attach_tool_results(
1317 &mut self,
1318 updated_context: Vec<ContextSnapshot>,
1319 cx: &mut Context<Self>,
1320 ) {
1321 self.context.extend(
1322 updated_context
1323 .into_iter()
1324 .map(|context| (context.id, context)),
1325 );
1326
1327 // Insert a user message to contain the tool results.
1328 self.insert_user_message(
1329 // TODO: Sending up a user message without any content results in the model sending back
1330 // responses that also don't have any content. We currently don't handle this case well,
1331 // so for now we provide some text to keep the model on track.
1332 "Here are the tool results.",
1333 Vec::new(),
1334 None,
1335 cx,
1336 );
1337 }
1338
    /// Cancels the last pending completion, if one is pending.
1340 ///
1341 /// Returns whether a completion was canceled.
1342 pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
1343 let canceled = if self.pending_completions.pop().is_some() {
1344 true
1345 } else {
1346 let mut canceled = false;
1347 for pending_tool_use in self.tool_use.cancel_pending() {
1348 canceled = true;
1349 cx.emit(ThreadEvent::ToolFinished {
1350 tool_use_id: pending_tool_use.id.clone(),
1351 pending_tool_use: Some(pending_tool_use),
1352 canceled: true,
1353 });
1354 }
1355 canceled
1356 };
1357 self.finalize_pending_checkpoint(cx);
1358 canceled
1359 }
1360
1361 /// Returns the feedback given to the thread, if any.
1362 pub fn feedback(&self) -> Option<ThreadFeedback> {
1363 self.feedback
1364 }
1365
1366 /// Reports feedback about the thread and stores it in our telemetry backend.
1367 pub fn report_feedback(
1368 &mut self,
1369 feedback: ThreadFeedback,
1370 cx: &mut Context<Self>,
1371 ) -> Task<Result<()>> {
1372 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
1373 let serialized_thread = self.serialize(cx);
1374 let thread_id = self.id().clone();
1375 let client = self.project.read(cx).client();
1376 self.feedback = Some(feedback);
1377 cx.notify();
1378
1379 cx.background_spawn(async move {
1380 let final_project_snapshot = final_project_snapshot.await;
1381 let serialized_thread = serialized_thread.await?;
1382 let thread_data =
1383 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
1384
1385 let rating = match feedback {
1386 ThreadFeedback::Positive => "positive",
1387 ThreadFeedback::Negative => "negative",
1388 };
1389 telemetry::event!(
1390 "Assistant Thread Rated",
1391 rating,
1392 thread_id,
1393 thread_data,
1394 final_project_snapshot
1395 );
1396 client.telemetry().flush_events();
1397
1398 Ok(())
1399 })
1400 }
1401
    /// Creates a snapshot of the current project state, including git information and unsaved buffers.
1403 fn project_snapshot(
1404 project: Entity<Project>,
1405 cx: &mut Context<Self>,
1406 ) -> Task<Arc<ProjectSnapshot>> {
1407 let git_store = project.read(cx).git_store().clone();
1408 let worktree_snapshots: Vec<_> = project
1409 .read(cx)
1410 .visible_worktrees(cx)
1411 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
1412 .collect();
1413
1414 cx.spawn(async move |_, cx| {
1415 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
1416
1417 let mut unsaved_buffers = Vec::new();
1418 cx.update(|app_cx| {
1419 let buffer_store = project.read(app_cx).buffer_store();
1420 for buffer_handle in buffer_store.read(app_cx).buffers() {
1421 let buffer = buffer_handle.read(app_cx);
1422 if buffer.is_dirty() {
1423 if let Some(file) = buffer.file() {
1424 let path = file.path().to_string_lossy().to_string();
1425 unsaved_buffers.push(path);
1426 }
1427 }
1428 }
1429 })
1430 .ok();
1431
1432 Arc::new(ProjectSnapshot {
1433 worktree_snapshots,
1434 unsaved_buffer_paths: unsaved_buffers,
1435 timestamp: Utc::now(),
1436 })
1437 })
1438 }
1439
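    /// Captures a single worktree's path and git state (branch, remote URL,
    /// HEAD SHA, and diff).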
1440 fn worktree_snapshot(
1441 worktree: Entity<project::Worktree>,
1442 git_store: Entity<GitStore>,
1443 cx: &App,
1444 ) -> Task<WorktreeSnapshot> {
1445 cx.spawn(async move |cx| {
1446 // Get worktree path and snapshot
1447 let worktree_info = cx.update(|app_cx| {
1448 let worktree = worktree.read(app_cx);
1449 let path = worktree.abs_path().to_string_lossy().to_string();
1450 let snapshot = worktree.snapshot();
1451 (path, snapshot)
1452 });
1453
1454 let Ok((worktree_path, snapshot)) = worktree_info else {
1455 return WorktreeSnapshot {
1456 worktree_path: String::new(),
1457 git_state: None,
1458 };
1459 };
1460
1461 let repo_info = git_store
1462 .update(cx, |git_store, cx| {
1463 git_store
1464 .repositories()
1465 .values()
1466 .find(|repo| repo.read(cx).worktree_id == snapshot.id())
1467 .and_then(|repo| {
1468 let repo = repo.read(cx);
1469 Some((repo.branch().cloned(), repo.local_repository()?))
1470 })
1471 })
1472 .ok()
1473 .flatten();
1474
1475 // Extract git information
1476 let git_state = match repo_info {
1477 None => None,
1478 Some((branch, repo)) => {
1479 let current_branch = branch.map(|branch| branch.name.to_string());
1480 let remote_url = repo.remote_url("origin");
1481 let head_sha = repo.head_sha();
1482
1483 // Get diff asynchronously
1484 let diff = repo
1485 .diff(git::repository::DiffType::HeadToWorktree, cx.clone())
1486 .await
1487 .ok();
1488
1489 Some(GitState {
1490 remote_url,
1491 head_sha,
1492 current_branch,
1493 diff,
1494 })
1495 }
1496 };
1497
1498 WorktreeSnapshot {
1499 worktree_path,
1500 git_state,
1501 }
1502 })
1503 }
1504
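    /// Renders the thread, including tool uses and tool results, as Markdown.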
1505 pub fn to_markdown(&self, cx: &App) -> Result<String> {
1506 let mut markdown = Vec::new();
1507
1508 if let Some(summary) = self.summary() {
1509 writeln!(markdown, "# {summary}\n")?;
1510 };
1511
1512 for message in self.messages() {
1513 writeln!(
1514 markdown,
1515 "## {role}\n",
1516 role = match message.role {
1517 Role::User => "User",
1518 Role::Assistant => "Assistant",
1519 Role::System => "System",
1520 }
1521 )?;
1522 for segment in &message.segments {
1523 match segment {
1524 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
1525 MessageSegment::Thinking(text) => {
1526 writeln!(markdown, "<think>{}</think>\n", text)?
1527 }
1528 }
1529 }
1530
1531 for tool_use in self.tool_uses_for_message(message.id, cx) {
1532 writeln!(
1533 markdown,
1534 "**Use Tool: {} ({})**",
1535 tool_use.name, tool_use.id
1536 )?;
1537 writeln!(markdown, "```json")?;
1538 writeln!(
1539 markdown,
1540 "{}",
1541 serde_json::to_string_pretty(&tool_use.input)?
1542 )?;
1543 writeln!(markdown, "```")?;
1544 }
1545
1546 for tool_result in self.tool_results_for_message(message.id) {
1547 write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
1548 if tool_result.is_error {
1549 write!(markdown, " (Error)")?;
1550 }
1551
1552 writeln!(markdown, "**\n")?;
1553 writeln!(markdown, "{}", tool_result.content)?;
1554 }
1555 }
1556
1557 Ok(String::from_utf8_lossy(&markdown).to_string())
1558 }
1559
1560 pub fn action_log(&self) -> &Entity<ActionLog> {
1561 &self.action_log
1562 }
1563
1564 pub fn project(&self) -> &Entity<Project> {
1565 &self.project
1566 }
1567
1568 pub fn cumulative_token_usage(&self) -> TokenUsage {
1569 self.cumulative_token_usage.clone()
1570 }
1571}
1572
1573#[derive(Debug, Clone)]
1574pub enum ThreadError {
1575 PaymentRequired,
1576 MaxMonthlySpendReached,
1577 Message {
1578 header: SharedString,
1579 message: SharedString,
1580 },
1581}
1582
1583#[derive(Debug, Clone)]
1584pub enum ThreadEvent {
1585 ShowError(ThreadError),
1586 StreamedCompletion,
1587 StreamedAssistantText(MessageId, String),
1588 StreamedAssistantThinking(MessageId, String),
1589 DoneStreaming,
1590 MessageAdded(MessageId),
1591 MessageEdited(MessageId),
1592 MessageDeleted(MessageId),
1593 SummaryChanged,
1594 UsePendingTools,
1595 ToolFinished {
1596 #[allow(unused)]
1597 tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this result, if any.
1599 pending_tool_use: Option<PendingToolUse>,
1600 /// Whether the tool was canceled by the user.
1601 canceled: bool,
1602 },
1603 CheckpointChanged,
1604}
1605
1606impl EventEmitter<ThreadEvent> for Thread {}
1607
1608struct PendingCompletion {
1609 id: usize,
1610 _task: Task<()>,
1611}