1use std::fmt::Write as _;
2use std::io::Write;
3use std::sync::Arc;
4
5use anyhow::{Context as _, Result};
6use assistant_tool::{ActionLog, ToolWorkingSet};
7use chrono::{DateTime, Utc};
8use collections::{BTreeMap, HashMap, HashSet};
9use fs::Fs;
10use futures::future::Shared;
11use futures::{FutureExt, StreamExt as _};
12use git;
13use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task};
14use language_model::{
15 LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
16 LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
17 LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
18 Role, StopReason, TokenUsage,
19};
20use project::git_store::{GitStore, GitStoreCheckpoint};
21use project::{Project, Worktree};
22use prompt_store::{
23 AssistantSystemPromptContext, PromptBuilder, RulesFile, WorktreeInfoForSystemPrompt,
24};
25use scripting_tool::{ScriptingSession, ScriptingTool};
26use serde::{Deserialize, Serialize};
27use util::{maybe, post_inc, ResultExt as _, TryFutureExt as _};
28use uuid::Uuid;
29
30use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
31use crate::thread_store::{
32 SerializedMessage, SerializedMessageSegment, SerializedThread, SerializedToolResult,
33 SerializedToolUse,
34};
35use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};
36
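/// The kind of completion request being sent to the model.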
37#[derive(Debug, Clone, Copy)]
38pub enum RequestKind {
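    /// Used for regular chat conversation in a thread.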
39 Chat,
40 /// Used when summarizing a thread.
41 Summarize,
42}
43
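/// A unique identifier for a [`Thread`], backed by a v4 UUID.
///
/// ```ignore
/// // Illustrative sketch: every call produces a distinct id.
/// let a = ThreadId::new();
/// let b = ThreadId::new();
/// assert_ne!(a, b);
/// ```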
44#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
45pub struct ThreadId(Arc<str>);
46
47impl ThreadId {
48 pub fn new() -> Self {
49 Self(Uuid::new_v4().to_string().into())
50 }
51}
52
53impl std::fmt::Display for ThreadId {
54 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
55 write!(f, "{}", self.0)
56 }
57}
58
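/// A monotonically increasing identifier for a [`Message`] within a thread.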
59#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
60pub struct MessageId(pub(crate) usize);
61
62impl MessageId {
63 fn post_inc(&mut self) -> Self {
64 Self(post_inc(&mut self.0))
65 }
66}
67
68/// A message in a [`Thread`].
69#[derive(Debug, Clone)]
70pub struct Message {
71 pub id: MessageId,
72 pub role: Role,
73 pub segments: Vec<MessageSegment>,
74}
75
76impl Message {
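    /// Appends text to the last thinking segment, starting a new one if the
    /// last segment is not a thinking segment.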
77 pub fn push_thinking(&mut self, text: &str) {
78 if let Some(MessageSegment::Thinking(segment)) = self.segments.last_mut() {
79 segment.push_str(text);
80 } else {
81 self.segments
82 .push(MessageSegment::Thinking(text.to_string()));
83 }
84 }
85
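    /// Appends text to the last text segment, starting a new one if the last
    /// segment is not a text segment.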
86 pub fn push_text(&mut self, text: &str) {
87 if let Some(MessageSegment::Text(segment)) = self.segments.last_mut() {
88 segment.push_str(text);
89 } else {
90 self.segments.push(MessageSegment::Text(text.to_string()));
91 }
92 }
93
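    /// Renders the message as plain text, wrapping any thinking segments in
    /// `<think>` tags.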
94 pub fn to_string(&self) -> String {
95 let mut result = String::new();
96 for segment in &self.segments {
97 match segment {
98 MessageSegment::Text(text) => result.push_str(text),
99 MessageSegment::Thinking(text) => {
100 result.push_str("<think>");
101 result.push_str(text);
102 result.push_str("</think>");
103 }
104 }
105 }
106 result
107 }
108}
109
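/// A segment of a [`Message`]: either plain text or the model's thinking.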
110#[derive(Debug, Clone)]
111pub enum MessageSegment {
112 Text(String),
113 Thinking(String),
114}
115
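/// A snapshot of the project's worktrees, git state, and unsaved buffers at a
/// point in time.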
116#[derive(Debug, Clone, Serialize, Deserialize)]
117pub struct ProjectSnapshot {
118 pub worktree_snapshots: Vec<WorktreeSnapshot>,
119 pub unsaved_buffer_paths: Vec<String>,
120 pub timestamp: DateTime<Utc>,
121}
122
123#[derive(Debug, Clone, Serialize, Deserialize)]
124pub struct WorktreeSnapshot {
125 pub worktree_path: String,
126 pub git_state: Option<GitState>,
127}
128
129#[derive(Debug, Clone, Serialize, Deserialize)]
130pub struct GitState {
131 pub remote_url: Option<String>,
132 pub head_sha: Option<String>,
133 pub current_branch: Option<String>,
134 pub diff: Option<String>,
135}
136
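/// Associates a message with a git checkpoint so the project can be restored
/// to its state as of that message.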
137#[derive(Clone)]
138pub struct ThreadCheckpoint {
139 message_id: MessageId,
140 git_checkpoint: GitStoreCheckpoint,
141}
142
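/// User feedback on a thread, reported to telemetry.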
143#[derive(Copy, Clone, Debug)]
144pub enum ThreadFeedback {
145 Positive,
146 Negative,
147}
148
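/// The state of the most recent checkpoint restore: either still pending or
/// failed with an error.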
149pub enum LastRestoreCheckpoint {
150 Pending {
151 message_id: MessageId,
152 },
153 Error {
154 message_id: MessageId,
155 error: String,
156 },
157}
158
159impl LastRestoreCheckpoint {
160 pub fn message_id(&self) -> MessageId {
161 match self {
162 LastRestoreCheckpoint::Pending { message_id } => *message_id,
163 LastRestoreCheckpoint::Error { message_id, .. } => *message_id,
164 }
165 }
166}
167
168/// A thread of conversation with the LLM.
169pub struct Thread {
170 id: ThreadId,
171 updated_at: DateTime<Utc>,
172 summary: Option<SharedString>,
173 pending_summary: Task<Option<()>>,
174 messages: Vec<Message>,
175 next_message_id: MessageId,
176 context: BTreeMap<ContextId, ContextSnapshot>,
177 context_by_message: HashMap<MessageId, Vec<ContextId>>,
178 system_prompt_context: Option<AssistantSystemPromptContext>,
179 checkpoints_by_message: HashMap<MessageId, GitStoreCheckpoint>,
180 completion_count: usize,
181 pending_completions: Vec<PendingCompletion>,
182 project: Entity<Project>,
183 prompt_builder: Arc<PromptBuilder>,
184 tools: Arc<ToolWorkingSet>,
185 tool_use: ToolUseState,
186 action_log: Entity<ActionLog>,
187 last_restore_checkpoint: Option<LastRestoreCheckpoint>,
188 scripting_session: Entity<ScriptingSession>,
189 scripting_tool_use: ToolUseState,
190 initial_project_snapshot: Shared<Task<Option<Arc<ProjectSnapshot>>>>,
191 cumulative_token_usage: TokenUsage,
192 feedback: Option<ThreadFeedback>,
193}
194
195impl Thread {
196 pub fn new(
197 project: Entity<Project>,
198 tools: Arc<ToolWorkingSet>,
199 prompt_builder: Arc<PromptBuilder>,
200 cx: &mut Context<Self>,
201 ) -> Self {
202 Self {
203 id: ThreadId::new(),
204 updated_at: Utc::now(),
205 summary: None,
206 pending_summary: Task::ready(None),
207 messages: Vec::new(),
208 next_message_id: MessageId(0),
209 context: BTreeMap::default(),
210 context_by_message: HashMap::default(),
211 system_prompt_context: None,
212 checkpoints_by_message: HashMap::default(),
213 completion_count: 0,
214 pending_completions: Vec::new(),
215 project: project.clone(),
216 prompt_builder,
217 tools: tools.clone(),
218 last_restore_checkpoint: None,
219 tool_use: ToolUseState::new(tools.clone()),
220 scripting_session: cx.new(|cx| ScriptingSession::new(project.clone(), cx)),
221 scripting_tool_use: ToolUseState::new(tools),
222 action_log: cx.new(|_| ActionLog::new()),
223 initial_project_snapshot: {
224 let project_snapshot = Self::project_snapshot(project, cx);
225 cx.foreground_executor()
226 .spawn(async move { Some(project_snapshot.await) })
227 .shared()
228 },
229 cumulative_token_usage: TokenUsage::default(),
230 feedback: None,
231 }
232 }
233
234 pub fn deserialize(
235 id: ThreadId,
236 serialized: SerializedThread,
237 project: Entity<Project>,
238 tools: Arc<ToolWorkingSet>,
239 prompt_builder: Arc<PromptBuilder>,
240 cx: &mut Context<Self>,
241 ) -> Self {
242 let next_message_id = MessageId(
243 serialized
244 .messages
245 .last()
246 .map(|message| message.id.0 + 1)
247 .unwrap_or(0),
248 );
249 let tool_use =
250 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |name| {
251 name != ScriptingTool::NAME
252 });
253 let scripting_tool_use =
254 ToolUseState::from_serialized_messages(tools.clone(), &serialized.messages, |name| {
255 name == ScriptingTool::NAME
256 });
257 let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));
258
259 Self {
260 id,
261 updated_at: serialized.updated_at,
262 summary: Some(serialized.summary),
263 pending_summary: Task::ready(None),
264 messages: serialized
265 .messages
266 .into_iter()
267 .map(|message| Message {
268 id: message.id,
269 role: message.role,
270 segments: message
271 .segments
272 .into_iter()
273 .map(|segment| match segment {
274 SerializedMessageSegment::Text { text } => MessageSegment::Text(text),
275 SerializedMessageSegment::Thinking { text } => {
276 MessageSegment::Thinking(text)
277 }
278 })
279 .collect(),
280 })
281 .collect(),
282 next_message_id,
283 context: BTreeMap::default(),
284 context_by_message: HashMap::default(),
285 system_prompt_context: None,
286 checkpoints_by_message: HashMap::default(),
287 completion_count: 0,
288 pending_completions: Vec::new(),
289 last_restore_checkpoint: None,
290 project,
291 prompt_builder,
292 tools,
293 tool_use,
294 action_log: cx.new(|_| ActionLog::new()),
295 scripting_session,
296 scripting_tool_use,
297 initial_project_snapshot: Task::ready(serialized.initial_project_snapshot).shared(),
298 // TODO: persist token usage?
299 cumulative_token_usage: TokenUsage::default(),
300 feedback: None,
301 }
302 }
303
304 pub fn id(&self) -> &ThreadId {
305 &self.id
306 }
307
308 pub fn is_empty(&self) -> bool {
309 self.messages.is_empty()
310 }
311
312 pub fn updated_at(&self) -> DateTime<Utc> {
313 self.updated_at
314 }
315
316 pub fn touch_updated_at(&mut self) {
317 self.updated_at = Utc::now();
318 }
319
320 pub fn summary(&self) -> Option<SharedString> {
321 self.summary.clone()
322 }
323
324 pub fn summary_or_default(&self) -> SharedString {
325 const DEFAULT: SharedString = SharedString::new_static("New Thread");
326 self.summary.clone().unwrap_or(DEFAULT)
327 }
328
329 pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
330 self.summary = Some(summary.into());
331 cx.emit(ThreadEvent::SummaryChanged);
332 }
333
334 pub fn message(&self, id: MessageId) -> Option<&Message> {
335 self.messages.iter().find(|message| message.id == id)
336 }
337
338 pub fn messages(&self) -> impl Iterator<Item = &Message> {
339 self.messages.iter()
340 }
341
342 pub fn is_generating(&self) -> bool {
343 !self.pending_completions.is_empty() || !self.all_tools_finished()
344 }
345
346 pub fn tools(&self) -> &Arc<ToolWorkingSet> {
347 &self.tools
348 }
349
350 pub fn checkpoint_for_message(&self, id: MessageId) -> Option<ThreadCheckpoint> {
351 let checkpoint = self.checkpoints_by_message.get(&id).cloned()?;
352 Some(ThreadCheckpoint {
353 message_id: id,
354 git_checkpoint: checkpoint,
355 })
356 }
357
358 pub fn restore_checkpoint(
359 &mut self,
360 checkpoint: ThreadCheckpoint,
361 cx: &mut Context<Self>,
362 ) -> Task<Result<()>> {
363 self.last_restore_checkpoint = Some(LastRestoreCheckpoint::Pending {
364 message_id: checkpoint.message_id,
365 });
366 cx.emit(ThreadEvent::CheckpointChanged);
367
368 let project = self.project.read(cx);
369 let restore = project
370 .git_store()
371 .read(cx)
372 .restore_checkpoint(checkpoint.git_checkpoint, cx);
373 cx.spawn(async move |this, cx| {
374 let result = restore.await;
375 this.update(cx, |this, cx| {
376 if let Err(err) = result.as_ref() {
377 this.last_restore_checkpoint = Some(LastRestoreCheckpoint::Error {
378 message_id: checkpoint.message_id,
379 error: err.to_string(),
380 });
381 } else {
382 this.last_restore_checkpoint = None;
383 this.truncate(checkpoint.message_id, cx);
384 }
385 cx.emit(ThreadEvent::CheckpointChanged);
386 })?;
387 result
388 })
389 }
390
391 pub fn last_restore_checkpoint(&self) -> Option<&LastRestoreCheckpoint> {
392 self.last_restore_checkpoint.as_ref()
393 }
394
395 pub fn truncate(&mut self, message_id: MessageId, cx: &mut Context<Self>) {
396 let Some(message_ix) = self
397 .messages
398 .iter()
399 .rposition(|message| message.id == message_id)
400 else {
401 return;
402 };
403 for deleted_message in self.messages.drain(message_ix..) {
404 self.context_by_message.remove(&deleted_message.id);
405 self.checkpoints_by_message.remove(&deleted_message.id);
406 }
407 cx.notify();
408 }
409
410 pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
411 let context = self.context_by_message.get(&id)?;
412 Some(
413 context
414 .into_iter()
415 .filter_map(|context_id| self.context.get(&context_id))
416 .cloned()
417 .collect::<Vec<_>>(),
418 )
419 }
420
421 /// Returns whether all of the tool uses have finished running.
422 pub fn all_tools_finished(&self) -> bool {
423 let mut all_pending_tool_uses = self
424 .tool_use
425 .pending_tool_uses()
426 .into_iter()
427 .chain(self.scripting_tool_use.pending_tool_uses());
428
        // If the only pending tool uses left are the ones with errors, then
        // we've finished running all of the pending tools.
431 all_pending_tool_uses.all(|tool_use| tool_use.status.is_error())
432 }
433
434 pub fn tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
435 self.tool_use.tool_uses_for_message(id, cx)
436 }
437
438 pub fn scripting_tool_uses_for_message(&self, id: MessageId, cx: &App) -> Vec<ToolUse> {
439 self.scripting_tool_use.tool_uses_for_message(id, cx)
440 }
441
442 pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
443 self.tool_use.tool_results_for_message(id)
444 }
445
446 pub fn tool_result(&self, id: &LanguageModelToolUseId) -> Option<&LanguageModelToolResult> {
447 self.tool_use.tool_result(id)
448 }
449
450 pub fn scripting_tool_results_for_message(
451 &self,
452 id: MessageId,
453 ) -> Vec<&LanguageModelToolResult> {
454 self.scripting_tool_use.tool_results_for_message(id)
455 }
456
457 pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
458 self.tool_use.message_has_tool_results(message_id)
459 }
460
461 pub fn message_has_scripting_tool_results(&self, message_id: MessageId) -> bool {
462 self.scripting_tool_use.message_has_tool_results(message_id)
463 }
464
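    /// Inserts a user message along with its context and, optionally, a git
    /// checkpoint of the project at that point.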
465 pub fn insert_user_message(
466 &mut self,
467 text: impl Into<String>,
468 context: Vec<ContextSnapshot>,
469 checkpoint: Option<GitStoreCheckpoint>,
470 cx: &mut Context<Self>,
471 ) -> MessageId {
472 let message_id =
473 self.insert_message(Role::User, vec![MessageSegment::Text(text.into())], cx);
474 let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
475 self.context
476 .extend(context.into_iter().map(|context| (context.id, context)));
477 self.context_by_message.insert(message_id, context_ids);
478 if let Some(checkpoint) = checkpoint {
479 self.checkpoints_by_message.insert(message_id, checkpoint);
480 }
481 message_id
482 }
483
484 pub fn insert_message(
485 &mut self,
486 role: Role,
487 segments: Vec<MessageSegment>,
488 cx: &mut Context<Self>,
489 ) -> MessageId {
490 let id = self.next_message_id.post_inc();
491 self.messages.push(Message { id, role, segments });
492 self.touch_updated_at();
493 cx.emit(ThreadEvent::MessageAdded(id));
494 id
495 }
496
497 pub fn edit_message(
498 &mut self,
499 id: MessageId,
500 new_role: Role,
501 new_segments: Vec<MessageSegment>,
502 cx: &mut Context<Self>,
503 ) -> bool {
504 let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
505 return false;
506 };
507 message.role = new_role;
508 message.segments = new_segments;
509 self.touch_updated_at();
510 cx.emit(ThreadEvent::MessageEdited(id));
511 true
512 }
513
514 pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
515 let Some(index) = self.messages.iter().position(|message| message.id == id) else {
516 return false;
517 };
518 self.messages.remove(index);
519 self.context_by_message.remove(&id);
520 self.touch_updated_at();
521 cx.emit(ThreadEvent::MessageDeleted(id));
522 true
523 }
524
525 /// Returns the representation of this [`Thread`] in a textual form.
526 ///
527 /// This is the representation we use when attaching a thread as context to another thread.
528 pub fn text(&self) -> String {
529 let mut text = String::new();
530
531 for message in &self.messages {
532 text.push_str(match message.role {
533 language_model::Role::User => "User:",
534 language_model::Role::Assistant => "Assistant:",
535 language_model::Role::System => "System:",
536 });
537 text.push('\n');
538
539 for segment in &message.segments {
540 match segment {
541 MessageSegment::Text(content) => text.push_str(content),
542 MessageSegment::Thinking(content) => {
543 text.push_str(&format!("<think>{}</think>", content))
544 }
545 }
546 }
547 text.push('\n');
548 }
549
550 text
551 }
552
553 /// Serializes this thread into a format for storage or telemetry.
554 pub fn serialize(&self, cx: &mut Context<Self>) -> Task<Result<SerializedThread>> {
555 let initial_project_snapshot = self.initial_project_snapshot.clone();
556 cx.spawn(async move |this, cx| {
557 let initial_project_snapshot = initial_project_snapshot.await;
558 this.read_with(cx, |this, cx| SerializedThread {
559 version: SerializedThread::VERSION.to_string(),
560 summary: this.summary_or_default(),
561 updated_at: this.updated_at(),
562 messages: this
563 .messages()
564 .map(|message| SerializedMessage {
565 id: message.id,
566 role: message.role,
567 segments: message
568 .segments
569 .iter()
570 .map(|segment| match segment {
571 MessageSegment::Text(text) => {
572 SerializedMessageSegment::Text { text: text.clone() }
573 }
574 MessageSegment::Thinking(text) => {
575 SerializedMessageSegment::Thinking { text: text.clone() }
576 }
577 })
578 .collect(),
579 tool_uses: this
580 .tool_uses_for_message(message.id, cx)
581 .into_iter()
582 .chain(this.scripting_tool_uses_for_message(message.id, cx))
583 .map(|tool_use| SerializedToolUse {
584 id: tool_use.id,
585 name: tool_use.name,
586 input: tool_use.input,
587 })
588 .collect(),
589 tool_results: this
590 .tool_results_for_message(message.id)
591 .into_iter()
592 .chain(this.scripting_tool_results_for_message(message.id))
593 .map(|tool_result| SerializedToolResult {
594 tool_use_id: tool_result.tool_use_id.clone(),
595 is_error: tool_result.is_error,
596 content: tool_result.content.clone(),
597 })
598 .collect(),
599 })
600 .collect(),
601 initial_project_snapshot,
602 })
603 })
604 }
605
606 pub fn set_system_prompt_context(&mut self, context: AssistantSystemPromptContext) {
607 self.system_prompt_context = Some(context);
608 }
609
610 pub fn system_prompt_context(&self) -> &Option<AssistantSystemPromptContext> {
611 &self.system_prompt_context
612 }
613
614 pub fn load_system_prompt_context(
615 &self,
616 cx: &App,
617 ) -> Task<(AssistantSystemPromptContext, Option<ThreadError>)> {
618 let project = self.project.read(cx);
619 let tasks = project
620 .visible_worktrees(cx)
621 .map(|worktree| {
622 Self::load_worktree_info_for_system_prompt(
623 project.fs().clone(),
624 worktree.read(cx),
625 cx,
626 )
627 })
628 .collect::<Vec<_>>();
629
630 cx.spawn(async |_cx| {
631 let results = futures::future::join_all(tasks).await;
632 let mut first_err = None;
633 let worktrees = results
634 .into_iter()
635 .map(|(worktree, err)| {
636 if first_err.is_none() && err.is_some() {
637 first_err = err;
638 }
639 worktree
640 })
641 .collect::<Vec<_>>();
642 (AssistantSystemPromptContext::new(worktrees), first_err)
643 })
644 }
645
646 fn load_worktree_info_for_system_prompt(
647 fs: Arc<dyn Fs>,
648 worktree: &Worktree,
649 cx: &App,
650 ) -> Task<(WorktreeInfoForSystemPrompt, Option<ThreadError>)> {
651 let root_name = worktree.root_name().into();
652 let abs_path = worktree.abs_path();
653
        // Note that Cline supports `.clinerules` being a directory, but that is not currently
        // supported here. This doesn't seem to occur often in GitHub repositories.
656 const RULES_FILE_NAMES: [&'static str; 5] = [
657 ".rules",
658 ".cursorrules",
659 ".windsurfrules",
660 ".clinerules",
661 "CLAUDE.md",
662 ];
663 let selected_rules_file = RULES_FILE_NAMES
664 .into_iter()
665 .filter_map(|name| {
666 worktree
667 .entry_for_path(name)
668 .filter(|entry| entry.is_file())
669 .map(|entry| (entry.path.clone(), worktree.absolutize(&entry.path)))
670 })
671 .next();
672
673 if let Some((rel_rules_path, abs_rules_path)) = selected_rules_file {
674 cx.spawn(async move |_| {
675 let rules_file_result = maybe!(async move {
676 let abs_rules_path = abs_rules_path?;
677 let text = fs.load(&abs_rules_path).await.with_context(|| {
678 format!("Failed to load assistant rules file {:?}", abs_rules_path)
679 })?;
680 anyhow::Ok(RulesFile {
681 rel_path: rel_rules_path,
682 abs_path: abs_rules_path.into(),
683 text: text.trim().to_string(),
684 })
685 })
686 .await;
687 let (rules_file, rules_file_error) = match rules_file_result {
688 Ok(rules_file) => (Some(rules_file), None),
689 Err(err) => (
690 None,
691 Some(ThreadError::Message {
692 header: "Error loading rules file".into(),
693 message: format!("{err}").into(),
694 }),
695 ),
696 };
697 let worktree_info = WorktreeInfoForSystemPrompt {
698 root_name,
699 abs_path,
700 rules_file,
701 };
702 (worktree_info, rules_file_error)
703 })
704 } else {
705 Task::ready((
706 WorktreeInfoForSystemPrompt {
707 root_name,
708 abs_path,
709 rules_file: None,
710 },
711 None,
712 ))
713 }
714 }
715
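    /// Sends the conversation to the model, advertising the enabled tools
    /// (including the scripting tool, when enabled) in the request.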
716 pub fn send_to_model(
717 &mut self,
718 model: Arc<dyn LanguageModel>,
719 request_kind: RequestKind,
720 cx: &mut Context<Self>,
721 ) {
722 let mut request = self.to_completion_request(request_kind, cx);
723 request.tools = {
724 let mut tools = Vec::new();
725
726 if self.tools.is_scripting_tool_enabled() {
727 tools.push(LanguageModelRequestTool {
728 name: ScriptingTool::NAME.into(),
729 description: ScriptingTool::DESCRIPTION.into(),
730 input_schema: ScriptingTool::input_schema(),
731 });
732 }
733
734 tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
735 LanguageModelRequestTool {
736 name: tool.name(),
737 description: tool.description(),
738 input_schema: tool.input_schema(),
739 }
740 }));
741
742 tools
743 };
744
745 self.stream_completion(request, model, cx);
746 }
747
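    /// Builds a completion request from the system prompt, the message history
    /// (with tool uses and results for chat requests), any referenced context,
    /// and a notice about files that changed since they were last read.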
748 pub fn to_completion_request(
749 &self,
750 request_kind: RequestKind,
751 cx: &App,
752 ) -> LanguageModelRequest {
753 let mut request = LanguageModelRequest {
754 messages: vec![],
755 tools: Vec::new(),
756 stop: Vec::new(),
757 temperature: None,
758 };
759
760 if let Some(system_prompt_context) = self.system_prompt_context.as_ref() {
761 if let Some(system_prompt) = self
762 .prompt_builder
763 .generate_assistant_system_prompt(system_prompt_context)
764 .context("failed to generate assistant system prompt")
765 .log_err()
766 {
767 request.messages.push(LanguageModelRequestMessage {
768 role: Role::System,
769 content: vec![MessageContent::Text(system_prompt)],
770 cache: true,
771 });
772 }
773 } else {
774 log::error!("system_prompt_context not set.")
775 }
776
777 let mut referenced_context_ids = HashSet::default();
778
779 for message in &self.messages {
780 if let Some(context_ids) = self.context_by_message.get(&message.id) {
781 referenced_context_ids.extend(context_ids);
782 }
783
784 let mut request_message = LanguageModelRequestMessage {
785 role: message.role,
786 content: Vec::new(),
787 cache: false,
788 };
789
790 match request_kind {
791 RequestKind::Chat => {
792 self.tool_use
793 .attach_tool_results(message.id, &mut request_message);
794 self.scripting_tool_use
795 .attach_tool_results(message.id, &mut request_message);
796 }
797 RequestKind::Summarize => {
798 // We don't care about tool use during summarization.
799 }
800 }
801
802 if !message.segments.is_empty() {
803 request_message
804 .content
805 .push(MessageContent::Text(message.to_string()));
806 }
807
808 match request_kind {
809 RequestKind::Chat => {
810 self.tool_use
811 .attach_tool_uses(message.id, &mut request_message);
812 self.scripting_tool_use
813 .attach_tool_uses(message.id, &mut request_message);
814 }
815 RequestKind::Summarize => {
816 // We don't care about tool use during summarization.
817 }
818 };
819
820 request.messages.push(request_message);
821 }
822
823 if !referenced_context_ids.is_empty() {
824 let mut context_message = LanguageModelRequestMessage {
825 role: Role::User,
826 content: Vec::new(),
827 cache: false,
828 };
829
830 let referenced_context = referenced_context_ids
831 .into_iter()
832 .filter_map(|context_id| self.context.get(context_id))
833 .cloned();
834 attach_context_to_message(&mut context_message, referenced_context);
835
836 request.messages.push(context_message);
837 }
838
839 self.attach_stale_files(&mut request.messages, cx);
840
841 request
842 }
843
844 fn attach_stale_files(&self, messages: &mut Vec<LanguageModelRequestMessage>, cx: &App) {
845 const STALE_FILES_HEADER: &str = "These files changed since last read:";
846
847 let mut stale_message = String::new();
848
849 for stale_file in self.action_log.read(cx).stale_buffers(cx) {
850 let Some(file) = stale_file.read(cx).file() else {
851 continue;
852 };
853
854 if stale_message.is_empty() {
855 write!(&mut stale_message, "{}", STALE_FILES_HEADER).ok();
856 }
857
858 writeln!(&mut stale_message, "- {}", file.path().display()).ok();
859 }
860
861 if !stale_message.is_empty() {
862 let context_message = LanguageModelRequestMessage {
863 role: Role::User,
864 content: vec![stale_message.into()],
865 cache: false,
866 };
867
868 messages.push(context_message);
869 }
870 }
871
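    /// Streams a completion from the model, applying text, thinking, tool-use,
    /// and usage events to the thread as they arrive.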
872 pub fn stream_completion(
873 &mut self,
874 request: LanguageModelRequest,
875 model: Arc<dyn LanguageModel>,
876 cx: &mut Context<Self>,
877 ) {
878 let pending_completion_id = post_inc(&mut self.completion_count);
879
880 let task = cx.spawn(async move |thread, cx| {
881 let stream = model.stream_completion(request, &cx);
882 let initial_token_usage =
883 thread.read_with(cx, |thread, _cx| thread.cumulative_token_usage.clone());
884 let stream_completion = async {
885 let mut events = stream.await?;
886 let mut stop_reason = StopReason::EndTurn;
887 let mut current_token_usage = TokenUsage::default();
888
889 while let Some(event) = events.next().await {
890 let event = event?;
891
892 thread.update(cx, |thread, cx| {
893 match event {
894 LanguageModelCompletionEvent::StartMessage { .. } => {
895 thread.insert_message(
896 Role::Assistant,
897 vec![MessageSegment::Text(String::new())],
898 cx,
899 );
900 }
901 LanguageModelCompletionEvent::Stop(reason) => {
902 stop_reason = reason;
903 }
904 LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
905 thread.cumulative_token_usage =
906 thread.cumulative_token_usage.clone() + token_usage.clone()
907 - current_token_usage.clone();
908 current_token_usage = token_usage;
909 }
910 LanguageModelCompletionEvent::Text(chunk) => {
911 if let Some(last_message) = thread.messages.last_mut() {
912 if last_message.role == Role::Assistant {
913 last_message.push_text(&chunk);
914 cx.emit(ThreadEvent::StreamedAssistantText(
915 last_message.id,
916 chunk,
917 ));
918 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
924 thread.insert_message(
925 Role::Assistant,
926 vec![MessageSegment::Text(chunk.to_string())],
927 cx,
928 );
929 };
930 }
931 }
932 LanguageModelCompletionEvent::Thinking(chunk) => {
933 if let Some(last_message) = thread.messages.last_mut() {
934 if last_message.role == Role::Assistant {
935 last_message.push_thinking(&chunk);
936 cx.emit(ThreadEvent::StreamedAssistantThinking(
937 last_message.id,
938 chunk,
939 ));
940 } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantThinking` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
946 thread.insert_message(
947 Role::Assistant,
948 vec![MessageSegment::Thinking(chunk.to_string())],
949 cx,
950 );
951 };
952 }
953 }
954 LanguageModelCompletionEvent::ToolUse(tool_use) => {
955 if let Some(last_assistant_message) = thread
956 .messages
957 .iter()
958 .rfind(|message| message.role == Role::Assistant)
959 {
960 if tool_use.name.as_ref() == ScriptingTool::NAME {
961 thread.scripting_tool_use.request_tool_use(
962 last_assistant_message.id,
963 tool_use,
964 cx,
965 );
966 } else {
967 thread.tool_use.request_tool_use(
968 last_assistant_message.id,
969 tool_use,
970 cx,
971 );
972 }
973 }
974 }
975 }
976
977 thread.touch_updated_at();
978 cx.emit(ThreadEvent::StreamedCompletion);
979 cx.notify();
980 })?;
981
982 smol::future::yield_now().await;
983 }
984
985 thread.update(cx, |thread, cx| {
986 thread
987 .pending_completions
988 .retain(|completion| completion.id != pending_completion_id);
989
990 if thread.summary.is_none() && thread.messages.len() >= 2 {
991 thread.summarize(cx);
992 }
993 })?;
994
995 anyhow::Ok(stop_reason)
996 };
997
998 let result = stream_completion.await;
999
1000 thread
1001 .update(cx, |thread, cx| {
1002 match result.as_ref() {
1003 Ok(stop_reason) => match stop_reason {
1004 StopReason::ToolUse => {
1005 cx.emit(ThreadEvent::UsePendingTools);
1006 }
1007 StopReason::EndTurn => {}
1008 StopReason::MaxTokens => {}
1009 },
1010 Err(error) => {
1011 if error.is::<PaymentRequiredError>() {
1012 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
1013 } else if error.is::<MaxMonthlySpendReachedError>() {
1014 cx.emit(ThreadEvent::ShowError(
1015 ThreadError::MaxMonthlySpendReached,
1016 ));
1017 } else {
1018 let error_message = error
1019 .chain()
1020 .map(|err| err.to_string())
1021 .collect::<Vec<_>>()
1022 .join("\n");
1023 cx.emit(ThreadEvent::ShowError(ThreadError::Message {
1024 header: "Error interacting with language model".into(),
1025 message: SharedString::from(error_message.clone()),
1026 }));
1027 }
1028
1029 thread.cancel_last_completion(cx);
1030 }
1031 }
1032 cx.emit(ThreadEvent::DoneStreaming);
1033
1034 if let Ok(initial_usage) = initial_token_usage {
1035 let usage = thread.cumulative_token_usage.clone() - initial_usage;
1036
1037 telemetry::event!(
1038 "Assistant Thread Completion",
1039 thread_id = thread.id().to_string(),
1040 model = model.telemetry_id(),
1041 model_provider = model.provider_id().to_string(),
1042 input_tokens = usage.input_tokens,
1043 output_tokens = usage.output_tokens,
1044 cache_creation_input_tokens = usage.cache_creation_input_tokens,
1045 cache_read_input_tokens = usage.cache_read_input_tokens,
1046 );
1047 }
1048 })
1049 .ok();
1050 });
1051
1052 self.pending_completions.push(PendingCompletion {
1053 id: pending_completion_id,
1054 _task: task,
1055 });
1056 }
1057
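    /// Asks the active model to generate a short title for the thread and
    /// stores it as the summary.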
1058 pub fn summarize(&mut self, cx: &mut Context<Self>) {
1059 let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
1060 return;
1061 };
1062 let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
1063 return;
1064 };
1065
1066 if !provider.is_authenticated(cx) {
1067 return;
1068 }
1069
1070 let mut request = self.to_completion_request(RequestKind::Summarize, cx);
1071 request.messages.push(LanguageModelRequestMessage {
1072 role: Role::User,
1073 content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble or prefix like `Here's a concise suggestion:...` or `Title:`"
1075 .into(),
1076 ],
1077 cache: false,
1078 });
1079
1080 self.pending_summary = cx.spawn(async move |this, cx| {
1081 async move {
1082 let stream = model.stream_completion_text(request, &cx);
1083 let mut messages = stream.await?;
1084
1085 let mut new_summary = String::new();
1086 while let Some(message) = messages.stream.next().await {
1087 let text = message?;
1088 let mut lines = text.lines();
1089 new_summary.extend(lines.next());
1090
1091 // Stop if the LLM generated multiple lines.
1092 if lines.next().is_some() {
1093 break;
1094 }
1095 }
1096
1097 this.update(cx, |this, cx| {
1098 if !new_summary.is_empty() {
1099 this.summary = Some(new_summary.into());
1100 }
1101
1102 cx.emit(ThreadEvent::SummaryChanged);
1103 })?;
1104
1105 anyhow::Ok(())
1106 }
1107 .log_err()
1108 .await
1109 });
1110 }
1111
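    /// Runs every idle pending tool use (including scripting tool uses) and
    /// returns them.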
1112 pub fn use_pending_tools(
1113 &mut self,
1114 cx: &mut Context<Self>,
1115 ) -> impl IntoIterator<Item = PendingToolUse> {
1116 let request = self.to_completion_request(RequestKind::Chat, cx);
1117 let pending_tool_uses = self
1118 .tool_use
1119 .pending_tool_uses()
1120 .into_iter()
1121 .filter(|tool_use| tool_use.status.is_idle())
1122 .cloned()
1123 .collect::<Vec<_>>();
1124
1125 for tool_use in pending_tool_uses.iter() {
1126 if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
1127 let task = tool.run(
1128 tool_use.input.clone(),
1129 &request.messages,
1130 self.project.clone(),
1131 self.action_log.clone(),
1132 cx,
1133 );
1134
1135 self.insert_tool_output(
1136 tool_use.id.clone(),
1137 tool_use.ui_text.clone().into(),
1138 task,
1139 cx,
1140 );
1141 }
1142 }
1143
1144 let pending_scripting_tool_uses = self
1145 .scripting_tool_use
1146 .pending_tool_uses()
1147 .into_iter()
1148 .filter(|tool_use| tool_use.status.is_idle())
1149 .cloned()
1150 .collect::<Vec<_>>();
1151
1152 for scripting_tool_use in pending_scripting_tool_uses.iter() {
1153 let task = match ScriptingTool::deserialize_input(scripting_tool_use.input.clone()) {
1154 Err(err) => Task::ready(Err(err.into())),
1155 Ok(input) => {
1156 let (script_id, script_task) =
1157 self.scripting_session.update(cx, move |session, cx| {
1158 session.run_script(input.lua_script, cx)
1159 });
1160
1161 let session = self.scripting_session.clone();
1162 cx.spawn(async move |_, cx| {
1163 script_task.await;
1164
1165 let message = session.read_with(cx, |session, _cx| {
                            // Using an id to get the script output may seem impractical;
                            // why not just include it in the Task result? It's done this
                            // way because we'll later report the script's state as it runs.
1169 session
1170 .get(script_id)
1171 .output_message_for_llm()
1172 .expect("Script shouldn't still be running")
1173 })?;
1174
1175 Ok(message)
1176 })
1177 }
1178 };
1179
1180 let ui_text: SharedString = scripting_tool_use.name.clone().into();
1181
1182 self.insert_scripting_tool_output(scripting_tool_use.id.clone(), ui_text, task, cx);
1183 }
1184
1185 pending_tool_uses
1186 .into_iter()
1187 .chain(pending_scripting_tool_uses)
1188 }
1189
1190 pub fn insert_tool_output(
1191 &mut self,
1192 tool_use_id: LanguageModelToolUseId,
1193 ui_text: SharedString,
1194 output: Task<Result<String>>,
1195 cx: &mut Context<Self>,
1196 ) {
1197 let insert_output_task = cx.spawn({
1198 let tool_use_id = tool_use_id.clone();
1199 async move |thread, cx| {
1200 let output = output.await;
1201 thread
1202 .update(cx, |thread, cx| {
1203 let pending_tool_use = thread
1204 .tool_use
1205 .insert_tool_output(tool_use_id.clone(), output);
1206
1207 cx.emit(ThreadEvent::ToolFinished {
1208 tool_use_id,
1209 pending_tool_use,
1210 canceled: false,
1211 });
1212 })
1213 .ok();
1214 }
1215 });
1216
1217 self.tool_use
1218 .run_pending_tool(tool_use_id, ui_text, insert_output_task);
1219 }
1220
1221 pub fn insert_scripting_tool_output(
1222 &mut self,
1223 tool_use_id: LanguageModelToolUseId,
1224 ui_text: SharedString,
1225 output: Task<Result<String>>,
1226 cx: &mut Context<Self>,
1227 ) {
1228 let insert_output_task = cx.spawn({
1229 let tool_use_id = tool_use_id.clone();
1230 async move |thread, cx| {
1231 let output = output.await;
1232 thread
1233 .update(cx, |thread, cx| {
1234 let pending_tool_use = thread
1235 .scripting_tool_use
1236 .insert_tool_output(tool_use_id.clone(), output);
1237
1238 cx.emit(ThreadEvent::ToolFinished {
1239 tool_use_id,
1240 pending_tool_use,
1241 canceled: false,
1242 });
1243 })
1244 .ok();
1245 }
1246 });
1247
1248 self.scripting_tool_use
1249 .run_pending_tool(tool_use_id, ui_text, insert_output_task);
1250 }
1251
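    /// Extends the thread's context with `updated_context` and inserts a user
    /// message that carries the tool results back to the model.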
1252 pub fn attach_tool_results(
1253 &mut self,
1254 updated_context: Vec<ContextSnapshot>,
1255 cx: &mut Context<Self>,
1256 ) {
1257 self.context.extend(
1258 updated_context
1259 .into_iter()
1260 .map(|context| (context.id, context)),
1261 );
1262
1263 // Insert a user message to contain the tool results.
1264 self.insert_user_message(
1265 // TODO: Sending up a user message without any content results in the model sending back
1266 // responses that also don't have any content. We currently don't handle this case well,
1267 // so for now we provide some text to keep the model on track.
1268 "Here are the tool results.",
1269 Vec::new(),
1270 None,
1271 cx,
1272 );
1273 }
1274
1275 /// Cancels the last pending completion, if there are any pending.
1276 ///
1277 /// Returns whether a completion was canceled.
1278 pub fn cancel_last_completion(&mut self, cx: &mut Context<Self>) -> bool {
1279 if self.pending_completions.pop().is_some() {
1280 true
1281 } else {
1282 let mut canceled = false;
1283 for pending_tool_use in self.tool_use.cancel_pending() {
1284 canceled = true;
1285 cx.emit(ThreadEvent::ToolFinished {
1286 tool_use_id: pending_tool_use.id.clone(),
1287 pending_tool_use: Some(pending_tool_use),
1288 canceled: true,
1289 });
1290 }
1291 canceled
1292 }
1293 }
1294
1295 /// Returns the feedback given to the thread, if any.
1296 pub fn feedback(&self) -> Option<ThreadFeedback> {
1297 self.feedback
1298 }
1299
1300 /// Reports feedback about the thread and stores it in our telemetry backend.
1301 pub fn report_feedback(
1302 &mut self,
1303 feedback: ThreadFeedback,
1304 cx: &mut Context<Self>,
1305 ) -> Task<Result<()>> {
1306 let final_project_snapshot = Self::project_snapshot(self.project.clone(), cx);
1307 let serialized_thread = self.serialize(cx);
1308 let thread_id = self.id().clone();
1309 let client = self.project.read(cx).client();
1310 self.feedback = Some(feedback);
1311 cx.notify();
1312
1313 cx.background_spawn(async move {
1314 let final_project_snapshot = final_project_snapshot.await;
1315 let serialized_thread = serialized_thread.await?;
1316 let thread_data =
1317 serde_json::to_value(serialized_thread).unwrap_or_else(|_| serde_json::Value::Null);
1318
1319 let rating = match feedback {
1320 ThreadFeedback::Positive => "positive",
1321 ThreadFeedback::Negative => "negative",
1322 };
1323 telemetry::event!(
1324 "Assistant Thread Rated",
1325 rating,
1326 thread_id,
1327 thread_data,
1328 final_project_snapshot
1329 );
1330 client.telemetry().flush_events();
1331
1332 Ok(())
1333 })
1334 }
1335
1336 /// Create a snapshot of the current project state including git information and unsaved buffers.
1337 fn project_snapshot(
1338 project: Entity<Project>,
1339 cx: &mut Context<Self>,
1340 ) -> Task<Arc<ProjectSnapshot>> {
1341 let git_store = project.read(cx).git_store().clone();
1342 let worktree_snapshots: Vec<_> = project
1343 .read(cx)
1344 .visible_worktrees(cx)
1345 .map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
1346 .collect();
1347
1348 cx.spawn(async move |_, cx| {
1349 let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
1350
1351 let mut unsaved_buffers = Vec::new();
1352 cx.update(|app_cx| {
1353 let buffer_store = project.read(app_cx).buffer_store();
1354 for buffer_handle in buffer_store.read(app_cx).buffers() {
1355 let buffer = buffer_handle.read(app_cx);
1356 if buffer.is_dirty() {
1357 if let Some(file) = buffer.file() {
1358 let path = file.path().to_string_lossy().to_string();
1359 unsaved_buffers.push(path);
1360 }
1361 }
1362 }
1363 })
1364 .ok();
1365
1366 Arc::new(ProjectSnapshot {
1367 worktree_snapshots,
1368 unsaved_buffer_paths: unsaved_buffers,
1369 timestamp: Utc::now(),
1370 })
1371 })
1372 }
1373
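    /// Captures a worktree's absolute path and git state (remote URL, HEAD SHA,
    /// branch, and diff) for inclusion in a project snapshot.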
1374 fn worktree_snapshot(
1375 worktree: Entity<project::Worktree>,
1376 git_store: Entity<GitStore>,
1377 cx: &App,
1378 ) -> Task<WorktreeSnapshot> {
1379 cx.spawn(async move |cx| {
1380 // Get worktree path and snapshot
1381 let worktree_info = cx.update(|app_cx| {
1382 let worktree = worktree.read(app_cx);
1383 let path = worktree.abs_path().to_string_lossy().to_string();
1384 let snapshot = worktree.snapshot();
1385 (path, snapshot)
1386 });
1387
1388 let Ok((worktree_path, snapshot)) = worktree_info else {
1389 return WorktreeSnapshot {
1390 worktree_path: String::new(),
1391 git_state: None,
1392 };
1393 };
1394
1395 let repo_info = git_store
1396 .update(cx, |git_store, cx| {
1397 git_store
1398 .repositories()
1399 .values()
1400 .find(|repo| repo.read(cx).worktree_id == snapshot.id())
1401 .and_then(|repo| {
1402 let repo = repo.read(cx);
1403 Some((repo.branch().cloned(), repo.local_repository()?))
1404 })
1405 })
1406 .ok()
1407 .flatten();
1408
1409 // Extract git information
1410 let git_state = match repo_info {
1411 None => None,
1412 Some((branch, repo)) => {
1413 let current_branch = branch.map(|branch| branch.name.to_string());
1414 let remote_url = repo.remote_url("origin");
1415 let head_sha = repo.head_sha();
1416
1417 // Get diff asynchronously
1418 let diff = repo
1419 .diff(git::repository::DiffType::HeadToWorktree, cx.clone())
1420 .await
1421 .ok();
1422
1423 Some(GitState {
1424 remote_url,
1425 head_sha,
1426 current_branch,
1427 diff,
1428 })
1429 }
1430 };
1431
1432 WorktreeSnapshot {
1433 worktree_path,
1434 git_state,
1435 }
1436 })
1437 }
1438
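    /// Renders the thread, including tool uses and tool results, as Markdown.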
1439 pub fn to_markdown(&self, cx: &App) -> Result<String> {
1440 let mut markdown = Vec::new();
1441
1442 if let Some(summary) = self.summary() {
1443 writeln!(markdown, "# {summary}\n")?;
1444 };
1445
1446 for message in self.messages() {
1447 writeln!(
1448 markdown,
1449 "## {role}\n",
1450 role = match message.role {
1451 Role::User => "User",
1452 Role::Assistant => "Assistant",
1453 Role::System => "System",
1454 }
1455 )?;
1456 for segment in &message.segments {
1457 match segment {
1458 MessageSegment::Text(text) => writeln!(markdown, "{}\n", text)?,
1459 MessageSegment::Thinking(text) => {
1460 writeln!(markdown, "<think>{}</think>\n", text)?
1461 }
1462 }
1463 }
1464
1465 for tool_use in self.tool_uses_for_message(message.id, cx) {
1466 writeln!(
1467 markdown,
1468 "**Use Tool: {} ({})**",
1469 tool_use.name, tool_use.id
1470 )?;
1471 writeln!(markdown, "```json")?;
1472 writeln!(
1473 markdown,
1474 "{}",
1475 serde_json::to_string_pretty(&tool_use.input)?
1476 )?;
1477 writeln!(markdown, "```")?;
1478 }
1479
1480 for tool_result in self.tool_results_for_message(message.id) {
1481 write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
1482 if tool_result.is_error {
1483 write!(markdown, " (Error)")?;
1484 }
1485
1486 writeln!(markdown, "**\n")?;
1487 writeln!(markdown, "{}", tool_result.content)?;
1488 }
1489 }
1490
1491 Ok(String::from_utf8_lossy(&markdown).to_string())
1492 }
1493
1494 pub fn action_log(&self) -> &Entity<ActionLog> {
1495 &self.action_log
1496 }
1497
1498 pub fn project(&self) -> &Entity<Project> {
1499 &self.project
1500 }
1501
1502 pub fn cumulative_token_usage(&self) -> TokenUsage {
1503 self.cumulative_token_usage.clone()
1504 }
1505}
1506
1507#[derive(Debug, Clone)]
1508pub enum ThreadError {
1509 PaymentRequired,
1510 MaxMonthlySpendReached,
1511 Message {
1512 header: SharedString,
1513 message: SharedString,
1514 },
1515}
1516
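/// Events emitted by a [`Thread`] as it streams completions and runs tools.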
1517#[derive(Debug, Clone)]
1518pub enum ThreadEvent {
1519 ShowError(ThreadError),
1520 StreamedCompletion,
1521 StreamedAssistantText(MessageId, String),
1522 StreamedAssistantThinking(MessageId, String),
1523 DoneStreaming,
1524 MessageAdded(MessageId),
1525 MessageEdited(MessageId),
1526 MessageDeleted(MessageId),
1527 SummaryChanged,
1528 UsePendingTools,
1529 ToolFinished {
1530 #[allow(unused)]
1531 tool_use_id: LanguageModelToolUseId,
1532 /// The pending tool use that corresponds to this tool.
1533 pending_tool_use: Option<PendingToolUse>,
1534 /// Whether the tool was canceled by the user.
1535 canceled: bool,
1536 },
1537 CheckpointChanged,
1538}
1539
1540impl EventEmitter<ThreadEvent> for Thread {}
1541
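/// A completion request that is currently streaming; the task drives the
/// stream and is dropped when the completion finishes or is canceled.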
1542struct PendingCompletion {
1543 id: usize,
1544 _task: Task<()>,
1545}