use std::sync::Arc;

use anyhow::{Context as _, Result};
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use futures::StreamExt as _;
use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
    LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
    Role, StopReason,
};
use project::Project;
use prompt_store::PromptBuilder;
use scripting_tool::{ScriptingSession, ScriptingTool};
use serde::{Deserialize, Serialize};
use util::{post_inc, ResultExt, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
use crate::thread_store::SavedThread;
use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}

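/// A unique identifier for a [`Thread`], generated from a v4 UUID.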
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

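/// Identifies a [`Message`] within a [`Thread`]. IDs are assigned sequentially, starting at 0.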
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Arc<ToolWorkingSet>,
    tool_use: ToolUseState,
    scripting_session: Entity<ScriptingSession>,
    scripting_tool_use: ToolUseState,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));

        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project,
            prompt_builder,
            tools,
            tool_use: ToolUseState::new(),
            scripting_session,
            scripting_tool_use: ToolUseState::new(),
        }
    }

    pub fn from_saved(
        id: ThreadId,
        saved: SavedThread,
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            saved
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use =
            ToolUseState::from_saved_messages(&saved.messages, |name| name != ScriptingTool::NAME);
        let scripting_tool_use =
            ToolUseState::from_saved_messages(&saved.messages, |name| name == ScriptingTool::NAME);
        let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));

        Self {
            id,
            updated_at: saved.updated_at,
            summary: Some(saved.summary),
            pending_summary: Task::ready(None),
            messages: saved
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    text: message.text,
                })
                .collect(),
            next_message_id,
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project,
            prompt_builder,
            tools,
            tool_use,
            scripting_session,
            scripting_tool_use,
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_streaming(&self) -> bool {
        !self.pending_completions.is_empty()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .into_iter()
                .filter_map(|context_id| self.context.get(&context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        let mut all_pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .chain(self.scripting_tool_use.pending_tool_uses());

        // If the only pending tool uses left are the ones with errors, then that means that we've finished running all
        // of the pending tools.
        all_pending_tool_uses.all(|tool_use| tool_use.status.is_error())
    }

    pub fn tool_uses_for_message(&self, id: MessageId) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id)
    }

    pub fn scripting_tool_uses_for_message(&self, id: MessageId) -> Vec<ToolUse> {
        self.scripting_tool_use.tool_uses_for_message(id)
    }

    pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(id)
    }

    pub fn scripting_tool_results_for_message(
        &self,
        id: MessageId,
    ) -> Vec<&LanguageModelToolResult> {
        self.scripting_tool_use.tool_results_for_message(id)
    }

    pub fn scripting_changed_buffers<'a>(
        &self,
        cx: &'a App,
    ) -> impl ExactSizeIterator<Item = &'a Entity<language::Buffer>> {
        self.scripting_session.read(cx).changed_buffers()
    }

    pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
        self.tool_use.message_has_tool_results(message_id)
    }

    pub fn message_has_scripting_tool_results(&self, message_id: MessageId) -> bool {
        self.scripting_tool_use.message_has_tool_results(message_id)
    }

    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let message_id = self.insert_message(Role::User, text, cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
        message_id
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    pub fn edit_message(
        &mut self,
        id: MessageId,
        new_role: Role,
        new_text: String,
        cx: &mut Context<Self>,
    ) -> bool {
        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
            return false;
        };
        message.role = new_role;
        message.text = new_text;
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageEdited(id));
        true
    }

    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
            return false;
        };
        self.messages.remove(index);
        self.context_by_message.remove(&id);
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageDeleted(id));
        true
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
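    ///
    /// Each message is rendered as its role label on one line followed by its text, so a short
    /// exchange looks roughly like:
    ///
    /// ```text
    /// User:
    /// How do I exit Vim?
    /// Assistant:
    /// Press Escape, then type :q! and press Enter.
    /// ```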
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            text.push_str(&message.text);
            text.push('\n');
        }

        text
    }

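    /// Builds a completion request from this thread (via [`Self::to_completion_request`]),
    /// attaches the scripting tool (if enabled) plus all enabled tools from the working set,
    /// and starts streaming the model's response.
    ///
    /// A minimal usage sketch (assumes an active model is already configured):
    ///
    /// ```ignore
    /// if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
    ///     thread.update(cx, |thread, cx| {
    ///         thread.send_to_model(model, RequestKind::Chat, cx);
    ///     });
    /// }
    /// ```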
    pub fn send_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        request_kind: RequestKind,
        cx: &mut Context<Self>,
    ) {
        let mut request = self.to_completion_request(request_kind, cx);
        request.tools = {
            let mut tools = Vec::new();

            if self.tools.is_scripting_tool_enabled() {
                tools.push(LanguageModelRequestTool {
                    name: ScriptingTool::NAME.into(),
                    description: ScriptingTool::DESCRIPTION.into(),
                    input_schema: ScriptingTool::input_schema(),
                });
            }

            tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
                LanguageModelRequestTool {
                    name: tool.name(),
                    description: tool.description(),
                    input_schema: tool.input_schema(),
                }
            }));

            tools
        };

        self.stream_completion(request, model, cx);
    }

    pub fn to_completion_request(
        &self,
        request_kind: RequestKind,
        cx: &App,
    ) -> LanguageModelRequest {
        let worktree_root_names = self
            .project
            .read(cx)
            .worktree_root_names(cx)
            .map(ToString::to_string)
            .collect::<Vec<_>>();
        let system_prompt = self
            .prompt_builder
            .generate_assistant_system_prompt(worktree_root_names)
            .context("failed to generate assistant system prompt")
            .log_err()
            .unwrap_or_default();

        let mut request = LanguageModelRequest {
            messages: vec![LanguageModelRequestMessage {
                role: Role::System,
                content: vec![MessageContent::Text(system_prompt)],
                cache: true,
            }],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_results(message.id, &mut request_message);
                    self.scripting_tool_use
                        .attach_tool_results(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                    self.scripting_tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            };

            request.messages.push(request_message);
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        request
    }

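    /// Streams a completion for the given request from the model, appending assistant text and
    /// recording tool uses as events arrive. When the stream stops with [`StopReason::ToolUse`],
    /// a [`ThreadEvent::UsePendingTools`] event is emitted so the pending tools can be run.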
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the
                                        // beginning of a new Assistant response.
                                        //
                                        // Importantly: we do *not* emit a `StreamedAssistantText` event here, as that
                                        // would duplicate the chunk's text in the rendered Markdown.
                                        thread.insert_message(Role::Assistant, chunk, cx);
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    if tool_use.name.as_ref() == ScriptingTool::NAME {
                                        thread
                                            .scripting_tool_use
                                            .request_tool_use(last_assistant_message.id, tool_use);
                                    } else {
                                        thread
                                            .tool_use
                                            .request_tool_use(last_assistant_message.id, tool_use);
                                    }
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message.clone()),
                            )));
                        }

                        thread.cancel_last_completion();
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

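    /// Asks the active model for a short title summarizing this thread, emitting
    /// [`ThreadEvent::SummaryChanged`] once a summary is available. Does nothing if no
    /// provider or model is configured, or if the provider is not authenticated.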
    pub fn summarize(&mut self, cx: &mut Context<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Summarize, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
611 "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

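    /// Runs every idle pending tool use (both regular tools and the scripting tool),
    /// inserting each tool's output into the thread as it completes.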
    pub fn use_pending_tools(&mut self, cx: &mut Context<Self>) {
        let request = self.to_completion_request(RequestKind::Chat, cx);
        let pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for tool_use in pending_tool_uses {
            if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
                let task = tool.run(tool_use.input, &request.messages, self.project.clone(), cx);

                self.insert_tool_output(tool_use.id.clone(), task, cx);
            }
        }

        let pending_scripting_tool_uses = self
            .scripting_tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for scripting_tool_use in pending_scripting_tool_uses {
            let task = match ScriptingTool::deserialize_input(scripting_tool_use.input) {
                Err(err) => Task::ready(Err(err.into())),
                Ok(input) => {
                    let (script_id, script_task) =
                        self.scripting_session.update(cx, move |session, cx| {
                            session.run_script(input.lua_script, cx)
                        });

                    let session = self.scripting_session.clone();
                    cx.spawn(|_, cx| async move {
                        script_task.await;

                        let message = session.read_with(&cx, |session, _cx| {
                            // Using an id to look up the script output might seem roundabout;
                            // why not just include it in the Task result? We do it this way
                            // because we'll later want to report the script's state while it runs.
                            session
                                .get(script_id)
                                .output_message_for_llm()
                                .expect("Script shouldn't still be running")
                        })?;

                        Ok(message)
                    })
                }
            };

            self.insert_scripting_tool_output(scripting_tool_use.id.clone(), task, cx);
        }
    }

    pub fn insert_tool_output(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        let pending_tool_use = thread
                            .tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                        });
                    })
                    .ok();
            }
        });

        self.tool_use
            .run_pending_tool(tool_use_id, insert_output_task);
    }

    pub fn insert_scripting_tool_output(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        let pending_tool_use = thread
                            .scripting_tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                        });
                    })
                    .ok();
            }
        });

        self.scripting_tool_use
            .run_pending_tool(tool_use_id, insert_output_task);
    }

    pub fn send_tool_results_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        // Insert a user message to contain the tool results.
        self.insert_user_message(
            // TODO: Sending up a user message without any content results in the model sending back
            // responses that also don't have any content. We currently don't handle this case well,
            // so for now we provide some text to keep the model on track.
            "Here are the tool results.",
            Vec::new(),
            cx,
        );
        self.send_to_model(model, RequestKind::Chat, cx);
    }

    /// Cancels the last pending completion, if there are any pending.
    ///
    /// Returns whether a completion was canceled.
    pub fn cancel_last_completion(&mut self) -> bool {
        if let Some(_last_completion) = self.pending_completions.pop() {
            true
        } else {
            false
        }
    }
}

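/// An error surfaced to the user while streaming a completion, via [`ThreadEvent::ShowError`].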
#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

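/// Events emitted by a [`Thread`] as messages change, completions stream, and tools finish.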
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    MessageEdited(MessageId),
    MessageDeleted(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

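/// A completion request that is currently streaming; dropping it drops (and thereby cancels)
/// the underlying task.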
struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}