thread.rs

use std::io::Write;
use std::sync::Arc;

use anyhow::{Context as _, Result};
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use futures::StreamExt as _;
use gpui::{App, AppContext, Context, Entity, EventEmitter, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
    LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
    Role, StopReason, TokenUsage,
};
use project::Project;
use prompt_store::{AssistantSystemPromptWorktree, PromptBuilder};
use scripting_tool::{ScriptingSession, ScriptingTool};
use serde::{Deserialize, Serialize};
use util::{post_inc, ResultExt, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
use crate::thread_store::SavedThread;
use crate::tool_use::{PendingToolUse, ToolUse, ToolUseState};

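/// The kind of request being sent to the language model.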
#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}

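/// A unique identifier for a [`Thread`], backed by a UUID.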
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

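/// An identifier for a [`Message`] within a thread, assigned monotonically.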
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(pub(crate) usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    project: Entity<Project>,
    prompt_builder: Arc<PromptBuilder>,
    tools: Arc<ToolWorkingSet>,
    tool_use: ToolUseState,
    scripting_session: Entity<ScriptingSession>,
    scripting_tool_use: ToolUseState,
    cumulative_token_usage: TokenUsage,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));

        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project,
            prompt_builder,
            tools,
            tool_use: ToolUseState::new(),
            scripting_session,
            scripting_tool_use: ToolUseState::new(),
            cumulative_token_usage: TokenUsage::default(),
        }
    }

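    /// Reconstructs a thread from a previously persisted [`SavedThread`].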
    pub fn from_saved(
        id: ThreadId,
        saved: SavedThread,
        project: Entity<Project>,
        tools: Arc<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(
            saved
                .messages
                .last()
                .map(|message| message.id.0 + 1)
                .unwrap_or(0),
        );
        let tool_use =
            ToolUseState::from_saved_messages(&saved.messages, |name| name != ScriptingTool::NAME);
        let scripting_tool_use =
            ToolUseState::from_saved_messages(&saved.messages, |name| name == ScriptingTool::NAME);
        let scripting_session = cx.new(|cx| ScriptingSession::new(project.clone(), cx));

        Self {
            id,
            updated_at: saved.updated_at,
            summary: Some(saved.summary),
            pending_summary: Task::ready(None),
            messages: saved
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    text: message.text,
                })
                .collect(),
            next_message_id,
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            project,
            prompt_builder,
            tools,
            tool_use,
            scripting_session,
            scripting_tool_use,
            // TODO: persist token usage?
            cumulative_token_usage: TokenUsage::default(),
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_streaming(&self) -> bool {
        !self.pending_completions.is_empty() || !self.all_tools_finished()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .into_iter()
                .filter_map(|context_id| self.context.get(&context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    /// Returns whether all of the tool uses have finished running.
    pub fn all_tools_finished(&self) -> bool {
        let mut all_pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .chain(self.scripting_tool_use.pending_tool_uses());

        // If the only pending tool uses left are the ones with errors, then we've
        // finished running all of the pending tools.
        all_pending_tool_uses.all(|tool_use| tool_use.status.is_error())
    }

    pub fn tool_uses_for_message(&self, id: MessageId) -> Vec<ToolUse> {
        self.tool_use.tool_uses_for_message(id)
    }

    pub fn scripting_tool_uses_for_message(&self, id: MessageId) -> Vec<ToolUse> {
        self.scripting_tool_use.tool_uses_for_message(id)
    }

    pub fn tool_results_for_message(&self, id: MessageId) -> Vec<&LanguageModelToolResult> {
        self.tool_use.tool_results_for_message(id)
    }

    pub fn scripting_tool_results_for_message(
        &self,
        id: MessageId,
    ) -> Vec<&LanguageModelToolResult> {
        self.scripting_tool_use.tool_results_for_message(id)
    }

    pub fn scripting_changed_buffers<'a>(
        &self,
        cx: &'a App,
    ) -> impl ExactSizeIterator<Item = &'a Entity<language::Buffer>> {
        self.scripting_session.read(cx).changed_buffers()
    }

    pub fn message_has_tool_results(&self, message_id: MessageId) -> bool {
        self.tool_use.message_has_tool_results(message_id)
    }

    pub fn message_has_scripting_tool_results(&self, message_id: MessageId) -> bool {
        self.scripting_tool_use.message_has_tool_results(message_id)
    }

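    /// Inserts a new user message and records the context snapshots attached to it.
    ///
    /// Illustrative sketch only (the `thread` entity and `cx` are assumed to exist):
    ///
    /// ```ignore
    /// thread.update(cx, |thread, cx| {
    ///     thread.insert_user_message("Hello!", Vec::new(), cx);
    /// });
    /// ```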
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let message_id = self.insert_message(Role::User, text, cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
        message_id
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    pub fn edit_message(
        &mut self,
        id: MessageId,
        new_role: Role,
        new_text: String,
        cx: &mut Context<Self>,
    ) -> bool {
        let Some(message) = self.messages.iter_mut().find(|message| message.id == id) else {
            return false;
        };
        message.role = new_role;
        message.text = new_text;
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageEdited(id));
        true
    }

    pub fn delete_message(&mut self, id: MessageId, cx: &mut Context<Self>) -> bool {
        let Some(index) = self.messages.iter().position(|message| message.id == id) else {
            return false;
        };
        self.messages.remove(index);
        self.context_by_message.remove(&id);
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageDeleted(id));
        true
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            text.push_str(&message.text);
            text.push('\n');
        }

        text
    }

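    /// Builds a completion request of the given kind, attaches the enabled tools
    /// (including the scripting tool when enabled), and starts streaming the model's
    /// response.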
    pub fn send_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        request_kind: RequestKind,
        cx: &mut Context<Self>,
    ) {
        let mut request = self.to_completion_request(request_kind, cx);
        request.tools = {
            let mut tools = Vec::new();

            if self.tools.is_scripting_tool_enabled() {
                tools.push(LanguageModelRequestTool {
                    name: ScriptingTool::NAME.into(),
                    description: ScriptingTool::DESCRIPTION.into(),
                    input_schema: ScriptingTool::input_schema(),
                });
            }

            tools.extend(self.tools().enabled_tools(cx).into_iter().map(|tool| {
                LanguageModelRequestTool {
                    name: tool.name(),
                    description: tool.description(),
                    input_schema: tool.input_schema(),
                }
            }));

            tools
        };

        self.stream_completion(request, model, cx);
    }

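    /// Converts the thread into a [`LanguageModelRequest`]: a system prompt built from
    /// the visible worktrees, followed by the conversation messages (with tool uses and
    /// results attached for chat requests) and any referenced context.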
    pub fn to_completion_request(
        &self,
        request_kind: RequestKind,
        cx: &App,
    ) -> LanguageModelRequest {
        let worktree_root_names = self
            .project
            .read(cx)
            .visible_worktrees(cx)
            .map(|worktree| {
                let worktree = worktree.read(cx);
                AssistantSystemPromptWorktree {
                    root_name: worktree.root_name().into(),
                    abs_path: worktree.abs_path(),
                }
            })
            .collect::<Vec<_>>();
        let system_prompt = self
            .prompt_builder
            .generate_assistant_system_prompt(worktree_root_names)
            .context("failed to generate assistant system prompt")
            .log_err()
            .unwrap_or_default();

        let mut request = LanguageModelRequest {
            messages: vec![LanguageModelRequestMessage {
                role: Role::System,
                content: vec![MessageContent::Text(system_prompt)],
                cache: true,
            }],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_results(message.id, &mut request_message);
                    self.scripting_tool_use
                        .attach_tool_results(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            match request_kind {
                RequestKind::Chat => {
                    self.tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                    self.scripting_tool_use
                        .attach_tool_uses(message.id, &mut request_message);
                }
                RequestKind::Summarize => {
                    // We don't care about tool use during summarization.
                }
            };

            request.messages.push(request_message);
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        request
    }

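    /// Streams a completion from the model, appending assistant text and tool-use
    /// requests to the thread as events arrive, and reporting failures via
    /// [`ThreadEvent::ShowError`].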
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;
                let mut current_token_usage = TokenUsage::default();

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::UsageUpdate(token_usage) => {
                                thread.cumulative_token_usage =
                                    thread.cumulative_token_usage.clone() + token_usage.clone()
                                        - current_token_usage.clone();
                                current_token_usage = token_usage;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(Role::Assistant, chunk, cx);
                                    };
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    if tool_use.name.as_ref() == ScriptingTool::NAME {
                                        thread
                                            .scripting_tool_use
                                            .request_tool_use(last_assistant_message.id, tool_use);
                                    } else {
                                        thread
                                            .tool_use
                                            .request_tool_use(last_assistant_message.id, tool_use);
                                    }
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message.clone()),
                            )));
                        }

                        thread.cancel_last_completion();
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

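    /// Asks the active model for a short title for this conversation and stores it as
    /// the thread's summary once it arrives.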
    pub fn summarize(&mut self, cx: &mut Context<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Summarize, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble or prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

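    /// Runs every idle pending tool use, dispatching regular tools to the tool working
    /// set and scripting tool uses to the scripting session.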
    pub fn use_pending_tools(&mut self, cx: &mut Context<Self>) {
        let request = self.to_completion_request(RequestKind::Chat, cx);
        let pending_tool_uses = self
            .tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for tool_use in pending_tool_uses {
            if let Some(tool) = self.tools.tool(&tool_use.name, cx) {
                let task = tool.run(tool_use.input, &request.messages, self.project.clone(), cx);

                self.insert_tool_output(tool_use.id.clone(), task, cx);
            }
        }

        let pending_scripting_tool_uses = self
            .scripting_tool_use
            .pending_tool_uses()
            .into_iter()
            .filter(|tool_use| tool_use.status.is_idle())
            .cloned()
            .collect::<Vec<_>>();

        for scripting_tool_use in pending_scripting_tool_uses {
            let task = match ScriptingTool::deserialize_input(scripting_tool_use.input) {
                Err(err) => Task::ready(Err(err.into())),
                Ok(input) => {
                    let (script_id, script_task) =
                        self.scripting_session.update(cx, move |session, cx| {
                            session.run_script(input.lua_script, cx)
                        });

                    let session = self.scripting_session.clone();
                    cx.spawn(|_, cx| async move {
                        script_task.await;

                        let message = session.read_with(&cx, |session, _cx| {
                            // Using an id to look up the script output may seem impractical:
                            // why not just include it in the Task result? We do it this way
                            // because we'll later report the script's state while it is running.
                            session
                                .get(script_id)
                                .output_message_for_llm()
                                .expect("Script shouldn't still be running")
                        })?;

                        Ok(message)
                    })
                }
            };

            self.insert_scripting_tool_output(scripting_tool_use.id.clone(), task, cx);
        }
    }

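    /// Registers a running tool task and, once it completes, records its output and
    /// emits [`ThreadEvent::ToolFinished`].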
    pub fn insert_tool_output(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        let pending_tool_use = thread
                            .tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                        });
                    })
                    .ok();
            }
        });

        self.tool_use
            .run_pending_tool(tool_use_id, insert_output_task);
    }

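    /// Same as [`Thread::insert_tool_output`], but tracked in the scripting tool's own
    /// [`ToolUseState`].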
    pub fn insert_scripting_tool_output(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        let pending_tool_use = thread
                            .scripting_tool_use
                            .insert_tool_output(tool_use_id.clone(), output);

                        cx.emit(ThreadEvent::ToolFinished {
                            tool_use_id,
                            pending_tool_use,
                        });
                    })
                    .ok();
            }
        });

        self.scripting_tool_use
            .run_pending_tool(tool_use_id, insert_output_task);
    }

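    /// Sends the collected tool results back to the model as a follow-up chat request.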
    pub fn send_tool_results_to_model(
        &mut self,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        // Insert a user message to contain the tool results.
        self.insert_user_message(
            // TODO: Sending up a user message without any content results in the model sending back
            // responses that also don't have any content. We currently don't handle this case well,
            // so for now we provide some text to keep the model on track.
            "Here are the tool results.",
            Vec::new(),
            cx,
        );
        self.send_to_model(model, RequestKind::Chat, cx);
    }

    /// Cancels the last pending completion, if any are pending.
    ///
    /// Returns whether a completion was canceled.
    pub fn cancel_last_completion(&mut self) -> bool {
        self.pending_completions.pop().is_some()
    }

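    /// Renders the thread, including each message's tool uses and tool results, as
    /// Markdown.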
    pub fn to_markdown(&self) -> Result<String> {
        let mut markdown = Vec::new();

        if let Some(summary) = self.summary() {
            writeln!(markdown, "# {summary}\n")?;
        };

        for message in self.messages() {
            writeln!(
                markdown,
                "## {role}\n",
                role = match message.role {
                    Role::User => "User",
                    Role::Assistant => "Assistant",
                    Role::System => "System",
                }
            )?;
            writeln!(markdown, "{}\n", message.text)?;

            for tool_use in self.tool_uses_for_message(message.id) {
                writeln!(
                    markdown,
                    "**Use Tool: {} ({})**",
                    tool_use.name, tool_use.id
                )?;
                writeln!(markdown, "```json")?;
                writeln!(
                    markdown,
                    "{}",
                    serde_json::to_string_pretty(&tool_use.input)?
                )?;
                writeln!(markdown, "```")?;
            }

            for tool_result in self.tool_results_for_message(message.id) {
                write!(markdown, "**Tool Results: {}", tool_result.tool_use_id)?;
                if tool_result.is_error {
                    write!(markdown, " (Error)")?;
                }

                writeln!(markdown, "**\n")?;
                writeln!(markdown, "{}", tool_result.content)?;
            }
        }

        Ok(String::from_utf8_lossy(&markdown).to_string())
    }

    pub fn cumulative_token_usage(&self) -> TokenUsage {
        self.cumulative_token_usage.clone()
    }
}

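/// An error surfaced to the user while streaming a completion.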
#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

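/// Events emitted by a [`Thread`] for the UI and other observers to react to.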
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    MessageEdited(MessageId),
    MessageDeleted(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
        /// The pending tool use that corresponds to this tool.
        pending_tool_use: Option<PendingToolUse>,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

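/// An in-flight completion, identified by `id`; the spawned task is held here so it
/// keeps running until it finishes or the completion is canceled.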
struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}