thread.rs

use std::sync::Arc;

use anyhow::Result;
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use futures::future::Shared;
use futures::{FutureExt as _, StreamExt as _};
use gpui::{App, Context, EventEmitter, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolUse,
    LanguageModelToolUseId, MaxMonthlySpendReachedError, MessageContent, PaymentRequiredError,
    Role, StopReason,
};
use serde::{Deserialize, Serialize};
use util::{post_inc, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
use crate::thread_store::SavedThread;

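/// The kind of completion request being sent to the language model.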
#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
    /// Used when summarizing a thread.
    Summarize,
}

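/// A unique identifier for a [`Thread`].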
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

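/// A unique identifier for a [`Message`] within a thread.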
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    tools: Arc<ToolWorkingSet>,
    tool_uses_by_message: HashMap<MessageId, Vec<LanguageModelToolUse>>,
    tool_results_by_message: HashMap<MessageId, Vec<LanguageModelToolResult>>,
    pending_tool_uses_by_id: HashMap<LanguageModelToolUseId, PendingToolUse>,
}

impl Thread {
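    /// Creates a new, empty thread.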
    pub fn new(tools: Arc<ToolWorkingSet>, _cx: &mut Context<Self>) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            tools,
            tool_uses_by_message: HashMap::default(),
            tool_results_by_message: HashMap::default(),
            pending_tool_uses_by_id: HashMap::default(),
        }
    }

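    /// Reconstructs a thread from a previously persisted [`SavedThread`].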
    pub fn from_saved(
        id: ThreadId,
        saved: SavedThread,
        tools: Arc<ToolWorkingSet>,
        _cx: &mut Context<Self>,
    ) -> Self {
        let next_message_id = MessageId(saved.messages.len());

        Self {
            id,
            updated_at: saved.updated_at,
            summary: Some(saved.summary),
            pending_summary: Task::ready(None),
            messages: saved
                .messages
                .into_iter()
                .map(|message| Message {
                    id: message.id,
                    role: message.role,
                    text: message.text,
                })
                .collect(),
            next_message_id,
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            tools,
            tool_uses_by_message: HashMap::default(),
            tool_results_by_message: HashMap::default(),
            pending_tool_uses_by_id: HashMap::default(),
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut Context<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_streaming(&self) -> bool {
        !self.pending_completions.is_empty()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    pub fn pending_tool_uses(&self) -> Vec<&PendingToolUse> {
        self.pending_tool_uses_by_id.values().collect()
    }

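    /// Inserts a user message with the given text and attaches the provided context to it.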
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        cx: &mut Context<Self>,
    ) {
        let message_id = self.insert_message(Role::User, text, cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
    }

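    /// Appends a message with the given role and text, emitting [`ThreadEvent::MessageAdded`].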
    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut Context<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            text.push_str(&message.text);
            text.push('\n');
        }

        text
    }

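    /// Builds a [`LanguageModelRequest`] from the messages in this thread.
    ///
    /// Tool uses and tool results are only included for [`RequestKind::Chat`] requests, and any
    /// context referenced by the messages is attached as a trailing user message.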
    pub fn to_completion_request(
        &self,
        request_kind: RequestKind,
        _cx: &App,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };
            if let Some(tool_results) = self.tool_results_by_message.get(&message.id) {
                match request_kind {
                    RequestKind::Chat => {
                        for tool_result in tool_results {
                            request_message
                                .content
                                .push(MessageContent::ToolResult(tool_result.clone()));
                        }
                    }
                    RequestKind::Summarize => {
                        // We don't care about tool use during summarization.
                    }
                }
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            if let Some(tool_uses) = self.tool_uses_by_message.get(&message.id) {
                match request_kind {
                    RequestKind::Chat => {
                        for tool_use in tool_uses {
                            request_message
                                .content
                                .push(MessageContent::ToolUse(tool_use.clone()));
                        }
                    }
                    RequestKind::Summarize => {
                        // We don't care about tool use during summarization.
                    }
                }
            }

            request.messages.push(request_message);
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        request
    }

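    /// Streams a completion for the given request from the model, appending the assistant's
    /// response to this thread as events arrive.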
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut Context<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    } else {
                                        // If we don't have an Assistant message yet, assume this chunk marks the beginning
                                        // of a new Assistant response.
                                        //
                                        // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
                                        // will result in duplicating the text of the chunk in the rendered Markdown.
                                        thread.insert_message(Role::Assistant, chunk, cx);
                                    }
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    thread
                                        .tool_uses_by_message
                                        .entry(last_assistant_message.id)
                                        .or_default()
                                        .push(tool_use.clone());

                                    thread.pending_tool_uses_by_id.insert(
                                        tool_use.id.clone(),
                                        PendingToolUse {
                                            assistant_message_id: last_assistant_message.id,
                                            id: tool_use.id,
                                            name: tool_use.name,
                                            input: tool_use.input,
                                            status: PendingToolUseStatus::Idle,
                                        },
                                    );
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message.clone()),
                            )));
                        }

                        thread.cancel_last_completion();
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

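    /// Asks the active model to generate a short summary of this thread, which is used as its
    /// title.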
    pub fn summarize(&mut self, cx: &mut Context<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Summarize, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

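    /// Records the output of a tool use once the given task resolves, attaching the result to
    /// the user message that follows the requesting assistant message.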
    pub fn insert_tool_output(
        &mut self,
        assistant_message_id: MessageId,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut Context<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        // The tool use was requested by an Assistant message,
                        // so we want to attach the tool results to the next
                        // user message.
                        let next_user_message = MessageId(assistant_message_id.0 + 1);

                        let tool_results = thread
                            .tool_results_by_message
                            .entry(next_user_message)
                            .or_default();

                        match output {
                            Ok(output) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.clone(),
                                    content: output,
                                    is_error: false,
                                });

                                cx.emit(ThreadEvent::ToolFinished { tool_use_id });
                            }
                            Err(err) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.clone(),
                                    content: err.to_string(),
                                    is_error: true,
                                });

                                if let Some(tool_use) =
                                    thread.pending_tool_uses_by_id.get_mut(&tool_use_id)
                                {
                                    tool_use.status = PendingToolUseStatus::Error(err.to_string());
                                }
                            }
                        }
                    })
                    .ok();
            }
        });

        if let Some(tool_use) = self.pending_tool_uses_by_id.get_mut(&tool_use_id) {
            tool_use.status = PendingToolUseStatus::Running {
                _task: insert_output_task.shared(),
            };
        }
    }

    /// Cancels the last pending completion, if there are any pending.
    ///
    /// Returns whether a completion was canceled.
    pub fn cancel_last_completion(&mut self) -> bool {
        if let Some(_last_completion) = self.pending_completions.pop() {
            true
        } else {
            false
        }
    }
}

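/// An error surfaced to the user from a [`Thread`].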
#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

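/// An event emitted by a [`Thread`].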
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

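/// A completion request that is currently in flight.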
struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}

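/// A tool use requested by the model that has not yet completed.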
#[derive(Debug, Clone)]
pub struct PendingToolUse {
    pub id: LanguageModelToolUseId,
    /// The ID of the Assistant message in which the tool use was requested.
    pub assistant_message_id: MessageId,
    pub name: String,
    pub input: serde_json::Value,
    pub status: PendingToolUseStatus,
}

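/// The status of a [`PendingToolUse`].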
#[derive(Debug, Clone)]
pub enum PendingToolUseStatus {
    Idle,
    Running { _task: Shared<Task<()>> },
    Error(#[allow(unused)] String),
}

impl PendingToolUseStatus {
    pub fn is_idle(&self) -> bool {
        matches!(self, PendingToolUseStatus::Idle)
    }
}