1use std::sync::Arc;
2
3use anyhow::Result;
4use assistant_tool::ToolWorkingSet;
5use chrono::{DateTime, Utc};
6use collections::{BTreeMap, HashMap, HashSet};
7use futures::future::Shared;
8use futures::{FutureExt as _, StreamExt as _};
9use gpui::{AppContext, EventEmitter, ModelContext, SharedString, Task};
10use language_model::{
11 LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
12 LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolUse,
13 LanguageModelToolUseId, MessageContent, Role, StopReason,
14};
15use language_models::provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError};
16use serde::{Deserialize, Serialize};
17use util::{post_inc, TryFutureExt as _};
18use uuid::Uuid;
19
20use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};
21
/// The kind of completion request being issued to the language model.
///
/// Currently only chat-style requests exist; the enum leaves room for
/// additional request kinds (e.g. summarization) later.
#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
}
26
/// A globally unique identifier for a [`Thread`], backed by a UUID string.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);
29
30impl ThreadId {
31 pub fn new() -> Self {
32 Self(Uuid::new_v4().to_string().into())
33 }
34}
35
36impl std::fmt::Display for ThreadId {
37 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
38 write!(f, "{}", self.0)
39 }
40}
41
/// A monotonically increasing identifier for a [`Message`] within a thread.
///
/// Ids are assigned sequentially starting from 0 by `Thread::insert_message`.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(usize);
44
45impl MessageId {
46 fn post_inc(&mut self) -> Self {
47 Self(post_inc(&mut self.0))
48 }
49}
50
/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    /// Sequential id assigned when the message was inserted into the thread.
    pub id: MessageId,
    /// Who authored the message: user, assistant, or system.
    pub role: Role,
    /// The plain-text body of the message.
    pub text: String,
}
58
/// A thread of conversation with the LLM.
pub struct Thread {
    /// Unique identifier for this thread.
    id: ThreadId,
    /// Timestamp of the last modification (message insertion or streamed chunk).
    updated_at: DateTime<Utc>,
    /// LLM-generated short title for the conversation, once available.
    summary: Option<SharedString>,
    /// In-flight task generating `summary`; `Task::ready(None)` when idle.
    pending_summary: Task<Option<()>>,
    /// All messages in insertion order.
    messages: Vec<Message>,
    /// The id that will be assigned to the next inserted message.
    next_message_id: MessageId,
    /// All context snapshots ever attached to this thread, keyed by id.
    context: BTreeMap<ContextId, ContextSnapshot>,
    /// Which context ids were attached alongside each user message.
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    /// Counter used to assign ids to pending completions.
    completion_count: usize,
    /// Completions currently streaming; non-empty means the thread is streaming.
    pending_completions: Vec<PendingCompletion>,
    /// The set of tools the model may invoke.
    tools: Arc<ToolWorkingSet>,
    /// Tool-use requests grouped by the assistant message that issued them.
    tool_uses_by_message: HashMap<MessageId, Vec<LanguageModelToolUse>>,
    /// Tool results grouped by the message they should be attached to.
    tool_results_by_message: HashMap<MessageId, Vec<LanguageModelToolResult>>,
    /// Tool uses that have been requested but not yet completed.
    pending_tool_uses_by_id: HashMap<LanguageModelToolUseId, PendingToolUse>,
}
76
77impl Thread {
78 pub fn new(tools: Arc<ToolWorkingSet>, _cx: &mut ModelContext<Self>) -> Self {
79 Self {
80 id: ThreadId::new(),
81 updated_at: Utc::now(),
82 summary: None,
83 pending_summary: Task::ready(None),
84 messages: Vec::new(),
85 next_message_id: MessageId(0),
86 context: BTreeMap::default(),
87 context_by_message: HashMap::default(),
88 completion_count: 0,
89 pending_completions: Vec::new(),
90 tools,
91 tool_uses_by_message: HashMap::default(),
92 tool_results_by_message: HashMap::default(),
93 pending_tool_uses_by_id: HashMap::default(),
94 }
95 }
96
97 pub fn id(&self) -> &ThreadId {
98 &self.id
99 }
100
101 pub fn is_empty(&self) -> bool {
102 self.messages.is_empty()
103 }
104
105 pub fn updated_at(&self) -> DateTime<Utc> {
106 self.updated_at
107 }
108
109 pub fn touch_updated_at(&mut self) {
110 self.updated_at = Utc::now();
111 }
112
113 pub fn summary(&self) -> Option<SharedString> {
114 self.summary.clone()
115 }
116
117 pub fn summary_or_default(&self) -> SharedString {
118 const DEFAULT: SharedString = SharedString::new_static("New Thread");
119 self.summary.clone().unwrap_or(DEFAULT)
120 }
121
122 pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut ModelContext<Self>) {
123 self.summary = Some(summary.into());
124 cx.emit(ThreadEvent::SummaryChanged);
125 }
126
127 pub fn message(&self, id: MessageId) -> Option<&Message> {
128 self.messages.iter().find(|message| message.id == id)
129 }
130
131 pub fn messages(&self) -> impl Iterator<Item = &Message> {
132 self.messages.iter()
133 }
134
135 pub fn is_streaming(&self) -> bool {
136 !self.pending_completions.is_empty()
137 }
138
139 pub fn tools(&self) -> &Arc<ToolWorkingSet> {
140 &self.tools
141 }
142
143 pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
144 let context = self.context_by_message.get(&id)?;
145 Some(
146 context
147 .into_iter()
148 .filter_map(|context_id| self.context.get(&context_id))
149 .cloned()
150 .collect::<Vec<_>>(),
151 )
152 }
153
154 pub fn pending_tool_uses(&self) -> Vec<&PendingToolUse> {
155 self.pending_tool_uses_by_id.values().collect()
156 }
157
158 pub fn insert_user_message(
159 &mut self,
160 text: impl Into<String>,
161 context: Vec<ContextSnapshot>,
162 cx: &mut ModelContext<Self>,
163 ) {
164 let message_id = self.insert_message(Role::User, text, cx);
165 let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
166 self.context
167 .extend(context.into_iter().map(|context| (context.id, context)));
168 self.context_by_message.insert(message_id, context_ids);
169 }
170
171 pub fn insert_message(
172 &mut self,
173 role: Role,
174 text: impl Into<String>,
175 cx: &mut ModelContext<Self>,
176 ) -> MessageId {
177 let id = self.next_message_id.post_inc();
178 self.messages.push(Message {
179 id,
180 role,
181 text: text.into(),
182 });
183 self.touch_updated_at();
184 cx.emit(ThreadEvent::MessageAdded(id));
185 id
186 }
187
188 /// Returns the representation of this [`Thread`] in a textual form.
189 ///
190 /// This is the representation we use when attaching a thread as context to another thread.
191 pub fn text(&self) -> String {
192 let mut text = String::new();
193
194 for message in &self.messages {
195 text.push_str(match message.role {
196 language_model::Role::User => "User:",
197 language_model::Role::Assistant => "Assistant:",
198 language_model::Role::System => "System:",
199 });
200 text.push('\n');
201
202 text.push_str(&message.text);
203 text.push('\n');
204 }
205
206 text
207 }
208
209 pub fn to_completion_request(
210 &self,
211 _request_kind: RequestKind,
212 _cx: &AppContext,
213 ) -> LanguageModelRequest {
214 let mut request = LanguageModelRequest {
215 messages: vec![],
216 tools: Vec::new(),
217 stop: Vec::new(),
218 temperature: None,
219 };
220
221 let mut referenced_context_ids = HashSet::default();
222
223 for message in &self.messages {
224 if let Some(context_ids) = self.context_by_message.get(&message.id) {
225 referenced_context_ids.extend(context_ids);
226 }
227
228 let mut request_message = LanguageModelRequestMessage {
229 role: message.role,
230 content: Vec::new(),
231 cache: false,
232 };
233
234 if let Some(tool_results) = self.tool_results_by_message.get(&message.id) {
235 for tool_result in tool_results {
236 request_message
237 .content
238 .push(MessageContent::ToolResult(tool_result.clone()));
239 }
240 }
241
242 if !message.text.is_empty() {
243 request_message
244 .content
245 .push(MessageContent::Text(message.text.clone()));
246 }
247
248 if let Some(tool_uses) = self.tool_uses_by_message.get(&message.id) {
249 for tool_use in tool_uses {
250 request_message
251 .content
252 .push(MessageContent::ToolUse(tool_use.clone()));
253 }
254 }
255
256 request.messages.push(request_message);
257 }
258
259 if !referenced_context_ids.is_empty() {
260 let mut context_message = LanguageModelRequestMessage {
261 role: Role::User,
262 content: Vec::new(),
263 cache: false,
264 };
265
266 let referenced_context = referenced_context_ids
267 .into_iter()
268 .filter_map(|context_id| self.context.get(context_id))
269 .cloned();
270 attach_context_to_message(&mut context_message, referenced_context);
271
272 request.messages.push(context_message);
273 }
274
275 request
276 }
277
278 pub fn stream_completion(
279 &mut self,
280 request: LanguageModelRequest,
281 model: Arc<dyn LanguageModel>,
282 cx: &mut ModelContext<Self>,
283 ) {
284 let pending_completion_id = post_inc(&mut self.completion_count);
285
286 let task = cx.spawn(|thread, mut cx| async move {
287 let stream = model.stream_completion(request, &cx);
288 let stream_completion = async {
289 let mut events = stream.await?;
290 let mut stop_reason = StopReason::EndTurn;
291
292 while let Some(event) = events.next().await {
293 let event = event?;
294
295 thread.update(&mut cx, |thread, cx| {
296 match event {
297 LanguageModelCompletionEvent::StartMessage { .. } => {
298 thread.insert_message(Role::Assistant, String::new(), cx);
299 }
300 LanguageModelCompletionEvent::Stop(reason) => {
301 stop_reason = reason;
302 }
303 LanguageModelCompletionEvent::Text(chunk) => {
304 if let Some(last_message) = thread.messages.last_mut() {
305 if last_message.role == Role::Assistant {
306 last_message.text.push_str(&chunk);
307 cx.emit(ThreadEvent::StreamedAssistantText(
308 last_message.id,
309 chunk,
310 ));
311 } else {
312 // If we won't have an Assistant message yet, assume this chunk marks the beginning
313 // of a new Assistant response.
314 //
315 // Importantly: We do *not* want to emit a `StreamedAssistantText` event here, as it
316 // will result in duplicating the text of the chunk in the rendered Markdown.
317 thread.insert_message(Role::Assistant, chunk, cx);
318 }
319 }
320 }
321 LanguageModelCompletionEvent::ToolUse(tool_use) => {
322 if let Some(last_assistant_message) = thread
323 .messages
324 .iter()
325 .rfind(|message| message.role == Role::Assistant)
326 {
327 thread
328 .tool_uses_by_message
329 .entry(last_assistant_message.id)
330 .or_default()
331 .push(tool_use.clone());
332
333 thread.pending_tool_uses_by_id.insert(
334 tool_use.id.clone(),
335 PendingToolUse {
336 assistant_message_id: last_assistant_message.id,
337 id: tool_use.id,
338 name: tool_use.name,
339 input: tool_use.input,
340 status: PendingToolUseStatus::Idle,
341 },
342 );
343 }
344 }
345 }
346
347 thread.touch_updated_at();
348 cx.emit(ThreadEvent::StreamedCompletion);
349 cx.notify();
350 })?;
351
352 smol::future::yield_now().await;
353 }
354
355 thread.update(&mut cx, |thread, cx| {
356 thread
357 .pending_completions
358 .retain(|completion| completion.id != pending_completion_id);
359
360 if thread.summary.is_none() && thread.messages.len() >= 2 {
361 thread.summarize(cx);
362 }
363 })?;
364
365 anyhow::Ok(stop_reason)
366 };
367
368 let result = stream_completion.await;
369
370 thread
371 .update(&mut cx, |thread, cx| match result.as_ref() {
372 Ok(stop_reason) => match stop_reason {
373 StopReason::ToolUse => {
374 cx.emit(ThreadEvent::UsePendingTools);
375 }
376 StopReason::EndTurn => {}
377 StopReason::MaxTokens => {}
378 },
379 Err(error) => {
380 if error.is::<PaymentRequiredError>() {
381 cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
382 } else if error.is::<MaxMonthlySpendReachedError>() {
383 cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
384 } else {
385 let error_message = error
386 .chain()
387 .map(|err| err.to_string())
388 .collect::<Vec<_>>()
389 .join("\n");
390 cx.emit(ThreadEvent::ShowError(ThreadError::Message(
391 SharedString::from(error_message.clone()),
392 )));
393 }
394
395 thread.cancel_last_completion();
396 }
397 })
398 .ok();
399 });
400
401 self.pending_completions.push(PendingCompletion {
402 id: pending_completion_id,
403 _task: task,
404 });
405 }
406
407 pub fn summarize(&mut self, cx: &mut ModelContext<Self>) {
408 let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
409 return;
410 };
411 let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
412 return;
413 };
414
415 if !provider.is_authenticated(cx) {
416 return;
417 }
418
419 let mut request = self.to_completion_request(RequestKind::Chat, cx);
420 request.messages.push(LanguageModelRequestMessage {
421 role: Role::User,
422 content: vec![
423 "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
424 .into(),
425 ],
426 cache: false,
427 });
428
429 self.pending_summary = cx.spawn(|this, mut cx| {
430 async move {
431 let stream = model.stream_completion_text(request, &cx);
432 let mut messages = stream.await?;
433
434 let mut new_summary = String::new();
435 while let Some(message) = messages.stream.next().await {
436 let text = message?;
437 let mut lines = text.lines();
438 new_summary.extend(lines.next());
439
440 // Stop if the LLM generated multiple lines.
441 if lines.next().is_some() {
442 break;
443 }
444 }
445
446 this.update(&mut cx, |this, cx| {
447 if !new_summary.is_empty() {
448 this.summary = Some(new_summary.into());
449 }
450
451 cx.emit(ThreadEvent::SummaryChanged);
452 })?;
453
454 anyhow::Ok(())
455 }
456 .log_err()
457 });
458 }
459
460 pub fn insert_tool_output(
461 &mut self,
462 assistant_message_id: MessageId,
463 tool_use_id: LanguageModelToolUseId,
464 output: Task<Result<String>>,
465 cx: &mut ModelContext<Self>,
466 ) {
467 let insert_output_task = cx.spawn(|thread, mut cx| {
468 let tool_use_id = tool_use_id.clone();
469 async move {
470 let output = output.await;
471 thread
472 .update(&mut cx, |thread, cx| {
473 // The tool use was requested by an Assistant message,
474 // so we want to attach the tool results to the next
475 // user message.
476 let next_user_message = MessageId(assistant_message_id.0 + 1);
477
478 let tool_results = thread
479 .tool_results_by_message
480 .entry(next_user_message)
481 .or_default();
482
483 match output {
484 Ok(output) => {
485 tool_results.push(LanguageModelToolResult {
486 tool_use_id: tool_use_id.to_string(),
487 content: output,
488 is_error: false,
489 });
490
491 cx.emit(ThreadEvent::ToolFinished { tool_use_id });
492 }
493 Err(err) => {
494 tool_results.push(LanguageModelToolResult {
495 tool_use_id: tool_use_id.to_string(),
496 content: err.to_string(),
497 is_error: true,
498 });
499
500 if let Some(tool_use) =
501 thread.pending_tool_uses_by_id.get_mut(&tool_use_id)
502 {
503 tool_use.status = PendingToolUseStatus::Error(err.to_string());
504 }
505 }
506 }
507 })
508 .ok();
509 }
510 });
511
512 if let Some(tool_use) = self.pending_tool_uses_by_id.get_mut(&tool_use_id) {
513 tool_use.status = PendingToolUseStatus::Running {
514 _task: insert_output_task.shared(),
515 };
516 }
517 }
518
519 /// Cancels the last pending completion, if there are any pending.
520 ///
521 /// Returns whether a completion was canceled.
522 pub fn cancel_last_completion(&mut self) -> bool {
523 if let Some(_last_completion) = self.pending_completions.pop() {
524 true
525 } else {
526 false
527 }
528 }
529}
530
/// Errors surfaced to the UI while streaming a completion.
#[derive(Debug, Clone)]
pub enum ThreadError {
    /// The cloud provider requires payment before further completions.
    PaymentRequired,
    /// The user's configured monthly spend limit has been reached.
    MaxMonthlySpendReached,
    /// Any other error, flattened into a displayable message.
    Message(SharedString),
}
537
/// Events emitted by a [`Thread`] for views and controllers to react to.
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    /// An error occurred that should be shown to the user.
    ShowError(ThreadError),
    /// A completion event was processed (text, tool use, or stop).
    StreamedCompletion,
    /// A chunk of assistant text was appended to the given message.
    StreamedAssistantText(MessageId, String),
    /// A new message was inserted into the thread.
    MessageAdded(MessageId),
    /// The thread's summary was set or regenerated.
    SummaryChanged,
    /// The model stopped with `StopReason::ToolUse`; pending tools should run.
    UsePendingTools,
    /// A tool invocation completed successfully.
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
    },
}
551
// Allows observers to subscribe to `ThreadEvent`s emitted by a `Thread`.
impl EventEmitter<ThreadEvent> for Thread {}
553
/// An in-flight completion; dropping it cancels the underlying task.
struct PendingCompletion {
    /// Id assigned from `Thread::completion_count`, used to retain/remove it.
    id: usize,
    /// The streaming task; held only to keep it alive.
    _task: Task<()>,
}
558
/// A tool invocation requested by the model that has not yet been resolved.
#[derive(Debug, Clone)]
pub struct PendingToolUse {
    /// The model-assigned id of this tool use.
    pub id: LanguageModelToolUseId,
    /// The ID of the Assistant message in which the tool use was requested.
    pub assistant_message_id: MessageId,
    /// The name of the tool to invoke.
    pub name: String,
    /// The JSON input the model supplied for the tool.
    pub input: serde_json::Value,
    /// Current lifecycle state of this tool use.
    pub status: PendingToolUseStatus,
}
568
/// Lifecycle state of a [`PendingToolUse`].
#[derive(Debug, Clone)]
pub enum PendingToolUseStatus {
    /// Requested by the model but not yet started.
    Idle,
    /// Currently executing; the task is held (shared) to keep it alive.
    Running { _task: Shared<Task<()>> },
    /// Execution failed with the contained error message.
    Error(#[allow(unused)] String),
}
575
576impl PendingToolUseStatus {
577 pub fn is_idle(&self) -> bool {
578 matches!(self, PendingToolUseStatus::Idle)
579 }
580}