use std::sync::Arc;

use anyhow::Result;
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use futures::future::Shared;
use futures::{FutureExt as _, StreamExt as _};
use gpui::{AppContext, EventEmitter, ModelContext, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, Role, StopReason,
};
use language_models::provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError};
use serde::{Deserialize, Serialize};
use util::{post_inc, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
}

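/// A unique identifier for a [`Thread`], generated as a UUID v4 string.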
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
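///
/// A minimal usage sketch (not compiled here; it assumes the surrounding gpui
/// `AppContext` exposes `new_model`, and that `model` is an already configured
/// `Arc<dyn LanguageModel>`):
///
/// ```ignore
/// let thread = cx.new_model(|cx| Thread::new(tools.clone(), cx));
/// thread.update(cx, |thread, cx| {
///     // Record what the user typed, along with any attached context.
///     thread.insert_user_message("Summarize this file", Vec::new(), cx);
///     // Build a request from the conversation so far and start streaming.
///     let request = thread.to_completion_request(RequestKind::Chat, cx);
///     thread.stream_completion(request, model.clone(), cx);
/// });
/// ```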
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    tools: Arc<ToolWorkingSet>,
    tool_uses_by_message: HashMap<MessageId, Vec<LanguageModelToolUse>>,
    tool_results_by_message: HashMap<MessageId, Vec<LanguageModelToolResult>>,
    pending_tool_uses_by_id: HashMap<LanguageModelToolUseId, PendingToolUse>,
}

impl Thread {
    pub fn new(tools: Arc<ToolWorkingSet>, _cx: &mut ModelContext<Self>) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            tools,
            tool_uses_by_message: HashMap::default(),
            tool_results_by_message: HashMap::default(),
            pending_tool_uses_by_id: HashMap::default(),
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut ModelContext<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_streaming(&self) -> bool {
        !self.pending_completions.is_empty()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    pub fn pending_tool_uses(&self) -> Vec<&PendingToolUse> {
        self.pending_tool_uses_by_id.values().collect()
    }

    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        cx: &mut ModelContext<Self>,
    ) {
        let message_id = self.insert_message(Role::User, text, cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut ModelContext<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    /// Returns a textual representation of this [`Thread`].
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            text.push_str(&message.text);
            text.push('\n');
        }

        text
    }

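    /// Builds a [`LanguageModelRequest`] from the messages in this thread.
    ///
    /// For each message, any recorded tool results are pushed first, followed
    /// by the message text and then any tool uses from that message. If any
    /// messages referenced context, a final user message carrying the
    /// referenced context snapshots is appended via
    /// [`attach_context_to_message`].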
    pub fn to_completion_request(
        &self,
        _request_kind: RequestKind,
        _cx: &AppContext,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            if let Some(tool_results) = self.tool_results_by_message.get(&message.id) {
                for tool_result in tool_results {
                    request_message
                        .content
                        .push(MessageContent::ToolResult(tool_result.clone()));
                }
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            if let Some(tool_uses) = self.tool_uses_by_message.get(&message.id) {
                for tool_use in tool_uses {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                }
            }

            request.messages.push(request_message);
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        request
    }

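    /// Streams a completion for `request` from `model` into this thread.
    ///
    /// Streamed text is appended to the trailing assistant message, tool uses
    /// are recorded as pending, and [`ThreadEvent`]s are emitted as chunks
    /// arrive. When the stream stops with [`StopReason::ToolUse`],
    /// [`ThreadEvent::UsePendingTools`] asks listeners to run the pending
    /// tools.
    ///
    /// A minimal sketch of a call site (not compiled here; assumes it runs
    /// inside an update of the owning model and that `model` comes from the
    /// [`LanguageModelRegistry`]):
    ///
    /// ```ignore
    /// let request = thread.to_completion_request(RequestKind::Chat, cx);
    /// thread.stream_completion(request, model, cx);
    /// ```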
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut ModelContext<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    }
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    thread
                                        .tool_uses_by_message
                                        .entry(last_assistant_message.id)
                                        .or_default()
                                        .push(tool_use.clone());

                                    thread.pending_tool_uses_by_id.insert(
                                        tool_use.id.clone(),
                                        PendingToolUse {
                                            assistant_message_id: last_assistant_message.id,
                                            id: tool_use.id,
                                            name: tool_use.name,
                                            input: tool_use.input,
                                            status: PendingToolUseStatus::Idle,
                                        },
                                    );
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    // Yield back to the executor so other tasks can make
                    // progress between streamed events.
                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |_thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message),
                            )));
                        }
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

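    /// Asks the active model for a short title summarizing this thread.
    ///
    /// Does nothing if no provider or model is configured, or if the provider
    /// is not authenticated. Only the first line of the streamed response is
    /// kept, and [`ThreadEvent::SummaryChanged`] is emitted when the stream
    /// finishes.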
    pub fn summarize(&mut self, cx: &mut ModelContext<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Chat, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble or prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

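    /// Records the output of a tool invocation requested by the model.
    ///
    /// The result is attached to the user message that follows
    /// `assistant_message_id`, so it is sent back to the model on the next
    /// request. While `output` is resolving, the pending tool use is marked
    /// as running; failures are stored as error results and reflected in
    /// [`PendingToolUseStatus::Error`].
    ///
    /// A hedged sketch of how a caller might respond to
    /// [`ThreadEvent::UsePendingTools`] (the `run_tool` helper here is
    /// hypothetical and assumed to return `Task<Result<String>>`):
    ///
    /// ```ignore
    /// let pending = thread
    ///     .read(cx)
    ///     .pending_tool_uses()
    ///     .into_iter()
    ///     .cloned()
    ///     .collect::<Vec<_>>();
    ///
    /// for tool_use in pending {
    ///     let output = run_tool(&tool_use.name, tool_use.input.clone(), cx);
    ///     thread.update(cx, |thread, cx| {
    ///         thread.insert_tool_output(tool_use.assistant_message_id, tool_use.id.clone(), output, cx);
    ///     });
    /// }
    /// ```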
    pub fn insert_tool_output(
        &mut self,
        assistant_message_id: MessageId,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut ModelContext<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        // The tool use was requested by an Assistant message,
                        // so we want to attach the tool results to the next
                        // user message.
                        let next_user_message = MessageId(assistant_message_id.0 + 1);

                        let tool_results = thread
                            .tool_results_by_message
                            .entry(next_user_message)
                            .or_default();

                        match output {
                            Ok(output) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: output,
                                    is_error: false,
                                });

                                cx.emit(ThreadEvent::ToolFinished { tool_use_id });
                            }
                            Err(err) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: err.to_string(),
                                    is_error: true,
                                });

                                if let Some(tool_use) =
                                    thread.pending_tool_uses_by_id.get_mut(&tool_use_id)
                                {
                                    tool_use.status = PendingToolUseStatus::Error(err.to_string());
                                }
                            }
                        }
                    })
                    .ok();
            }
        });

        if let Some(tool_use) = self.pending_tool_uses_by_id.get_mut(&tool_use_id) {
            tool_use.status = PendingToolUseStatus::Running {
                _task: insert_output_task.shared(),
            };
        }
    }

    /// Cancels the last pending completion, if there is one.
    ///
    /// Returns whether a completion was canceled.
    pub fn cancel_last_completion(&mut self) -> bool {
        self.pending_completions.pop().is_some()
    }
}

/// Errors surfaced to the user while streaming a completion.
#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

/// Events emitted by a [`Thread`] as it streams completions and runs tools.
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

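/// A completion that is currently streaming for this thread; dropping the
/// stored task cancels the stream.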
struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}

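/// A tool use requested by the model that has not yet produced a result.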
#[derive(Debug, Clone)]
pub struct PendingToolUse {
    pub id: LanguageModelToolUseId,
    /// The ID of the Assistant message in which the tool use was requested.
    pub assistant_message_id: MessageId,
    pub name: String,
    pub input: serde_json::Value,
    pub status: PendingToolUseStatus,
}

#[derive(Debug, Clone)]
pub enum PendingToolUseStatus {
    Idle,
    Running { _task: Shared<Task<()>> },
    Error(#[allow(unused)] String),
}

impl PendingToolUseStatus {
    pub fn is_idle(&self) -> bool {
        matches!(self, PendingToolUseStatus::Idle)
    }
}