use std::sync::Arc;

use anyhow::Result;
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::{BTreeMap, HashMap, HashSet};
use futures::future::Shared;
use futures::{FutureExt as _, StreamExt as _};
use gpui::{AppContext, EventEmitter, ModelContext, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, Role, StopReason,
};
use language_models::provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError};
use serde::{Deserialize, Serialize};
use util::{post_inc, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, ContextId, ContextSnapshot};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
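///
/// Messages are created through [`Thread::insert_message`], which assigns each
/// one the next monotonically increasing [`MessageId`].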
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
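///
/// A rough usage sketch (marked `ignore`; it assumes a gpui `AppContext`, an
/// active `model: Arc<dyn LanguageModel>`, and that `ToolWorkingSet::default()`
/// is available):
///
/// ```ignore
/// let thread = cx.new_model(|cx| Thread::new(Arc::new(ToolWorkingSet::default()), cx));
/// thread.update(cx, |thread, cx| {
///     thread.insert_user_message("Hello!", Vec::new(), cx);
///     let request = thread.to_completion_request(RequestKind::Chat, cx);
///     thread.stream_completion(request, model.clone(), cx);
/// });
/// ```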
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context: BTreeMap<ContextId, ContextSnapshot>,
    context_by_message: HashMap<MessageId, Vec<ContextId>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    tools: Arc<ToolWorkingSet>,
    tool_uses_by_message: HashMap<MessageId, Vec<LanguageModelToolUse>>,
    tool_results_by_message: HashMap<MessageId, Vec<LanguageModelToolResult>>,
    pending_tool_uses_by_id: HashMap<LanguageModelToolUseId, PendingToolUse>,
}

impl Thread {
    pub fn new(tools: Arc<ToolWorkingSet>, _cx: &mut ModelContext<Self>) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context: BTreeMap::default(),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            tools,
            tool_uses_by_message: HashMap::default(),
            tool_results_by_message: HashMap::default(),
            pending_tool_uses_by_id: HashMap::default(),
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn summary_or_default(&self) -> SharedString {
        const DEFAULT: SharedString = SharedString::new_static("New Thread");
        self.summary.clone().unwrap_or(DEFAULT)
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut ModelContext<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn is_streaming(&self) -> bool {
        !self.pending_completions.is_empty()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<Vec<ContextSnapshot>> {
        let context = self.context_by_message.get(&id)?;
        Some(
            context
                .iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned()
                .collect::<Vec<_>>(),
        )
    }

    pub fn pending_tool_uses(&self) -> Vec<&PendingToolUse> {
        self.pending_tool_uses_by_id.values().collect()
    }

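    /// Inserts a new user message containing `text`, recording which context
    /// snapshots were attached to it so they can be included in later requests.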
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<ContextSnapshot>,
        cx: &mut ModelContext<Self>,
    ) {
        let message_id = self.insert_message(Role::User, text, cx);
        let context_ids = context.iter().map(|context| context.id).collect::<Vec<_>>();
        self.context
            .extend(context.into_iter().map(|context| (context.id, context)));
        self.context_by_message.insert(message_id, context_ids);
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut ModelContext<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
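    ///
    /// For example, a short exchange is rendered roughly like this, with each
    /// message prefixed by its role on its own line:
    ///
    /// ```text
    /// User:
    /// What is 2 + 2?
    /// Assistant:
    /// 4
    /// ```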
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            text.push_str(&message.text);
            text.push('\n');
        }

        text
    }

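    /// Builds a [`LanguageModelRequest`] from the messages in this thread.
    ///
    /// Each message contributes its tool results first, then its text, then its
    /// tool uses. If any message referenced context, a final user message carrying
    /// all of the referenced context snapshots is appended to the request.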
    pub fn to_completion_request(
        &self,
        _request_kind: RequestKind,
        _cx: &AppContext,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        let mut referenced_context_ids = HashSet::default();

        for message in &self.messages {
            if let Some(context_ids) = self.context_by_message.get(&message.id) {
                referenced_context_ids.extend(context_ids);
            }

            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            if let Some(tool_results) = self.tool_results_by_message.get(&message.id) {
                for tool_result in tool_results {
                    request_message
                        .content
                        .push(MessageContent::ToolResult(tool_result.clone()));
                }
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            if let Some(tool_uses) = self.tool_uses_by_message.get(&message.id) {
                for tool_use in tool_uses {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                }
            }

            request.messages.push(request_message);
        }

        if !referenced_context_ids.is_empty() {
            let mut context_message = LanguageModelRequestMessage {
                role: Role::User,
                content: Vec::new(),
                cache: false,
            };

            let referenced_context = referenced_context_ids
                .into_iter()
                .filter_map(|context_id| self.context.get(context_id))
                .cloned();
            attach_context_to_message(&mut context_message, referenced_context);

            request.messages.push(context_message);
        }

        request
    }

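    /// Streams a completion for `request` from `model`, updating the thread as
    /// events arrive: `StartMessage` inserts a new assistant message, `Text`
    /// appends chunks to the last assistant message, `ToolUse` records a pending
    /// tool use, and `Stop` captures the stop reason. Errors are surfaced via
    /// [`ThreadEvent::ShowError`], and a `ToolUse` stop reason emits
    /// [`ThreadEvent::UsePendingTools`].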
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut ModelContext<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    }
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    thread
                                        .tool_uses_by_message
                                        .entry(last_assistant_message.id)
                                        .or_default()
                                        .push(tool_use.clone());

                                    thread.pending_tool_uses_by_id.insert(
                                        tool_use.id.clone(),
                                        PendingToolUse {
                                            assistant_message_id: last_assistant_message.id,
                                            id: tool_use.id,
                                            name: tool_use.name,
                                            input: tool_use.input,
                                            status: PendingToolUseStatus::Idle,
                                        },
                                    );
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message.clone()),
                            )));
                        }

                        thread.cancel_last_completion();
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

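    /// Generates a short title for this thread by asking the active language model
    /// for a concise 3-7 word summary of the conversation so far. The first line of
    /// the response is stored as the thread's summary and
    /// [`ThreadEvent::SummaryChanged`] is emitted.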
    pub fn summarize(&mut self, cx: &mut ModelContext<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Chat, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

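    /// Records the output of a tool use once `output` resolves.
    ///
    /// The result is attached to the user message that follows the assistant
    /// message which requested the tool use. The pending tool use's status moves
    /// from `Idle` to `Running` while the output is awaited, and to `Error` if
    /// the tool fails; a successful result emits [`ThreadEvent::ToolFinished`].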
    pub fn insert_tool_output(
        &mut self,
        assistant_message_id: MessageId,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut ModelContext<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        // The tool use was requested by an Assistant message,
                        // so we want to attach the tool results to the next
                        // user message.
                        let next_user_message = MessageId(assistant_message_id.0 + 1);

                        let tool_results = thread
                            .tool_results_by_message
                            .entry(next_user_message)
                            .or_default();

                        match output {
                            Ok(output) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: output,
                                    is_error: false,
                                });

                                cx.emit(ThreadEvent::ToolFinished { tool_use_id });
                            }
                            Err(err) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: err.to_string(),
                                    is_error: true,
                                });

                                if let Some(tool_use) =
                                    thread.pending_tool_uses_by_id.get_mut(&tool_use_id)
                                {
                                    tool_use.status = PendingToolUseStatus::Error(err.to_string());
                                }
                            }
                        }
                    })
                    .ok();
            }
        });

        if let Some(tool_use) = self.pending_tool_uses_by_id.get_mut(&tool_use_id) {
            tool_use.status = PendingToolUseStatus::Running {
                _task: insert_output_task.shared(),
            };
        }
    }

    /// Cancels the last pending completion, if there are any pending.
    ///
    /// Returns whether a completion was canceled.
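    ///
    /// The completion is removed from `pending_completions`, which drops the
    /// [`PendingCompletion`] and the task it holds.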
    pub fn cancel_last_completion(&mut self) -> bool {
        if let Some(_last_completion) = self.pending_completions.pop() {
            true
        } else {
            false
        }
    }
}

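/// An error surfaced to the UI via [`ThreadEvent::ShowError`].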
#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

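/// An event emitted by a [`Thread`] as it is updated or streams a completion.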
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}

#[derive(Debug, Clone)]
pub struct PendingToolUse {
    pub id: LanguageModelToolUseId,
    /// The ID of the Assistant message in which the tool use was requested.
    pub assistant_message_id: MessageId,
    pub name: String,
    pub input: serde_json::Value,
    pub status: PendingToolUseStatus,
}

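/// The status of a [`PendingToolUse`]: `Idle` until the tool is run, `Running`
/// while [`Thread::insert_tool_output`] is awaiting the tool's output, and
/// `Error` if the tool returned an error.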
#[derive(Debug, Clone)]
pub enum PendingToolUseStatus {
    Idle,
    Running { _task: Shared<Task<()>> },
    Error(#[allow(unused)] String),
}

impl PendingToolUseStatus {
    pub fn is_idle(&self) -> bool {
        matches!(self, PendingToolUseStatus::Idle)
    }
}