use std::sync::Arc;

use anyhow::Result;
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::HashMap;
use futures::future::Shared;
use futures::{FutureExt as _, StreamExt as _};
use gpui::{AppContext, EventEmitter, ModelContext, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, Role, StopReason,
};
use language_models::provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError};
use serde::{Deserialize, Serialize};
use util::{post_inc, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{attach_context_to_message, Context};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
}

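/// A unique identifier for a [`Thread`], backed by a freshly generated UUID.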
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

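/// The identifier of a [`Message`] within a [`Thread`].
///
/// IDs are assigned sequentially as messages are inserted.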
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context_by_message: HashMap<MessageId, Vec<Context>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    tools: Arc<ToolWorkingSet>,
    tool_uses_by_message: HashMap<MessageId, Vec<LanguageModelToolUse>>,
    tool_results_by_message: HashMap<MessageId, Vec<LanguageModelToolResult>>,
    pending_tool_uses_by_id: HashMap<LanguageModelToolUseId, PendingToolUse>,
}

impl Thread {
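    /// Creates a new, empty [`Thread`] with a freshly generated [`ThreadId`].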
    pub fn new(tools: Arc<ToolWorkingSet>, _cx: &mut ModelContext<Self>) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            tools,
            tool_uses_by_message: HashMap::default(),
            tool_results_by_message: HashMap::default(),
            pending_tool_uses_by_id: HashMap::default(),
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut ModelContext<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<&Vec<Context>> {
        self.context_by_message.get(&id)
    }

    pub fn pending_tool_uses(&self) -> Vec<&PendingToolUse> {
        self.pending_tool_uses_by_id.values().collect()
    }

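    /// Inserts a new user message along with the context that was attached to it.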
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<Context>,
        cx: &mut ModelContext<Self>,
    ) {
        let message_id = self.insert_message(Role::User, text, cx);
        self.context_by_message.insert(message_id, context);
    }

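    /// Appends a message with the given role and text, updates `updated_at`, and
    /// emits [`ThreadEvent::MessageAdded`] with the new message's ID.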
    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut ModelContext<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

    /// Returns the representation of this [`Thread`] in a textual form.
    ///
    /// This is the representation we use when attaching a thread as context to another thread.
    pub fn text(&self) -> String {
        let mut text = String::new();

        for message in &self.messages {
            text.push_str(match message.role {
                language_model::Role::User => "User:",
                language_model::Role::Assistant => "Assistant:",
                language_model::Role::System => "System:",
            });
            text.push('\n');

            text.push_str(&message.text);
            text.push('\n');
        }

        text
    }

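    /// Builds a [`LanguageModelRequest`] from the messages in this thread, folding
    /// in each message's tool results, attached context, and tool uses.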
    pub fn to_completion_request(
        &self,
        _request_kind: RequestKind,
        _cx: &AppContext,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        for message in &self.messages {
            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            if let Some(tool_results) = self.tool_results_by_message.get(&message.id) {
                for tool_result in tool_results {
                    request_message
                        .content
                        .push(MessageContent::ToolResult(tool_result.clone()));
                }
            }

            if let Some(context) = self.context_for_message(message.id) {
                attach_context_to_message(&mut request_message, context.clone());
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            if let Some(tool_uses) = self.tool_uses_by_message.get(&message.id) {
                for tool_use in tool_uses {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                }
            }

            request.messages.push(request_message);
        }

        request
    }

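    /// Streams a completion for the given request from the provided model,
    /// appending assistant text and tool uses to the thread as events arrive.
    ///
    /// A minimal usage sketch (assuming a `model: Arc<dyn LanguageModel>` and a
    /// `cx: &mut ModelContext<Thread>` are already in scope):
    ///
    /// ```ignore
    /// let request = thread.to_completion_request(RequestKind::Chat, cx);
    /// thread.stream_completion(request, model, cx);
    /// ```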
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut ModelContext<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    }
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    thread
                                        .tool_uses_by_message
                                        .entry(last_assistant_message.id)
                                        .or_default()
                                        .push(tool_use.clone());

                                    thread.pending_tool_uses_by_id.insert(
                                        tool_use.id.clone(),
                                        PendingToolUse {
                                            assistant_message_id: last_assistant_message.id,
                                            id: tool_use.id,
                                            name: tool_use.name,
                                            input: tool_use.input,
                                            status: PendingToolUseStatus::Idle,
                                        },
                                    );
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |_thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message),
                            )));
                        }
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

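    /// Kicks off a background request asking the active model to generate a short
    /// title for this conversation, storing the result as the thread's summary.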
    pub fn summarize(&mut self, cx: &mut ModelContext<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Chat, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble and prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

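    /// Records the output of a tool invocation once the given task resolves.
    ///
    /// The result is attached to the user message that follows the assistant
    /// message which requested the tool use, so that it is sent back to the
    /// model on the next completion request.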
    pub fn insert_tool_output(
        &mut self,
        assistant_message_id: MessageId,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut ModelContext<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        // The tool use was requested by an Assistant message,
                        // so we want to attach the tool results to the next
                        // user message.
                        let next_user_message = MessageId(assistant_message_id.0 + 1);

                        let tool_results = thread
                            .tool_results_by_message
                            .entry(next_user_message)
                            .or_default();

                        match output {
                            Ok(output) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: output,
                                    is_error: false,
                                });

                                cx.emit(ThreadEvent::ToolFinished { tool_use_id });
                            }
                            Err(err) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: err.to_string(),
                                    is_error: true,
                                });

                                if let Some(tool_use) =
                                    thread.pending_tool_uses_by_id.get_mut(&tool_use_id)
                                {
                                    tool_use.status = PendingToolUseStatus::Error(err.to_string());
                                }
                            }
                        }
                    })
                    .ok();
            }
        });

        if let Some(tool_use) = self.pending_tool_uses_by_id.get_mut(&tool_use_id) {
            tool_use.status = PendingToolUseStatus::Running {
                _task: insert_output_task.shared(),
            };
        }
    }
}

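/// Errors that can be surfaced via [`ThreadEvent::ShowError`].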
#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

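/// Events emitted by a [`Thread`] as completions stream in and tools are used.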
#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

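/// A completion request that is currently streaming in the background.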
struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}

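/// A tool use requested by the model that has not yet produced a final result.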
#[derive(Debug, Clone)]
pub struct PendingToolUse {
    pub id: LanguageModelToolUseId,
    /// The ID of the Assistant message in which the tool use was requested.
    pub assistant_message_id: MessageId,
    pub name: String,
    pub input: serde_json::Value,
    pub status: PendingToolUseStatus,
}

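/// The current status of a [`PendingToolUse`].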
#[derive(Debug, Clone)]
pub enum PendingToolUseStatus {
    Idle,
    Running { _task: Shared<Task<()>> },
    Error(#[allow(unused)] String),
}

impl PendingToolUseStatus {
    pub fn is_idle(&self) -> bool {
        matches!(self, PendingToolUseStatus::Idle)
    }
}