use std::sync::Arc;

use anyhow::Result;
use assistant_tool::ToolWorkingSet;
use chrono::{DateTime, Utc};
use collections::HashMap;
use futures::future::Shared;
use futures::{FutureExt as _, StreamExt as _};
use gpui::{AppContext, EventEmitter, ModelContext, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolUse,
    LanguageModelToolUseId, MessageContent, Role, StopReason,
};
use language_models::provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError};
use serde::{Deserialize, Serialize};
use util::{post_inc, TryFutureExt as _};
use uuid::Uuid;

use crate::context::{Context, ContextKind};

#[derive(Debug, Clone, Copy)]
pub enum RequestKind {
    Chat,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize)]
pub struct ThreadId(Arc<str>);

impl ThreadId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string().into())
    }
}

impl std::fmt::Display for ThreadId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
pub struct MessageId(usize);

impl MessageId {
    fn post_inc(&mut self) -> Self {
        Self(post_inc(&mut self.0))
    }
}

/// A message in a [`Thread`].
#[derive(Debug, Clone)]
pub struct Message {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
}

/// A thread of conversation with the LLM.
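///
/// A minimal usage sketch (illustrative only; assumes a gpui context is
/// available, e.g. in a test, and that a `ToolWorkingSet` can be constructed
/// with `Default`):
///
/// ```ignore
/// let thread = cx.new_model(|cx| Thread::new(Arc::new(ToolWorkingSet::default()), cx));
/// thread.update(cx, |thread, cx| {
///     thread.insert_user_message("Hello!", Vec::new(), cx);
///     let request = thread.to_completion_request(RequestKind::Chat, cx);
///     assert_eq!(request.messages.len(), 1);
/// });
/// ```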
pub struct Thread {
    id: ThreadId,
    updated_at: DateTime<Utc>,
    summary: Option<SharedString>,
    pending_summary: Task<Option<()>>,
    messages: Vec<Message>,
    next_message_id: MessageId,
    context_by_message: HashMap<MessageId, Vec<Context>>,
    completion_count: usize,
    pending_completions: Vec<PendingCompletion>,
    tools: Arc<ToolWorkingSet>,
    tool_uses_by_message: HashMap<MessageId, Vec<LanguageModelToolUse>>,
    tool_results_by_message: HashMap<MessageId, Vec<LanguageModelToolResult>>,
    pending_tool_uses_by_id: HashMap<LanguageModelToolUseId, PendingToolUse>,
}

impl Thread {
    pub fn new(tools: Arc<ToolWorkingSet>, _cx: &mut ModelContext<Self>) -> Self {
        Self {
            id: ThreadId::new(),
            updated_at: Utc::now(),
            summary: None,
            pending_summary: Task::ready(None),
            messages: Vec::new(),
            next_message_id: MessageId(0),
            context_by_message: HashMap::default(),
            completion_count: 0,
            pending_completions: Vec::new(),
            tools,
            tool_uses_by_message: HashMap::default(),
            tool_results_by_message: HashMap::default(),
            pending_tool_uses_by_id: HashMap::default(),
        }
    }

    pub fn id(&self) -> &ThreadId {
        &self.id
    }

    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }

    pub fn updated_at(&self) -> DateTime<Utc> {
        self.updated_at
    }

    pub fn touch_updated_at(&mut self) {
        self.updated_at = Utc::now();
    }

    pub fn summary(&self) -> Option<SharedString> {
        self.summary.clone()
    }

    pub fn set_summary(&mut self, summary: impl Into<SharedString>, cx: &mut ModelContext<Self>) {
        self.summary = Some(summary.into());
        cx.emit(ThreadEvent::SummaryChanged);
    }

    pub fn message(&self, id: MessageId) -> Option<&Message> {
        self.messages.iter().find(|message| message.id == id)
    }

    pub fn messages(&self) -> impl Iterator<Item = &Message> {
        self.messages.iter()
    }

    pub fn tools(&self) -> &Arc<ToolWorkingSet> {
        &self.tools
    }

    pub fn context_for_message(&self, id: MessageId) -> Option<&Vec<Context>> {
        self.context_by_message.get(&id)
    }

    pub fn pending_tool_uses(&self) -> Vec<&PendingToolUse> {
        self.pending_tool_uses_by_id.values().collect()
    }

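    /// Pushes a user message onto the thread and records the provided context
    /// entries against it, so they are included the next time the thread is
    /// converted into a completion request.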
    pub fn insert_user_message(
        &mut self,
        text: impl Into<String>,
        context: Vec<Context>,
        cx: &mut ModelContext<Self>,
    ) {
        let message_id = self.insert_message(Role::User, text, cx);
        self.context_by_message.insert(message_id, context);
    }

    pub fn insert_message(
        &mut self,
        role: Role,
        text: impl Into<String>,
        cx: &mut ModelContext<Self>,
    ) -> MessageId {
        let id = self.next_message_id.post_inc();
        self.messages.push(Message {
            id,
            role,
            text: text.into(),
        });
        self.touch_updated_at();
        cx.emit(ThreadEvent::MessageAdded(id));
        id
    }

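    /// Converts this thread into a [`LanguageModelRequest`], replaying each
    /// stored message along with the tool results, attached file context, and
    /// tool uses associated with it.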
    pub fn to_completion_request(
        &self,
        _request_kind: RequestKind,
        _cx: &AppContext,
    ) -> LanguageModelRequest {
        let mut request = LanguageModelRequest {
            messages: vec![],
            tools: Vec::new(),
            stop: Vec::new(),
            temperature: None,
        };

        for message in &self.messages {
            let mut request_message = LanguageModelRequestMessage {
                role: message.role,
                content: Vec::new(),
                cache: false,
            };

            if let Some(tool_results) = self.tool_results_by_message.get(&message.id) {
                for tool_result in tool_results {
                    request_message
                        .content
                        .push(MessageContent::ToolResult(tool_result.clone()));
                }
            }

            if let Some(context) = self.context_for_message(message.id) {
                let mut file_context = String::new();

                for context in context.iter() {
                    match context.kind {
                        ContextKind::File => {
                            file_context.push_str(&context.text);
                            file_context.push('\n');
                        }
                    }
                }

                let mut context_text = String::new();
                if !file_context.is_empty() {
                    context_text.push_str("The following files are available:\n");
                    context_text.push_str(&file_context);
                }

                request_message
                    .content
                    .push(MessageContent::Text(context_text));
            }

            if !message.text.is_empty() {
                request_message
                    .content
                    .push(MessageContent::Text(message.text.clone()));
            }

            if let Some(tool_uses) = self.tool_uses_by_message.get(&message.id) {
                for tool_use in tool_uses {
                    request_message
                        .content
                        .push(MessageContent::ToolUse(tool_use.clone()));
                }
            }

            request.messages.push(request_message);
        }

        request
    }

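    /// Streams a completion for `request` from `model`, appending assistant
    /// text to the thread as it arrives and recording any requested tool uses
    /// as pending. Emits [`ThreadEvent`]s as the stream progresses, and
    /// triggers [`Self::summarize`] once the thread has at least two messages
    /// and no summary yet.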
    pub fn stream_completion(
        &mut self,
        request: LanguageModelRequest,
        model: Arc<dyn LanguageModel>,
        cx: &mut ModelContext<Self>,
    ) {
        let pending_completion_id = post_inc(&mut self.completion_count);

        let task = cx.spawn(|thread, mut cx| async move {
            let stream = model.stream_completion(request, &cx);
            let stream_completion = async {
                let mut events = stream.await?;
                let mut stop_reason = StopReason::EndTurn;

                while let Some(event) = events.next().await {
                    let event = event?;

                    thread.update(&mut cx, |thread, cx| {
                        match event {
                            LanguageModelCompletionEvent::StartMessage { .. } => {
                                thread.insert_message(Role::Assistant, String::new(), cx);
                            }
                            LanguageModelCompletionEvent::Stop(reason) => {
                                stop_reason = reason;
                            }
                            LanguageModelCompletionEvent::Text(chunk) => {
                                if let Some(last_message) = thread.messages.last_mut() {
                                    if last_message.role == Role::Assistant {
                                        last_message.text.push_str(&chunk);
                                        cx.emit(ThreadEvent::StreamedAssistantText(
                                            last_message.id,
                                            chunk,
                                        ));
                                    }
                                }
                            }
                            LanguageModelCompletionEvent::ToolUse(tool_use) => {
                                if let Some(last_assistant_message) = thread
                                    .messages
                                    .iter()
                                    .rfind(|message| message.role == Role::Assistant)
                                {
                                    thread
                                        .tool_uses_by_message
                                        .entry(last_assistant_message.id)
                                        .or_default()
                                        .push(tool_use.clone());

                                    thread.pending_tool_uses_by_id.insert(
                                        tool_use.id.clone(),
                                        PendingToolUse {
                                            assistant_message_id: last_assistant_message.id,
                                            id: tool_use.id,
                                            name: tool_use.name,
                                            input: tool_use.input,
                                            status: PendingToolUseStatus::Idle,
                                        },
                                    );
                                }
                            }
                        }

                        thread.touch_updated_at();
                        cx.emit(ThreadEvent::StreamedCompletion);
                        cx.notify();
                    })?;

                    smol::future::yield_now().await;
                }

                thread.update(&mut cx, |thread, cx| {
                    thread
                        .pending_completions
                        .retain(|completion| completion.id != pending_completion_id);

                    if thread.summary.is_none() && thread.messages.len() >= 2 {
                        thread.summarize(cx);
                    }
                })?;

                anyhow::Ok(stop_reason)
            };

            let result = stream_completion.await;

            thread
                .update(&mut cx, |_thread, cx| match result.as_ref() {
                    Ok(stop_reason) => match stop_reason {
                        StopReason::ToolUse => {
                            cx.emit(ThreadEvent::UsePendingTools);
                        }
                        StopReason::EndTurn => {}
                        StopReason::MaxTokens => {}
                    },
                    Err(error) => {
                        if error.is::<PaymentRequiredError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::PaymentRequired));
                        } else if error.is::<MaxMonthlySpendReachedError>() {
                            cx.emit(ThreadEvent::ShowError(ThreadError::MaxMonthlySpendReached));
                        } else {
                            let error_message = error
                                .chain()
                                .map(|err| err.to_string())
                                .collect::<Vec<_>>()
                                .join("\n");
                            cx.emit(ThreadEvent::ShowError(ThreadError::Message(
                                SharedString::from(error_message),
                            )));
                        }
                    }
                })
                .ok();
        });

        self.pending_completions.push(PendingCompletion {
            id: pending_completion_id,
            _task: task,
        });
    }

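    /// Asks the active model for a short title for this conversation and
    /// stores it as the thread's summary. Does nothing if no provider or
    /// model is configured, or if the provider is not authenticated.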
    pub fn summarize(&mut self, cx: &mut ModelContext<Self>) {
        let Some(provider) = LanguageModelRegistry::read_global(cx).active_provider() else {
            return;
        };
        let Some(model) = LanguageModelRegistry::read_global(cx).active_model() else {
            return;
        };

        if !provider.is_authenticated(cx) {
            return;
        }

        let mut request = self.to_completion_request(RequestKind::Chat, cx);
        request.messages.push(LanguageModelRequestMessage {
            role: Role::User,
            content: vec![
                "Generate a concise 3-7 word title for this conversation, omitting punctuation. Go straight to the title, without any preamble or prefix like `Here's a concise suggestion:...` or `Title:`"
                    .into(),
            ],
            cache: false,
        });

        self.pending_summary = cx.spawn(|this, mut cx| {
            async move {
                let stream = model.stream_completion_text(request, &cx);
                let mut messages = stream.await?;

                let mut new_summary = String::new();
                while let Some(message) = messages.stream.next().await {
                    let text = message?;
                    let mut lines = text.lines();
                    new_summary.extend(lines.next());

                    // Stop if the LLM generated multiple lines.
                    if lines.next().is_some() {
                        break;
                    }
                }

                this.update(&mut cx, |this, cx| {
                    if !new_summary.is_empty() {
                        this.summary = Some(new_summary.into());
                    }

                    cx.emit(ThreadEvent::SummaryChanged);
                })?;

                anyhow::Ok(())
            }
            .log_err()
        });
    }

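    /// Resolves a pending tool use by awaiting `output` and recording the
    /// result (or the error) so that it is attached to the user message that
    /// follows the requesting assistant message.
    ///
    /// A hypothetical call site (sketch only; `assistant_message_id` and
    /// `tool_use_id` come from a previously reported tool use):
    ///
    /// ```ignore
    /// // A tool that immediately returns a fixed string, for illustration.
    /// let output = Task::ready(Ok("tool output".to_string()));
    /// thread.update(cx, |thread, cx| {
    ///     thread.insert_tool_output(assistant_message_id, tool_use_id, output, cx);
    /// });
    /// ```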
    pub fn insert_tool_output(
        &mut self,
        assistant_message_id: MessageId,
        tool_use_id: LanguageModelToolUseId,
        output: Task<Result<String>>,
        cx: &mut ModelContext<Self>,
    ) {
        let insert_output_task = cx.spawn(|thread, mut cx| {
            let tool_use_id = tool_use_id.clone();
            async move {
                let output = output.await;
                thread
                    .update(&mut cx, |thread, cx| {
                        // The tool use was requested by an Assistant message,
                        // so we want to attach the tool results to the next
                        // user message.
                        let next_user_message = MessageId(assistant_message_id.0 + 1);

                        let tool_results = thread
                            .tool_results_by_message
                            .entry(next_user_message)
                            .or_default();

                        match output {
                            Ok(output) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: output,
                                    is_error: false,
                                });

                                cx.emit(ThreadEvent::ToolFinished { tool_use_id });
                            }
                            Err(err) => {
                                tool_results.push(LanguageModelToolResult {
                                    tool_use_id: tool_use_id.to_string(),
                                    content: err.to_string(),
                                    is_error: true,
                                });

                                if let Some(tool_use) =
                                    thread.pending_tool_uses_by_id.get_mut(&tool_use_id)
                                {
                                    tool_use.status = PendingToolUseStatus::Error(err.to_string());
                                }
                            }
                        }
                    })
                    .ok();
            }
        });

        if let Some(tool_use) = self.pending_tool_uses_by_id.get_mut(&tool_use_id) {
            tool_use.status = PendingToolUseStatus::Running {
                _task: insert_output_task.shared(),
            };
        }
    }
}

#[derive(Debug, Clone)]
pub enum ThreadError {
    PaymentRequired,
    MaxMonthlySpendReached,
    Message(SharedString),
}

#[derive(Debug, Clone)]
pub enum ThreadEvent {
    ShowError(ThreadError),
    StreamedCompletion,
    StreamedAssistantText(MessageId, String),
    MessageAdded(MessageId),
    SummaryChanged,
    UsePendingTools,
    ToolFinished {
        #[allow(unused)]
        tool_use_id: LanguageModelToolUseId,
    },
}

impl EventEmitter<ThreadEvent> for Thread {}

struct PendingCompletion {
    id: usize,
    _task: Task<()>,
}

#[derive(Debug, Clone)]
pub struct PendingToolUse {
    pub id: LanguageModelToolUseId,
    /// The ID of the Assistant message in which the tool use was requested.
    pub assistant_message_id: MessageId,
    pub name: String,
    pub input: serde_json::Value,
    pub status: PendingToolUseStatus,
}

#[derive(Debug, Clone)]
pub enum PendingToolUseStatus {
    Idle,
    Running { _task: Shared<Task<()>> },
    Error(#[allow(unused)] String),
}

impl PendingToolUseStatus {
    pub fn is_idle(&self) -> bool {
        matches!(self, PendingToolUseStatus::Idle)
    }
}