use std::cell::{Ref, RefCell};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::sync::{Arc, Mutex};

use agent_settings::{AgentProfileId, CompletionMode};
use anyhow::{Context as _, Result, anyhow};
use assistant_tool::{ToolId, ToolWorkingSet};
use chrono::{DateTime, Utc};
use collections::HashMap;
use context_server::ContextServerId;
use futures::channel::{mpsc, oneshot};
use futures::future::{self, BoxFuture, Shared};
use futures::{FutureExt as _, StreamExt as _};
use gpui::{
    App, BackgroundExecutor, Context, Entity, EventEmitter, Global, ReadGlobal, SharedString,
    Subscription, Task, prelude::*,
};

use language_model::{LanguageModelToolResultContent, LanguageModelToolUseId, Role, TokenUsage};
use project::context_server_store::{ContextServerStatus, ContextServerStore};
use project::{Project, ProjectItem, ProjectPath, Worktree};
use prompt_store::{
    ProjectContext, PromptBuilder, PromptId, PromptStore, PromptsUpdatedEvent, RulesFileContext,
    UserRulesContext, WorktreeContext,
};
use serde::{Deserialize, Serialize};
use ui::Window;
use util::ResultExt as _;

use crate::context_server_tool::ContextServerTool;
use crate::thread::{
    DetailedSummaryState, ExceededWindowError, MessageId, ProjectSnapshot, Thread, ThreadId,
};
use indoc::indoc;
use sqlez::{
    bindable::{Bind, Column},
    connection::Connection,
    statement::Statement,
};
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DataType {
    #[serde(rename = "json")]
    Json,
    #[serde(rename = "zstd")]
    Zstd,
}

impl Bind for DataType {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        let value = match self {
            DataType::Json => "json",
            DataType::Zstd => "zstd",
        };
        value.bind(statement, start_index)
    }
}

impl Column for DataType {
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let (value, next_index) = String::column(statement, start_index)?;
        let data_type = match value.as_str() {
            "json" => DataType::Json,
            "zstd" => DataType::Zstd,
            _ => anyhow::bail!("Unknown data type: {}", value),
        };
        Ok((data_type, next_index))
    }
}

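/// Rules file names recognized at a worktree root. The first name in this list
/// that exists as a file is used as that worktree's rules file.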
const RULES_FILE_NAMES: [&'static str; 8] = [
    ".rules",
    ".cursorrules",
    ".windsurfrules",
    ".clinerules",
    ".github/copilot-instructions.md",
    "CLAUDE.md",
    "AGENT.md",
    "AGENTS.md",
];

pub fn init(cx: &mut App) {
    ThreadsDatabase::init(cx);
}

/// The project context used to build the system prompt, shared by all threads
/// created by this `ThreadStore`.
#[derive(Clone, Default)]
pub struct SharedProjectContext(Rc<RefCell<Option<ProjectContext>>>);

impl SharedProjectContext {
    pub fn borrow(&self) -> Ref<Option<ProjectContext>> {
        self.0.borrow()
    }
}

pub type TextThreadStore = assistant_context_editor::ContextStore;

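/// Owns the set of saved agent threads for a project: it loads thread metadata
/// from the local `ThreadsDatabase`, keeps the shared `ProjectContext` used for
/// system prompts up to date, and registers tools exposed by running context
/// servers.
///
/// A minimal usage sketch (assuming an `App` context `cx` and an already loaded
/// `Entity<ThreadStore>` named `thread_store`):
///
/// ```ignore
/// let thread = thread_store.update(cx, |store, cx| store.create_thread(cx));
/// ```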
pub struct ThreadStore {
    project: Entity<Project>,
    tools: Entity<ToolWorkingSet>,
    prompt_builder: Arc<PromptBuilder>,
    prompt_store: Option<Entity<PromptStore>>,
    context_server_tool_ids: HashMap<ContextServerId, Vec<ToolId>>,
    threads: Vec<SerializedThreadMetadata>,
    project_context: SharedProjectContext,
    reload_system_prompt_tx: mpsc::Sender<()>,
    _reload_system_prompt_task: Task<()>,
    _subscriptions: Vec<Subscription>,
}

pub struct RulesLoadingError {
    pub message: SharedString,
}

impl EventEmitter<RulesLoadingError> for ThreadStore {}

impl ThreadStore {
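    /// Creates a `ThreadStore` and resolves once the initial system prompt
    /// (project context) has been loaded.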
    pub fn load(
        project: Entity<Project>,
        tools: Entity<ToolWorkingSet>,
        prompt_store: Option<Entity<PromptStore>>,
        prompt_builder: Arc<PromptBuilder>,
        cx: &mut App,
    ) -> Task<Result<Entity<Self>>> {
        cx.spawn(async move |cx| {
            let (thread_store, ready_rx) = cx.update(|cx| {
                let mut option_ready_rx = None;
                let thread_store = cx.new(|cx| {
                    let (thread_store, ready_rx) =
                        Self::new(project, tools, prompt_builder, prompt_store, cx);
                    option_ready_rx = Some(ready_rx);
                    thread_store
                });
                (thread_store, option_ready_rx.take().unwrap())
            })?;
            ready_rx.await?;
            Ok(thread_store)
        })
    }

    fn new(
        project: Entity<Project>,
        tools: Entity<ToolWorkingSet>,
        prompt_builder: Arc<PromptBuilder>,
        prompt_store: Option<Entity<PromptStore>>,
        cx: &mut Context<Self>,
    ) -> (Self, oneshot::Receiver<()>) {
        let mut subscriptions = vec![cx.subscribe(&project, Self::handle_project_event)];

        if let Some(prompt_store) = prompt_store.as_ref() {
            subscriptions.push(cx.subscribe(
                prompt_store,
                |this, _prompt_store, PromptsUpdatedEvent, _cx| {
                    this.enqueue_system_prompt_reload();
                },
            ))
        }

        // This channel and task prevent concurrent and redundant loading of the system prompt.
        let (reload_system_prompt_tx, mut reload_system_prompt_rx) = mpsc::channel(1);
        let (ready_tx, ready_rx) = oneshot::channel();
        let mut ready_tx = Some(ready_tx);
        let reload_system_prompt_task = cx.spawn({
            let prompt_store = prompt_store.clone();
            async move |thread_store, cx| {
                loop {
                    let Some(reload_task) = thread_store
                        .update(cx, |thread_store, cx| {
                            thread_store.reload_system_prompt(prompt_store.clone(), cx)
                        })
                        .ok()
                    else {
                        return;
                    };
                    reload_task.await;
                    if let Some(ready_tx) = ready_tx.take() {
                        ready_tx.send(()).ok();
                    }
                    reload_system_prompt_rx.next().await;
                }
            }
        });

        let this = Self {
            project,
            tools,
            prompt_builder,
            prompt_store,
            context_server_tool_ids: HashMap::default(),
            threads: Vec::new(),
            project_context: SharedProjectContext::default(),
            reload_system_prompt_tx,
            _reload_system_prompt_task: reload_system_prompt_task,
            _subscriptions: subscriptions,
        };
        this.register_context_server_handlers(cx);
        this.reload(cx).detach_and_log_err(cx);
        (this, ready_rx)
    }

    fn handle_project_event(
        &mut self,
        _project: Entity<Project>,
        event: &project::Event,
        _cx: &mut Context<Self>,
    ) {
        match event {
            project::Event::WorktreeAdded(_) | project::Event::WorktreeRemoved(_) => {
                self.enqueue_system_prompt_reload();
            }
            project::Event::WorktreeUpdatedEntries(_, items) => {
                if items.iter().any(|(path, _, _)| {
                    RULES_FILE_NAMES
                        .iter()
                        .any(|name| path.as_ref() == Path::new(name))
                }) {
                    self.enqueue_system_prompt_reload();
                }
            }
            _ => {}
        }
    }

    fn enqueue_system_prompt_reload(&mut self) {
        self.reload_system_prompt_tx.try_send(()).ok();
    }

    // Note that this should only be called from `reload_system_prompt_task`.
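    // It gathers each visible worktree's rules file and the prompt store's
    // default user rules, then replaces the shared `ProjectContext`.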
    fn reload_system_prompt(
        &self,
        prompt_store: Option<Entity<PromptStore>>,
        cx: &mut Context<Self>,
    ) -> Task<()> {
        let worktrees = self
            .project
            .read(cx)
            .visible_worktrees(cx)
            .collect::<Vec<_>>();
        let worktree_tasks = worktrees
            .into_iter()
            .map(|worktree| {
                Self::load_worktree_info_for_system_prompt(worktree, self.project.clone(), cx)
            })
            .collect::<Vec<_>>();
        let default_user_rules_task = match prompt_store {
            None => Task::ready(vec![]),
            Some(prompt_store) => prompt_store.read_with(cx, |prompt_store, cx| {
                let prompts = prompt_store.default_prompt_metadata();
                let load_tasks = prompts.into_iter().map(|prompt_metadata| {
                    let contents = prompt_store.load(prompt_metadata.id, cx);
                    async move { (contents.await, prompt_metadata) }
                });
                cx.background_spawn(future::join_all(load_tasks))
            }),
        };

        cx.spawn(async move |this, cx| {
            let (worktrees, default_user_rules) =
                future::join(future::join_all(worktree_tasks), default_user_rules_task).await;

            let worktrees = worktrees
                .into_iter()
                .map(|(worktree, rules_error)| {
                    if let Some(rules_error) = rules_error {
                        this.update(cx, |_, cx| cx.emit(rules_error)).ok();
                    }
                    worktree
                })
                .collect::<Vec<_>>();

            let default_user_rules = default_user_rules
                .into_iter()
                .flat_map(|(contents, prompt_metadata)| match contents {
                    Ok(contents) => Some(UserRulesContext {
                        uuid: match prompt_metadata.id {
                            PromptId::User { uuid } => uuid,
                            PromptId::EditWorkflow => return None,
                        },
                        title: prompt_metadata.title.map(|title| title.to_string()),
                        contents,
                    }),
                    Err(err) => {
                        this.update(cx, |_, cx| {
                            cx.emit(RulesLoadingError {
                                message: format!("{err:?}").into(),
                            });
                        })
                        .ok();
                        None
                    }
                })
                .collect::<Vec<_>>();

            this.update(cx, |this, _cx| {
                *this.project_context.0.borrow_mut() =
                    Some(ProjectContext::new(worktrees, default_user_rules));
            })
            .ok();
        })
    }

    fn load_worktree_info_for_system_prompt(
        worktree: Entity<Worktree>,
        project: Entity<Project>,
        cx: &mut App,
    ) -> Task<(WorktreeContext, Option<RulesLoadingError>)> {
        let root_name = worktree.read(cx).root_name().into();

        let rules_task = Self::load_worktree_rules_file(worktree, project, cx);
        let Some(rules_task) = rules_task else {
            return Task::ready((
                WorktreeContext {
                    root_name,
                    rules_file: None,
                },
                None,
            ));
        };

        cx.spawn(async move |_| {
            let (rules_file, rules_file_error) = match rules_task.await {
                Ok(rules_file) => (Some(rules_file), None),
                Err(err) => (
                    None,
                    Some(RulesLoadingError {
                        message: format!("{err}").into(),
                    }),
                ),
            };
            let worktree_info = WorktreeContext {
                root_name,
                rules_file,
            };
            (worktree_info, rules_file_error)
        })
    }

    fn load_worktree_rules_file(
        worktree: Entity<Worktree>,
        project: Entity<Project>,
        cx: &mut App,
    ) -> Option<Task<Result<RulesFileContext>>> {
        let worktree_ref = worktree.read(cx);
        let worktree_id = worktree_ref.id();
        let selected_rules_file = RULES_FILE_NAMES
            .into_iter()
            .filter_map(|name| {
                worktree_ref
                    .entry_for_path(name)
                    .filter(|entry| entry.is_file())
                    .map(|entry| entry.path.clone())
            })
            .next();

        // Note that Cline supports `.clinerules` being a directory, but that is not currently
        // supported. This doesn't seem to occur often in GitHub repositories.
        selected_rules_file.map(|path_in_worktree| {
            let project_path = ProjectPath {
                worktree_id,
                path: path_in_worktree.clone(),
            };
            let buffer_task =
                project.update(cx, |project, cx| project.open_buffer(project_path, cx));
            let rope_task = cx.spawn(async move |cx| {
                buffer_task.await?.read_with(cx, |buffer, cx| {
                    let project_entry_id = buffer.entry_id(cx).context("buffer has no file")?;
                    anyhow::Ok((project_entry_id, buffer.as_rope().clone()))
                })?
            });
            // Build a string from the rope on a background thread.
            cx.background_spawn(async move {
                let (project_entry_id, rope) = rope_task.await?;
                anyhow::Ok(RulesFileContext {
                    path_in_worktree,
                    text: rope.to_string().trim().to_string(),
                    project_entry_id: project_entry_id.to_usize(),
                })
            })
        })
    }

    pub fn prompt_store(&self) -> &Option<Entity<PromptStore>> {
        &self.prompt_store
    }

    pub fn tools(&self) -> Entity<ToolWorkingSet> {
        self.tools.clone()
    }

    /// Returns the number of threads.
    pub fn thread_count(&self) -> usize {
        self.threads.len()
    }

    pub fn unordered_threads(&self) -> impl Iterator<Item = &SerializedThreadMetadata> {
        self.threads.iter()
    }

    pub fn reverse_chronological_threads(&self) -> Vec<SerializedThreadMetadata> {
        let mut threads = self.threads.iter().cloned().collect::<Vec<_>>();
        threads.sort_unstable_by_key(|thread| std::cmp::Reverse(thread.updated_at));
        threads
    }

    pub fn create_thread(&mut self, cx: &mut Context<Self>) -> Entity<Thread> {
        cx.new(|cx| {
            Thread::new(
                self.project.clone(),
                self.tools.clone(),
                self.prompt_builder.clone(),
                self.project_context.clone(),
                cx,
            )
        })
    }

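    /// Deserializes `serialized` into a new thread with a freshly generated
    /// `ThreadId` rather than reusing the persisted one.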
    pub fn create_thread_from_serialized(
        &mut self,
        serialized: SerializedThread,
        cx: &mut Context<Self>,
    ) -> Entity<Thread> {
        cx.new(|cx| {
            Thread::deserialize(
                ThreadId::new(),
                serialized,
                self.project.clone(),
                self.tools.clone(),
                self.prompt_builder.clone(),
                self.project_context.clone(),
                None,
                cx,
            )
        })
    }

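    /// Loads the thread with the given ID from the database and deserializes
    /// it, failing if no such thread exists.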
    pub fn open_thread(
        &self,
        id: &ThreadId,
        window: &mut Window,
        cx: &mut Context<Self>,
    ) -> Task<Result<Entity<Thread>>> {
        let id = id.clone();
        let database_future = ThreadsDatabase::global_future(cx);
        let this = cx.weak_entity();
        window.spawn(cx, async move |cx| {
            let database = database_future.await.map_err(|err| anyhow!(err))?;
            let thread = database
                .try_find_thread(id.clone())
                .await?
                .with_context(|| format!("no thread found with ID: {id:?}"))?;

            let thread = this.update_in(cx, |this, window, cx| {
                cx.new(|cx| {
                    Thread::deserialize(
                        id.clone(),
                        thread,
                        this.project.clone(),
                        this.tools.clone(),
                        this.prompt_builder.clone(),
                        this.project_context.clone(),
                        Some(window),
                        cx,
                    )
                })
            })?;

            Ok(thread)
        })
    }

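    /// Serializes the thread and writes it to the database, then reloads the
    /// in-memory thread metadata list.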
    pub fn save_thread(&self, thread: &Entity<Thread>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let (metadata, serialized_thread) =
            thread.update(cx, |thread, cx| (thread.id().clone(), thread.serialize(cx)));

        let database_future = ThreadsDatabase::global_future(cx);
        cx.spawn(async move |this, cx| {
            let serialized_thread = serialized_thread.await?;
            let database = database_future.await.map_err(|err| anyhow!(err))?;
            database.save_thread(metadata, serialized_thread).await?;

            this.update(cx, |this, cx| this.reload(cx))?.await
        })
    }

    pub fn delete_thread(&mut self, id: &ThreadId, cx: &mut Context<Self>) -> Task<Result<()>> {
        let id = id.clone();
        let database_future = ThreadsDatabase::global_future(cx);
        cx.spawn(async move |this, cx| {
            let database = database_future.await.map_err(|err| anyhow!(err))?;
            database.delete_thread(id.clone()).await?;

            this.update(cx, |this, cx| {
                this.threads.retain(|thread| thread.id != id);
                cx.notify();
            })
        })
    }

    pub fn reload(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let database_future = ThreadsDatabase::global_future(cx);
        cx.spawn(async move |this, cx| {
            let threads = database_future
                .await
                .map_err(|err| anyhow!(err))?
                .list_threads()
                .await?;

            this.update(cx, |this, cx| {
                this.threads = threads;
                cx.notify();
            })
        })
    }

    fn register_context_server_handlers(&self, cx: &mut Context<Self>) {
        let context_server_store = self.project.read(cx).context_server_store();
        cx.subscribe(&context_server_store, Self::handle_context_server_event)
            .detach();

        // Check for any servers that were already running before the handler was registered
        for server in context_server_store.read(cx).running_servers() {
            self.load_context_server_tools(server.id(), context_server_store.clone(), cx);
        }
    }

    fn handle_context_server_event(
        &mut self,
        context_server_store: Entity<ContextServerStore>,
        event: &project::context_server_store::Event,
        cx: &mut Context<Self>,
    ) {
        let tool_working_set = self.tools.clone();
        match event {
            project::context_server_store::Event::ServerStatusChanged { server_id, status } => {
                match status {
                    ContextServerStatus::Starting => {}
                    ContextServerStatus::Running => {
                        self.load_context_server_tools(server_id.clone(), context_server_store, cx);
                    }
                    ContextServerStatus::Stopped | ContextServerStatus::Error(_) => {
                        if let Some(tool_ids) = self.context_server_tool_ids.remove(server_id) {
                            tool_working_set.update(cx, |tool_working_set, _| {
                                tool_working_set.remove(&tool_ids);
                            });
                        }
                    }
                }
            }
        }
    }

    fn load_context_server_tools(
        &self,
        server_id: ContextServerId,
        context_server_store: Entity<ContextServerStore>,
        cx: &mut Context<Self>,
    ) {
        let Some(server) = context_server_store.read(cx).get_running_server(&server_id) else {
            return;
        };
        let tool_working_set = self.tools.clone();
        cx.spawn(async move |this, cx| {
            let Some(protocol) = server.client() else {
                return;
            };

            if protocol.capable(context_server::protocol::ServerCapability::Tools) {
                if let Some(response) = protocol
                    .request::<context_server::types::request::ListTools>(())
                    .await
                    .log_err()
                {
                    let tool_ids = tool_working_set
                        .update(cx, |tool_working_set, _| {
                            response
                                .tools
                                .into_iter()
                                .map(|tool| {
                                    log::info!("registering context server tool: {:?}", tool.name);
                                    tool_working_set.insert(Arc::new(ContextServerTool::new(
                                        context_server_store.clone(),
                                        server.id(),
                                        tool,
                                    )))
                                })
                                .collect::<Vec<_>>()
                        })
                        .log_err();

                    if let Some(tool_ids) = tool_ids {
                        this.update(cx, |this, _| {
                            this.context_server_tool_ids.insert(server_id, tool_ids);
                        })
                        .log_err();
                    }
                }
            }
        })
        .detach();
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SerializedThreadMetadata {
    pub id: ThreadId,
    pub summary: SharedString,
    pub updated_at: DateTime<Utc>,
}

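/// The on-disk representation of a thread (current version "0.2.0"). Older
/// formats are upgraded to this structure when loaded.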
#[derive(Serialize, Deserialize, Debug)]
pub struct SerializedThread {
    pub version: String,
    pub summary: SharedString,
    pub updated_at: DateTime<Utc>,
    pub messages: Vec<SerializedMessage>,
    #[serde(default)]
    pub initial_project_snapshot: Option<Arc<ProjectSnapshot>>,
    #[serde(default)]
    pub cumulative_token_usage: TokenUsage,
    #[serde(default)]
    pub request_token_usage: Vec<TokenUsage>,
    #[serde(default)]
    pub detailed_summary_state: DetailedSummaryState,
    #[serde(default)]
    pub exceeded_window_error: Option<ExceededWindowError>,
    #[serde(default)]
    pub model: Option<SerializedLanguageModel>,
    #[serde(default)]
    pub completion_mode: Option<CompletionMode>,
    #[serde(default)]
    pub tool_use_limit_reached: bool,
    #[serde(default)]
    pub profile: Option<AgentProfileId>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct SerializedLanguageModel {
    pub provider: String,
    pub model: String,
}

impl SerializedThread {
    pub const VERSION: &'static str = "0.2.0";

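    /// Parses a serialized thread from JSON, dispatching on its `version`
    /// field: unversioned legacy threads and version "0.1.0" threads are
    /// upgraded to the current format.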
    pub fn from_json(json: &[u8]) -> Result<Self> {
        let saved_thread_json = serde_json::from_slice::<serde_json::Value>(json)?;
        match saved_thread_json.get("version") {
            Some(serde_json::Value::String(version)) => match version.as_str() {
                SerializedThreadV0_1_0::VERSION => {
                    let saved_thread =
                        serde_json::from_value::<SerializedThreadV0_1_0>(saved_thread_json)?;
                    Ok(saved_thread.upgrade())
                }
                SerializedThread::VERSION => Ok(serde_json::from_value::<SerializedThread>(
                    saved_thread_json,
                )?),
                _ => anyhow::bail!("unrecognized serialized thread version: {version:?}"),
            },
            None => {
                let saved_thread =
                    serde_json::from_value::<LegacySerializedThread>(saved_thread_json)?;
                Ok(saved_thread.upgrade())
            }
            version => anyhow::bail!("unrecognized serialized thread version: {version:?}"),
        }
    }
}

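/// Version 0.1.0 of the serialized thread format, in which tool results were
/// carried on the user message following a tool use; upgrading moves them onto
/// the preceding assistant message.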
#[derive(Serialize, Deserialize, Debug)]
pub struct SerializedThreadV0_1_0(
    // The structure did not change, so we are reusing the latest SerializedThread.
    // When making the next version, make sure this points to SerializedThreadV0_2_0
    SerializedThread,
);

impl SerializedThreadV0_1_0 {
    pub const VERSION: &'static str = "0.1.0";

    pub fn upgrade(self) -> SerializedThread {
        debug_assert_eq!(SerializedThread::VERSION, "0.2.0");

        let mut messages: Vec<SerializedMessage> = Vec::with_capacity(self.0.messages.len());

        for message in self.0.messages {
            if message.role == Role::User && !message.tool_results.is_empty() {
                if let Some(last_message) = messages.last_mut() {
                    debug_assert!(last_message.role == Role::Assistant);

                    last_message.tool_results = message.tool_results;
                    continue;
                }
            }

            messages.push(message);
        }

        SerializedThread { messages, ..self.0 }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SerializedMessage {
    pub id: MessageId,
    pub role: Role,
    #[serde(default)]
    pub segments: Vec<SerializedMessageSegment>,
    #[serde(default)]
    pub tool_uses: Vec<SerializedToolUse>,
    #[serde(default)]
    pub tool_results: Vec<SerializedToolResult>,
    #[serde(default)]
    pub context: String,
    #[serde(default)]
    pub creases: Vec<SerializedCrease>,
    #[serde(default)]
    pub is_hidden: bool,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum SerializedMessageSegment {
    #[serde(rename = "text")]
    Text {
        text: String,
    },
    #[serde(rename = "thinking")]
    Thinking {
        text: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        signature: Option<String>,
    },
    RedactedThinking {
        data: Vec<u8>,
    },
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SerializedToolUse {
    pub id: LanguageModelToolUseId,
    pub name: SharedString,
    pub input: serde_json::Value,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SerializedToolResult {
    pub tool_use_id: LanguageModelToolUseId,
    pub is_error: bool,
    pub content: LanguageModelToolResultContent,
    pub output: Option<serde_json::Value>,
}

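/// The unversioned thread format that predates the `version` field; upgraded on
/// load by filling the newer fields with defaults.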
#[derive(Serialize, Deserialize)]
struct LegacySerializedThread {
    pub summary: SharedString,
    pub updated_at: DateTime<Utc>,
    pub messages: Vec<LegacySerializedMessage>,
    #[serde(default)]
    pub initial_project_snapshot: Option<Arc<ProjectSnapshot>>,
}

impl LegacySerializedThread {
    pub fn upgrade(self) -> SerializedThread {
        SerializedThread {
            version: SerializedThread::VERSION.to_string(),
            summary: self.summary,
            updated_at: self.updated_at,
            messages: self.messages.into_iter().map(|msg| msg.upgrade()).collect(),
            initial_project_snapshot: self.initial_project_snapshot,
            cumulative_token_usage: TokenUsage::default(),
            request_token_usage: Vec::new(),
            detailed_summary_state: DetailedSummaryState::default(),
            exceeded_window_error: None,
            model: None,
            completion_mode: None,
            tool_use_limit_reached: false,
            profile: None,
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
struct LegacySerializedMessage {
    pub id: MessageId,
    pub role: Role,
    pub text: String,
    #[serde(default)]
    pub tool_uses: Vec<SerializedToolUse>,
    #[serde(default)]
    pub tool_results: Vec<SerializedToolResult>,
}

impl LegacySerializedMessage {
    fn upgrade(self) -> SerializedMessage {
        SerializedMessage {
            id: self.id,
            role: self.role,
            segments: vec![SerializedMessageSegment::Text { text: self.text }],
            tool_uses: self.tool_uses,
            tool_results: self.tool_results,
            context: String::new(),
            creases: Vec::new(),
            is_hidden: false,
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SerializedCrease {
    pub start: usize,
    pub end: usize,
    pub icon_path: SharedString,
    pub label: SharedString,
}

struct GlobalThreadsDatabase(
    Shared<BoxFuture<'static, Result<Arc<ThreadsDatabase>, Arc<anyhow::Error>>>>,
);

impl Global for GlobalThreadsDatabase {}

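/// SQLite-backed storage for serialized threads. Thread payloads are written as
/// zstd-compressed JSON; plain JSON rows are still accepted when read back.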
pub(crate) struct ThreadsDatabase {
    executor: BackgroundExecutor,
    connection: Arc<Mutex<Connection>>,
}

impl ThreadsDatabase {
    fn connection(&self) -> Arc<Mutex<Connection>> {
        self.connection.clone()
    }

    const COMPRESSION_LEVEL: i32 = 3;
}

impl Bind for ThreadId {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        self.to_string().bind(statement, start_index)
    }
}

impl Column for ThreadId {
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let (id_str, next_index) = String::column(statement, start_index)?;
        Ok((ThreadId::from(id_str.as_str()), next_index))
    }
}

impl ThreadsDatabase {
    fn global_future(
        cx: &mut App,
    ) -> Shared<BoxFuture<'static, Result<Arc<ThreadsDatabase>, Arc<anyhow::Error>>>> {
        GlobalThreadsDatabase::global(cx).0.clone()
    }

    fn init(cx: &mut App) {
        let executor = cx.background_executor().clone();
        let database_future = executor
            .spawn({
                let executor = executor.clone();
                let threads_dir = paths::data_dir().join("threads");
                async move { ThreadsDatabase::new(threads_dir, executor) }
            })
            .then(|result| future::ready(result.map(Arc::new).map_err(Arc::new)))
            .boxed()
            .shared();

        cx.set_global(GlobalThreadsDatabase(database_future));
    }

    pub fn new(threads_dir: PathBuf, executor: BackgroundExecutor) -> Result<Self> {
        std::fs::create_dir_all(&threads_dir)?;

        let sqlite_path = threads_dir.join("threads.db");
        let mdb_path = threads_dir.join("threads-db.1.mdb");

        let needs_migration_from_heed = mdb_path.exists();

        let connection = Connection::open_file(&sqlite_path.to_string_lossy());

        connection.exec(indoc! {"
            CREATE TABLE IF NOT EXISTS threads (
                id TEXT PRIMARY KEY,
                summary TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                data_type TEXT NOT NULL,
                data BLOB NOT NULL
            )
        "})?()
        .map_err(|e| anyhow!("Failed to create threads table: {}", e))?;

        let db = Self {
            executor: executor.clone(),
            connection: Arc::new(Mutex::new(connection)),
        };

        if needs_migration_from_heed {
            let db_connection = db.connection();
            let executor_clone = executor.clone();
            executor
                .spawn(async move {
                    log::info!("Starting threads.db migration");
                    Self::migrate_from_heed(&mdb_path, db_connection, executor_clone)?;
                    std::fs::remove_dir_all(mdb_path)?;
                    log::info!("threads.db migrated to sqlite");
                    Ok::<(), anyhow::Error>(())
                })
                .detach();
        }

        Ok(db)
    }

    // Remove this migration after 2025-09-01
    fn migrate_from_heed(
        mdb_path: &Path,
        connection: Arc<Mutex<Connection>>,
        _executor: BackgroundExecutor,
    ) -> Result<()> {
        use heed::types::SerdeBincode;
        struct SerializedThreadHeed(SerializedThread);

        impl heed::BytesEncode<'_> for SerializedThreadHeed {
            type EItem = SerializedThreadHeed;

            fn bytes_encode(
                item: &Self::EItem,
            ) -> Result<std::borrow::Cow<[u8]>, heed::BoxedError> {
                serde_json::to_vec(&item.0)
                    .map(std::borrow::Cow::Owned)
                    .map_err(Into::into)
            }
        }

        impl<'a> heed::BytesDecode<'a> for SerializedThreadHeed {
            type DItem = SerializedThreadHeed;

            fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, heed::BoxedError> {
                SerializedThread::from_json(bytes)
                    .map(SerializedThreadHeed)
                    .map_err(Into::into)
            }
        }

        const ONE_GB_IN_BYTES: usize = 1024 * 1024 * 1024;

        let env = unsafe {
            heed::EnvOpenOptions::new()
                .map_size(ONE_GB_IN_BYTES)
                .max_dbs(1)
                .open(mdb_path)?
        };

        let txn = env.write_txn()?;
        let threads: heed::Database<SerdeBincode<ThreadId>, SerializedThreadHeed> = env
            .open_database(&txn, Some("threads"))?
            .ok_or_else(|| anyhow!("threads database not found"))?;

        for result in threads.iter(&txn)? {
            let (thread_id, thread_heed) = result?;
            Self::save_thread_sync(&connection, thread_id, thread_heed.0)?;
        }

        Ok(())
    }

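    /// Serializes and zstd-compresses the thread, then upserts it into the
    /// `threads` table. Runs synchronously on the calling thread; async
    /// callers go through `save_thread`.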
    fn save_thread_sync(
        connection: &Arc<Mutex<Connection>>,
        id: ThreadId,
        thread: SerializedThread,
    ) -> Result<()> {
        let json_data = serde_json::to_string(&thread)?;
        let summary = thread.summary.to_string();
        let updated_at = thread.updated_at.to_rfc3339();

        let connection = connection.lock().unwrap();

        let compressed = zstd::encode_all(json_data.as_bytes(), Self::COMPRESSION_LEVEL)?;
        let data_type = DataType::Zstd;
        let data = compressed;

        let mut insert = connection.exec_bound::<(ThreadId, String, String, DataType, Vec<u8>)>(indoc! {"
            INSERT OR REPLACE INTO threads (id, summary, updated_at, data_type, data) VALUES (?, ?, ?, ?, ?)
        "})?;

        insert((id, summary, updated_at, data_type, data))?;

        Ok(())
    }

    pub fn list_threads(&self) -> Task<Result<Vec<SerializedThreadMetadata>>> {
        let connection = self.connection.clone();

        self.executor.spawn(async move {
            let connection = connection.lock().unwrap();
            let mut select =
                connection.select_bound::<(), (ThreadId, String, String)>(indoc! {"
                    SELECT id, summary, updated_at FROM threads ORDER BY updated_at DESC
                "})?;

            let rows = select(())?;
            let mut threads = Vec::new();

            for (id, summary, updated_at) in rows {
                threads.push(SerializedThreadMetadata {
                    id,
                    summary: summary.into(),
                    updated_at: DateTime::parse_from_rfc3339(&updated_at)?.with_timezone(&Utc),
                });
            }

            Ok(threads)
        })
    }

    pub fn try_find_thread(&self, id: ThreadId) -> Task<Result<Option<SerializedThread>>> {
        let connection = self.connection.clone();

        self.executor.spawn(async move {
            let connection = connection.lock().unwrap();
            let mut select = connection.select_bound::<ThreadId, (DataType, Vec<u8>)>(indoc! {"
                SELECT data_type, data FROM threads WHERE id = ? LIMIT 1
            "})?;

            let rows = select(id)?;
            if let Some((data_type, data)) = rows.into_iter().next() {
                let json_data = match data_type {
                    DataType::Zstd => {
                        let decompressed = zstd::decode_all(&data[..])?;
                        String::from_utf8(decompressed)?
                    }
                    DataType::Json => String::from_utf8(data)?,
                };

                let thread = SerializedThread::from_json(json_data.as_bytes())?;
                Ok(Some(thread))
            } else {
                Ok(None)
            }
        })
    }

    pub fn save_thread(&self, id: ThreadId, thread: SerializedThread) -> Task<Result<()>> {
        let connection = self.connection.clone();

        self.executor
            .spawn(async move { Self::save_thread_sync(&connection, id, thread) })
    }

    pub fn delete_thread(&self, id: ThreadId) -> Task<Result<()>> {
        let connection = self.connection.clone();

        self.executor.spawn(async move {
            let connection = connection.lock().unwrap();

            let mut delete = connection.exec_bound::<ThreadId>(indoc! {"
                DELETE FROM threads WHERE id = ?
            "})?;

            delete(id)?;

            Ok(())
        })
    }
}