1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use feature_flags::{AgentV2FeatureFlag, FeatureFlagAppExt};
20use futures::{FutureExt as _, future::Shared};
21use gpui::{AppContext as _, Entity, Global, Subscription, Task};
22use project::AgentId;
23use ui::{App, Context, SharedString};
24use util::ResultExt as _;
25use workspace::PathList;
26
27use crate::DEFAULT_THREAD_TITLE;
28
29pub fn init(cx: &mut App) {
30 ThreadMetadataStore::init_global(cx);
31
32 if cx.has_flag::<AgentV2FeatureFlag>() {
33 migrate_thread_metadata(cx);
34 }
35 cx.observe_flag::<AgentV2FeatureFlag, _>(|has_flag, cx| {
36 if has_flag {
37 migrate_thread_metadata(cx);
38 }
39 })
40 .detach();
41}
42
43/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
44/// We skip migrating threads that do not have a project.
45///
46/// TODO: Remove this after N weeks of shipping the sidebar
47fn migrate_thread_metadata(cx: &mut App) {
48 let store = ThreadMetadataStore::global(cx);
49 let db = store.read(cx).db.clone();
50
51 cx.spawn(async move |cx| {
52 let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();
53
54 let is_first_migration = existing_entries.is_empty();
55
56 let mut to_migrate = store.read_with(cx, |_store, cx| {
57 ThreadStore::global(cx)
58 .read(cx)
59 .entries()
60 .filter_map(|entry| {
61 if existing_entries.contains(&entry.id.0) {
62 return None;
63 }
64
65 Some(ThreadMetadata {
66 session_id: entry.id,
67 agent_id: ZED_AGENT_ID.clone(),
68 title: entry.title,
69 updated_at: entry.updated_at,
70 created_at: entry.created_at,
71 folder_paths: entry.folder_paths,
72 main_worktree_paths: PathList::default(),
73 archived: true,
74 })
75 })
76 .collect::<Vec<_>>()
77 });
78
79 if to_migrate.is_empty() {
80 return anyhow::Ok(());
81 }
82
83 // On the first migration (no entries in DB yet), keep the 5 most
84 // recent threads per project unarchived.
85 if is_first_migration {
86 let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
87 for entry in &mut to_migrate {
88 if entry.folder_paths.is_empty() {
89 continue;
90 }
91 per_project
92 .entry(entry.folder_paths.clone())
93 .or_default()
94 .push(entry);
95 }
96 for entries in per_project.values_mut() {
97 entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
98 for entry in entries.iter_mut().take(5) {
99 entry.archived = false;
100 }
101 }
102 }
103
104 log::info!("Migrating {} thread store entries", to_migrate.len());
105
106 // Manually save each entry to the database and call reload, otherwise
107 // we'll end up triggering lots of reloads after each save
108 for entry in to_migrate {
109 db.save(entry).await?;
110 }
111
112 log::info!("Finished migrating thread store entries");
113
114 let _ = store.update(cx, |store, cx| store.reload(cx));
115 anyhow::Ok(())
116 })
117 .detach_and_log_err(cx);
118}
119
120struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
121impl Global for GlobalThreadMetadataStore {}
122
123/// Lightweight metadata for any thread (native or ACP), enough to populate
124/// the sidebar list and route to the correct load path when clicked.
125#[derive(Debug, Clone, PartialEq)]
126pub struct ThreadMetadata {
127 pub session_id: acp::SessionId,
128 pub agent_id: AgentId,
129 pub title: SharedString,
130 pub updated_at: DateTime<Utc>,
131 pub created_at: Option<DateTime<Utc>>,
132 pub folder_paths: PathList,
133 pub main_worktree_paths: PathList,
134 pub archived: bool,
135}
136
137impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
138 fn from(meta: &ThreadMetadata) -> Self {
139 Self {
140 session_id: meta.session_id.clone(),
141 work_dirs: Some(meta.folder_paths.clone()),
142 title: Some(meta.title.clone()),
143 updated_at: Some(meta.updated_at),
144 created_at: meta.created_at,
145 meta: None,
146 }
147 }
148}
149
150/// Record of a git worktree that was archived (deleted from disk) when its
151/// last thread was archived.
152pub struct ArchivedGitWorktree {
153 /// Auto-incrementing primary key.
154 pub id: i64,
155 /// Absolute path to the directory of the worktree before it was deleted.
156 /// Used when restoring, to put the recreated worktree back where it was.
157 /// If the path already exists on disk, the worktree is assumed to be
158 /// already restored and is used as-is.
159 pub worktree_path: PathBuf,
160 /// Absolute path of the main repository ("main worktree") that owned this worktree.
161 /// Used when restoring, to reattach the recreated worktree to the correct main repo.
162 /// If the main repo isn't found on disk, unarchiving fails because we only store
163 /// commit hashes, and without the actual git repo being available, we can't restore
164 /// the files.
165 pub main_repo_path: PathBuf,
166 /// Branch that was checked out in the worktree at archive time. `None` if
167 /// the worktree was in detached HEAD state, which isn't supported in Zed, but
168 /// could happen if the user made a detached one outside of Zed.
169 /// On restore, we try to switch to this branch. If that fails (e.g. it's
170 /// checked out elsewhere), we auto-generate a new one.
171 pub branch_name: Option<String>,
172 /// SHA of the WIP commit that captures files that were staged (but not yet
173 /// committed) at the time of archiving. This commit can be empty if the
174 /// user had no staged files at the time. It sits directly on top of whatever
175 /// the user's last actual commit was.
176 pub staged_commit_hash: String,
177 /// SHA of the WIP commit that captures files that were unstaged (including
178 /// untracked) at the time of archiving. This commit can be empty if the user
179 /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
180 /// After doing `git reset` past both of these commits, we're back in the state
181 /// we had before archiving, including what was staged, what was unstaged, and
182 /// what was committed.
183 pub unstaged_commit_hash: String,
184 /// SHA of the commit that HEAD pointed at before we created the two WIP
185 /// commits during archival. After resetting past the WIP commits during
186 /// restore, HEAD should land back on this commit. It also serves as a
187 /// pre-restore sanity check (abort if this commit no longer exists in the
188 /// repo) and as a fallback target if the WIP resets fail.
189 pub original_commit_hash: String,
190}
191
192/// The store holds all metadata needed to show threads in the sidebar/the archive.
193///
194/// Automatically listens to AcpThread events and updates metadata if it has changed.
195pub struct ThreadMetadataStore {
196 db: ThreadMetadataDb,
197 threads: HashMap<acp::SessionId, ThreadMetadata>,
198 threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
199 threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
200 reload_task: Option<Shared<Task<()>>>,
201 session_subscriptions: HashMap<acp::SessionId, Subscription>,
202 pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
203 _db_operations_task: Task<()>,
204}
205
206#[derive(Debug, PartialEq)]
207enum DbOperation {
208 Upsert(ThreadMetadata),
209 Delete(acp::SessionId),
210}
211
212impl DbOperation {
213 fn id(&self) -> &acp::SessionId {
214 match self {
215 DbOperation::Upsert(thread) => &thread.session_id,
216 DbOperation::Delete(session_id) => session_id,
217 }
218 }
219}
220
221impl ThreadMetadataStore {
222 #[cfg(not(any(test, feature = "test-support")))]
223 pub fn init_global(cx: &mut App) {
224 if cx.has_global::<Self>() {
225 return;
226 }
227
228 let db = ThreadMetadataDb::global(cx);
229 let thread_store = cx.new(|cx| Self::new(db, cx));
230 cx.set_global(GlobalThreadMetadataStore(thread_store));
231 }
232
233 #[cfg(any(test, feature = "test-support"))]
234 pub fn init_global(cx: &mut App) {
235 let thread = std::thread::current();
236 let test_name = thread.name().unwrap_or("unknown_test");
237 let db_name = format!("THREAD_METADATA_DB_{}", test_name);
238 let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
239 let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
240 cx.set_global(GlobalThreadMetadataStore(thread_store));
241 }
242
243 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
244 cx.try_global::<GlobalThreadMetadataStore>()
245 .map(|store| store.0.clone())
246 }
247
248 pub fn global(cx: &App) -> Entity<Self> {
249 cx.global::<GlobalThreadMetadataStore>().0.clone()
250 }
251
252 pub fn is_empty(&self) -> bool {
253 self.threads.is_empty()
254 }
255
256 /// Returns all thread IDs.
257 pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
258 self.threads.keys().cloned()
259 }
260
261 /// Returns the metadata for a specific thread, if it exists.
262 pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
263 self.threads.get(session_id)
264 }
265
266 /// Returns all threads.
267 pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
268 self.threads.values()
269 }
270
271 /// Returns all archived threads.
272 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
273 self.entries().filter(|t| t.archived)
274 }
275
276 /// Returns all threads for the given path list, excluding archived threads.
277 pub fn entries_for_path(
278 &self,
279 path_list: &PathList,
280 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
281 self.threads_by_paths
282 .get(path_list)
283 .into_iter()
284 .flatten()
285 .filter_map(|s| self.threads.get(s))
286 .filter(|s| !s.archived)
287 }
288
289 /// Returns threads whose `main_worktree_paths` matches the given path list,
290 /// excluding archived threads. This finds threads that were opened in a
291 /// linked worktree but are associated with the given main worktree.
292 pub fn entries_for_main_worktree_path(
293 &self,
294 path_list: &PathList,
295 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
296 self.threads_by_main_paths
297 .get(path_list)
298 .into_iter()
299 .flatten()
300 .filter_map(|s| self.threads.get(s))
301 .filter(|s| !s.archived)
302 }
303
304 fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
305 let db = self.db.clone();
306 self.reload_task.take();
307
308 let list_task = cx
309 .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });
310
311 let reload_task = cx
312 .spawn(async move |this, cx| {
313 let Some(rows) = list_task.await.log_err() else {
314 return;
315 };
316
317 this.update(cx, |this, cx| {
318 this.threads.clear();
319 this.threads_by_paths.clear();
320 this.threads_by_main_paths.clear();
321
322 for row in rows {
323 this.threads_by_paths
324 .entry(row.folder_paths.clone())
325 .or_default()
326 .insert(row.session_id.clone());
327 if !row.main_worktree_paths.is_empty() {
328 this.threads_by_main_paths
329 .entry(row.main_worktree_paths.clone())
330 .or_default()
331 .insert(row.session_id.clone());
332 }
333 this.threads.insert(row.session_id.clone(), row);
334 }
335
336 cx.notify();
337 })
338 .ok();
339 })
340 .shared();
341 self.reload_task = Some(reload_task.clone());
342 reload_task
343 }
344
345 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
346 if !cx.has_flag::<AgentV2FeatureFlag>() {
347 return;
348 }
349
350 for metadata in metadata {
351 self.save_internal(metadata);
352 }
353 cx.notify();
354 }
355
356 #[cfg(any(test, feature = "test-support"))]
357 pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
358 self.save(metadata, cx)
359 }
360
361 fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
362 if !cx.has_flag::<AgentV2FeatureFlag>() {
363 return;
364 }
365
366 self.save_internal(metadata);
367 cx.notify();
368 }
369
370 fn save_internal(&mut self, metadata: ThreadMetadata) {
371 if let Some(thread) = self.threads.get(&metadata.session_id) {
372 if thread.folder_paths != metadata.folder_paths {
373 if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
374 session_ids.remove(&metadata.session_id);
375 }
376 }
377 if thread.main_worktree_paths != metadata.main_worktree_paths
378 && !thread.main_worktree_paths.is_empty()
379 {
380 if let Some(session_ids) = self
381 .threads_by_main_paths
382 .get_mut(&thread.main_worktree_paths)
383 {
384 session_ids.remove(&metadata.session_id);
385 }
386 }
387 }
388
389 self.threads
390 .insert(metadata.session_id.clone(), metadata.clone());
391
392 self.threads_by_paths
393 .entry(metadata.folder_paths.clone())
394 .or_default()
395 .insert(metadata.session_id.clone());
396
397 if !metadata.main_worktree_paths.is_empty() {
398 self.threads_by_main_paths
399 .entry(metadata.main_worktree_paths.clone())
400 .or_default()
401 .insert(metadata.session_id.clone());
402 }
403
404 self.pending_thread_ops_tx
405 .try_send(DbOperation::Upsert(metadata))
406 .log_err();
407 }
408
409 pub fn update_working_directories(
410 &mut self,
411 session_id: &acp::SessionId,
412 work_dirs: PathList,
413 cx: &mut Context<Self>,
414 ) {
415 if !cx.has_flag::<AgentV2FeatureFlag>() {
416 return;
417 }
418
419 if let Some(thread) = self.threads.get(session_id) {
420 self.save_internal(ThreadMetadata {
421 folder_paths: work_dirs,
422 ..thread.clone()
423 });
424 cx.notify();
425 }
426 }
427
428 pub fn archive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
429 self.update_archived(session_id, true, cx);
430 }
431
432 pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
433 self.update_archived(session_id, false, cx);
434 }
435
436 pub fn create_archived_worktree(
437 &self,
438 worktree_path: String,
439 main_repo_path: String,
440 branch_name: Option<String>,
441 staged_commit_hash: String,
442 unstaged_commit_hash: String,
443 original_commit_hash: String,
444 cx: &App,
445 ) -> Task<anyhow::Result<i64>> {
446 let db = self.db.clone();
447 cx.background_spawn(async move {
448 db.create_archived_worktree(
449 worktree_path,
450 main_repo_path,
451 branch_name,
452 staged_commit_hash,
453 unstaged_commit_hash,
454 original_commit_hash,
455 )
456 .await
457 })
458 }
459
460 pub fn link_thread_to_archived_worktree(
461 &self,
462 session_id: String,
463 archived_worktree_id: i64,
464 cx: &App,
465 ) -> Task<anyhow::Result<()>> {
466 let db = self.db.clone();
467 cx.background_spawn(async move {
468 db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
469 .await
470 })
471 }
472
473 pub fn get_archived_worktrees_for_thread(
474 &self,
475 session_id: String,
476 cx: &App,
477 ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
478 let db = self.db.clone();
479 cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
480 }
481
482 pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
483 let db = self.db.clone();
484 cx.background_spawn(async move { db.delete_archived_worktree(id).await })
485 }
486
487 fn update_archived(
488 &mut self,
489 session_id: &acp::SessionId,
490 archived: bool,
491 cx: &mut Context<Self>,
492 ) {
493 if !cx.has_flag::<AgentV2FeatureFlag>() {
494 return;
495 }
496
497 if let Some(thread) = self.threads.get(session_id) {
498 self.save_internal(ThreadMetadata {
499 archived,
500 ..thread.clone()
501 });
502 cx.notify();
503 }
504 }
505
506 pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
507 if !cx.has_flag::<AgentV2FeatureFlag>() {
508 return;
509 }
510
511 if let Some(thread) = self.threads.get(&session_id) {
512 if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
513 session_ids.remove(&session_id);
514 }
515 if !thread.main_worktree_paths.is_empty() {
516 if let Some(session_ids) = self
517 .threads_by_main_paths
518 .get_mut(&thread.main_worktree_paths)
519 {
520 session_ids.remove(&session_id);
521 }
522 }
523 }
524 self.threads.remove(&session_id);
525 self.pending_thread_ops_tx
526 .try_send(DbOperation::Delete(session_id))
527 .log_err();
528 cx.notify();
529 }
530
531 fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
532 let weak_store = cx.weak_entity();
533
534 cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
535 // Don't track subagent threads in the sidebar.
536 if thread.parent_session_id().is_some() {
537 return;
538 }
539
540 let thread_entity = cx.entity();
541
542 cx.on_release({
543 let weak_store = weak_store.clone();
544 move |thread, cx| {
545 weak_store
546 .update(cx, |store, _cx| {
547 let session_id = thread.session_id().clone();
548 store.session_subscriptions.remove(&session_id);
549 })
550 .ok();
551 }
552 })
553 .detach();
554
555 weak_store
556 .update(cx, |this, cx| {
557 let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
558 this.session_subscriptions
559 .insert(thread.session_id().clone(), subscription);
560 })
561 .ok();
562 })
563 .detach();
564
565 let (tx, rx) = smol::channel::unbounded();
566 let _db_operations_task = cx.background_spawn({
567 let db = db.clone();
568 async move {
569 while let Ok(first_update) = rx.recv().await {
570 let mut updates = vec![first_update];
571 while let Ok(update) = rx.try_recv() {
572 updates.push(update);
573 }
574 let updates = Self::dedup_db_operations(updates);
575 for operation in updates {
576 match operation {
577 DbOperation::Upsert(metadata) => {
578 db.save(metadata).await.log_err();
579 }
580 DbOperation::Delete(session_id) => {
581 db.delete(session_id).await.log_err();
582 }
583 }
584 }
585 }
586 }
587 });
588
589 let mut this = Self {
590 db,
591 threads: HashMap::default(),
592 threads_by_paths: HashMap::default(),
593 threads_by_main_paths: HashMap::default(),
594 reload_task: None,
595 session_subscriptions: HashMap::default(),
596 pending_thread_ops_tx: tx,
597 _db_operations_task,
598 };
599 let _ = this.reload(cx);
600 this
601 }
602
603 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
604 let mut ops = HashMap::default();
605 for operation in operations.into_iter().rev() {
606 if ops.contains_key(operation.id()) {
607 continue;
608 }
609 ops.insert(operation.id().clone(), operation);
610 }
611 ops.into_values().collect()
612 }
613
614 fn handle_thread_event(
615 &mut self,
616 thread: Entity<acp_thread::AcpThread>,
617 event: &AcpThreadEvent,
618 cx: &mut Context<Self>,
619 ) {
620 // Don't track subagent threads in the sidebar.
621 if thread.read(cx).parent_session_id().is_some() {
622 return;
623 }
624
625 match event {
626 AcpThreadEvent::NewEntry
627 | AcpThreadEvent::TitleUpdated
628 | AcpThreadEvent::EntryUpdated(_)
629 | AcpThreadEvent::EntriesRemoved(_)
630 | AcpThreadEvent::ToolAuthorizationRequested(_)
631 | AcpThreadEvent::ToolAuthorizationReceived(_)
632 | AcpThreadEvent::Retry(_)
633 | AcpThreadEvent::Stopped(_)
634 | AcpThreadEvent::Error
635 | AcpThreadEvent::LoadError(_)
636 | AcpThreadEvent::Refusal
637 | AcpThreadEvent::WorkingDirectoriesUpdated => {
638 let thread_ref = thread.read(cx);
639 if thread_ref.entries().is_empty() {
640 return;
641 }
642
643 let existing_thread = self.threads.get(thread_ref.session_id());
644 let session_id = thread_ref.session_id().clone();
645 let title = thread_ref
646 .title()
647 .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());
648
649 let updated_at = Utc::now();
650
651 let created_at = existing_thread
652 .and_then(|t| t.created_at)
653 .unwrap_or_else(|| updated_at);
654
655 let agent_id = thread_ref.connection().agent_id();
656
657 let folder_paths = {
658 let project = thread_ref.project().read(cx);
659 let paths: Vec<Arc<Path>> = project
660 .visible_worktrees(cx)
661 .map(|worktree| worktree.read(cx).abs_path())
662 .collect();
663 PathList::new(&paths)
664 };
665
666 let main_worktree_paths = thread_ref
667 .project()
668 .read(cx)
669 .project_group_key(cx)
670 .path_list()
671 .clone();
672
673 // Threads without a folder path (e.g. started in an empty
674 // window) are archived by default so they don't get lost,
675 // because they won't show up in the sidebar. Users can reload
676 // them from the archive.
677 let archived = existing_thread
678 .map(|t| t.archived)
679 .unwrap_or(folder_paths.is_empty());
680
681 let metadata = ThreadMetadata {
682 session_id,
683 agent_id,
684 title,
685 created_at: Some(created_at),
686 updated_at,
687 folder_paths,
688 main_worktree_paths,
689 archived,
690 };
691
692 self.save(metadata, cx);
693 }
694 AcpThreadEvent::TokenUsageUpdated
695 | AcpThreadEvent::SubagentSpawned(_)
696 | AcpThreadEvent::PromptCapabilitiesUpdated
697 | AcpThreadEvent::AvailableCommandsUpdated(_)
698 | AcpThreadEvent::ModeUpdated(_)
699 | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
700 }
701 }
702}
703
704impl Global for ThreadMetadataStore {}
705
706struct ThreadMetadataDb(ThreadSafeConnection);
707
708impl Domain for ThreadMetadataDb {
709 const NAME: &str = stringify!(ThreadMetadataDb);
710
711 const MIGRATIONS: &[&str] = &[
712 sql!(
713 CREATE TABLE IF NOT EXISTS sidebar_threads(
714 session_id TEXT PRIMARY KEY,
715 agent_id TEXT,
716 title TEXT NOT NULL,
717 updated_at TEXT NOT NULL,
718 created_at TEXT,
719 folder_paths TEXT,
720 folder_paths_order TEXT
721 ) STRICT;
722 ),
723 sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
724 sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
725 sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
726 sql!(
727 CREATE TABLE IF NOT EXISTS archived_git_worktrees(
728 id INTEGER PRIMARY KEY,
729 worktree_path TEXT NOT NULL,
730 main_repo_path TEXT NOT NULL,
731 branch_name TEXT,
732 staged_commit_hash TEXT,
733 unstaged_commit_hash TEXT,
734 original_commit_hash TEXT
735 ) STRICT;
736
737 CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
738 session_id TEXT NOT NULL,
739 archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
740 PRIMARY KEY (session_id, archived_worktree_id)
741 ) STRICT;
742 ),
743 ];
744}
745
746db::static_connection!(ThreadMetadataDb, []);
747
748impl ThreadMetadataDb {
749 pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
750 self.select::<Arc<str>>(
751 "SELECT session_id FROM sidebar_threads \
752 ORDER BY updated_at DESC",
753 )?()
754 }
755
756 /// List all sidebar thread metadata, ordered by updated_at descending.
757 pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
758 self.select::<ThreadMetadata>(
759 "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
760 FROM sidebar_threads \
761 ORDER BY updated_at DESC"
762 )?()
763 }
764
765 /// Upsert metadata for a thread.
766 pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
767 let id = row.session_id.0.clone();
768 let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
769 None
770 } else {
771 Some(row.agent_id.to_string())
772 };
773 let title = row.title.to_string();
774 let updated_at = row.updated_at.to_rfc3339();
775 let created_at = row.created_at.map(|dt| dt.to_rfc3339());
776 let serialized = row.folder_paths.serialize();
777 let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
778 (None, None)
779 } else {
780 (Some(serialized.paths), Some(serialized.order))
781 };
782 let main_serialized = row.main_worktree_paths.serialize();
783 let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
784 {
785 (None, None)
786 } else {
787 (Some(main_serialized.paths), Some(main_serialized.order))
788 };
789 let archived = row.archived;
790
791 self.write(move |conn| {
792 let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
793 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
794 ON CONFLICT(session_id) DO UPDATE SET \
795 agent_id = excluded.agent_id, \
796 title = excluded.title, \
797 updated_at = excluded.updated_at, \
798 created_at = excluded.created_at, \
799 folder_paths = excluded.folder_paths, \
800 folder_paths_order = excluded.folder_paths_order, \
801 archived = excluded.archived, \
802 main_worktree_paths = excluded.main_worktree_paths, \
803 main_worktree_paths_order = excluded.main_worktree_paths_order";
804 let mut stmt = Statement::prepare(conn, sql)?;
805 let mut i = stmt.bind(&id, 1)?;
806 i = stmt.bind(&agent_id, i)?;
807 i = stmt.bind(&title, i)?;
808 i = stmt.bind(&updated_at, i)?;
809 i = stmt.bind(&created_at, i)?;
810 i = stmt.bind(&folder_paths, i)?;
811 i = stmt.bind(&folder_paths_order, i)?;
812 i = stmt.bind(&archived, i)?;
813 i = stmt.bind(&main_worktree_paths, i)?;
814 stmt.bind(&main_worktree_paths_order, i)?;
815 stmt.exec()
816 })
817 .await
818 }
819
820 /// Delete metadata for a single thread.
821 pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
822 let id = session_id.0.clone();
823 self.write(move |conn| {
824 let mut stmt =
825 Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
826 stmt.bind(&id, 1)?;
827 stmt.exec()
828 })
829 .await
830 }
831
832 pub async fn create_archived_worktree(
833 &self,
834 worktree_path: String,
835 main_repo_path: String,
836 branch_name: Option<String>,
837 staged_commit_hash: String,
838 unstaged_commit_hash: String,
839 original_commit_hash: String,
840 ) -> anyhow::Result<i64> {
841 self.write(move |conn| {
842 let mut stmt = Statement::prepare(
843 conn,
844 "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
845 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
846 RETURNING id",
847 )?;
848 let mut i = stmt.bind(&worktree_path, 1)?;
849 i = stmt.bind(&main_repo_path, i)?;
850 i = stmt.bind(&branch_name, i)?;
851 i = stmt.bind(&staged_commit_hash, i)?;
852 i = stmt.bind(&unstaged_commit_hash, i)?;
853 stmt.bind(&original_commit_hash, i)?;
854 stmt.maybe_row::<i64>()?.context("expected RETURNING id")
855 })
856 .await
857 }
858
859 pub async fn link_thread_to_archived_worktree(
860 &self,
861 session_id: String,
862 archived_worktree_id: i64,
863 ) -> anyhow::Result<()> {
864 self.write(move |conn| {
865 let mut stmt = Statement::prepare(
866 conn,
867 "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
868 VALUES (?1, ?2)",
869 )?;
870 let i = stmt.bind(&session_id, 1)?;
871 stmt.bind(&archived_worktree_id, i)?;
872 stmt.exec()
873 })
874 .await
875 }
876
877 pub async fn get_archived_worktrees_for_thread(
878 &self,
879 session_id: String,
880 ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
881 self.select_bound::<String, ArchivedGitWorktree>(
882 "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
883 FROM archived_git_worktrees a \
884 JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
885 WHERE t.session_id = ?1",
886 )?(session_id)
887 }
888
889 pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
890 self.write(move |conn| {
891 let mut stmt = Statement::prepare(
892 conn,
893 "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
894 )?;
895 stmt.bind(&id, 1)?;
896 stmt.exec()?;
897
898 let mut stmt =
899 Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
900 stmt.bind(&id, 1)?;
901 stmt.exec()
902 })
903 .await
904 }
905}
906
907impl Column for ThreadMetadata {
908 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
909 let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
910 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
911 let (title, next): (String, i32) = Column::column(statement, next)?;
912 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
913 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
914 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
915 let (folder_paths_order_str, next): (Option<String>, i32) =
916 Column::column(statement, next)?;
917 let (archived, next): (bool, i32) = Column::column(statement, next)?;
918 let (main_worktree_paths_str, next): (Option<String>, i32) =
919 Column::column(statement, next)?;
920 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
921 Column::column(statement, next)?;
922
923 let agent_id = agent_id
924 .map(|id| AgentId::new(id))
925 .unwrap_or(ZED_AGENT_ID.clone());
926
927 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
928 let created_at = created_at_str
929 .as_deref()
930 .map(DateTime::parse_from_rfc3339)
931 .transpose()?
932 .map(|dt| dt.with_timezone(&Utc));
933
934 let folder_paths = folder_paths_str
935 .map(|paths| {
936 PathList::deserialize(&util::path_list::SerializedPathList {
937 paths,
938 order: folder_paths_order_str.unwrap_or_default(),
939 })
940 })
941 .unwrap_or_default();
942
943 let main_worktree_paths = main_worktree_paths_str
944 .map(|paths| {
945 PathList::deserialize(&util::path_list::SerializedPathList {
946 paths,
947 order: main_worktree_paths_order_str.unwrap_or_default(),
948 })
949 })
950 .unwrap_or_default();
951
952 Ok((
953 ThreadMetadata {
954 session_id: acp::SessionId::new(id),
955 agent_id,
956 title: title.into(),
957 updated_at,
958 created_at,
959 folder_paths,
960 main_worktree_paths,
961 archived,
962 },
963 next,
964 ))
965 }
966}
967
968impl Column for ArchivedGitWorktree {
969 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
970 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
971 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
972 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
973 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
974 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
975 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
976 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
977
978 Ok((
979 ArchivedGitWorktree {
980 id,
981 worktree_path: PathBuf::from(worktree_path_str),
982 main_repo_path: PathBuf::from(main_repo_path_str),
983 branch_name,
984 staged_commit_hash,
985 unstaged_commit_hash,
986 original_commit_hash,
987 },
988 next,
989 ))
990 }
991}
992
993#[cfg(test)]
994mod tests {
995 use super::*;
996 use acp_thread::{AgentConnection, StubAgentConnection};
997 use action_log::ActionLog;
998 use agent::DbThread;
999 use agent_client_protocol as acp;
1000 use feature_flags::FeatureFlagAppExt;
1001 use gpui::TestAppContext;
1002 use project::FakeFs;
1003 use project::Project;
1004 use std::path::Path;
1005 use std::rc::Rc;
1006
1007 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1008 DbThread {
1009 title: title.to_string().into(),
1010 messages: Vec::new(),
1011 updated_at,
1012 detailed_summary: None,
1013 initial_project_snapshot: None,
1014 cumulative_token_usage: Default::default(),
1015 request_token_usage: Default::default(),
1016 model: None,
1017 profile: None,
1018 imported: false,
1019 subagent_context: None,
1020 speed: None,
1021 thinking_enabled: false,
1022 thinking_effort: None,
1023 draft_prompt: None,
1024 ui_scroll_position: None,
1025 }
1026 }
1027
1028 fn make_metadata(
1029 session_id: &str,
1030 title: &str,
1031 updated_at: DateTime<Utc>,
1032 folder_paths: PathList,
1033 ) -> ThreadMetadata {
1034 ThreadMetadata {
1035 archived: false,
1036 session_id: acp::SessionId::new(session_id),
1037 agent_id: agent::ZED_AGENT_ID.clone(),
1038 title: title.to_string().into(),
1039 updated_at,
1040 created_at: Some(updated_at),
1041 folder_paths,
1042 main_worktree_paths: PathList::default(),
1043 }
1044 }
1045
1046 fn init_test(cx: &mut TestAppContext) {
1047 cx.update(|cx| {
1048 let settings_store = settings::SettingsStore::test(cx);
1049 cx.set_global(settings_store);
1050 cx.update_flags(true, vec!["agent-v2".to_string()]);
1051 ThreadMetadataStore::init_global(cx);
1052 ThreadStore::init_global(cx);
1053 });
1054 cx.run_until_parked();
1055 }
1056
1057 #[gpui::test]
1058 async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
1059 let first_paths = PathList::new(&[Path::new("/project-a")]);
1060 let second_paths = PathList::new(&[Path::new("/project-b")]);
1061 let now = Utc::now();
1062 let older = now - chrono::Duration::seconds(1);
1063
1064 let thread = std::thread::current();
1065 let test_name = thread.name().unwrap_or("unknown_test");
1066 let db_name = format!("THREAD_METADATA_DB_{}", test_name);
1067 let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
1068 &db_name,
1069 )));
1070
1071 db.save(make_metadata(
1072 "session-1",
1073 "First Thread",
1074 now,
1075 first_paths.clone(),
1076 ))
1077 .await
1078 .unwrap();
1079 db.save(make_metadata(
1080 "session-2",
1081 "Second Thread",
1082 older,
1083 second_paths.clone(),
1084 ))
1085 .await
1086 .unwrap();
1087
1088 cx.update(|cx| {
1089 let settings_store = settings::SettingsStore::test(cx);
1090 cx.set_global(settings_store);
1091 cx.update_flags(true, vec!["agent-v2".to_string()]);
1092 ThreadMetadataStore::init_global(cx);
1093 });
1094
1095 cx.run_until_parked();
1096
1097 cx.update(|cx| {
1098 let store = ThreadMetadataStore::global(cx);
1099 let store = store.read(cx);
1100
1101 let entry_ids = store
1102 .entry_ids()
1103 .map(|session_id| session_id.0.to_string())
1104 .collect::<Vec<_>>();
1105 assert_eq!(entry_ids.len(), 2);
1106 assert!(entry_ids.contains(&"session-1".to_string()));
1107 assert!(entry_ids.contains(&"session-2".to_string()));
1108
1109 let first_path_entries = store
1110 .entries_for_path(&first_paths)
1111 .map(|entry| entry.session_id.0.to_string())
1112 .collect::<Vec<_>>();
1113 assert_eq!(first_path_entries, vec!["session-1"]);
1114
1115 let second_path_entries = store
1116 .entries_for_path(&second_paths)
1117 .map(|entry| entry.session_id.0.to_string())
1118 .collect::<Vec<_>>();
1119 assert_eq!(second_path_entries, vec!["session-2"]);
1120 });
1121 }
1122
1123 #[gpui::test]
1124 async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
1125 init_test(cx);
1126
1127 let first_paths = PathList::new(&[Path::new("/project-a")]);
1128 let second_paths = PathList::new(&[Path::new("/project-b")]);
1129 let initial_time = Utc::now();
1130 let updated_time = initial_time + chrono::Duration::seconds(1);
1131
1132 let initial_metadata = make_metadata(
1133 "session-1",
1134 "First Thread",
1135 initial_time,
1136 first_paths.clone(),
1137 );
1138
1139 let second_metadata = make_metadata(
1140 "session-2",
1141 "Second Thread",
1142 initial_time,
1143 second_paths.clone(),
1144 );
1145
1146 cx.update(|cx| {
1147 let store = ThreadMetadataStore::global(cx);
1148 store.update(cx, |store, cx| {
1149 store.save(initial_metadata, cx);
1150 store.save(second_metadata, cx);
1151 });
1152 });
1153
1154 cx.run_until_parked();
1155
1156 cx.update(|cx| {
1157 let store = ThreadMetadataStore::global(cx);
1158 let store = store.read(cx);
1159
1160 let first_path_entries = store
1161 .entries_for_path(&first_paths)
1162 .map(|entry| entry.session_id.0.to_string())
1163 .collect::<Vec<_>>();
1164 assert_eq!(first_path_entries, vec!["session-1"]);
1165
1166 let second_path_entries = store
1167 .entries_for_path(&second_paths)
1168 .map(|entry| entry.session_id.0.to_string())
1169 .collect::<Vec<_>>();
1170 assert_eq!(second_path_entries, vec!["session-2"]);
1171 });
1172
1173 let moved_metadata = make_metadata(
1174 "session-1",
1175 "First Thread",
1176 updated_time,
1177 second_paths.clone(),
1178 );
1179
1180 cx.update(|cx| {
1181 let store = ThreadMetadataStore::global(cx);
1182 store.update(cx, |store, cx| {
1183 store.save(moved_metadata, cx);
1184 });
1185 });
1186
1187 cx.run_until_parked();
1188
1189 cx.update(|cx| {
1190 let store = ThreadMetadataStore::global(cx);
1191 let store = store.read(cx);
1192
1193 let entry_ids = store
1194 .entry_ids()
1195 .map(|session_id| session_id.0.to_string())
1196 .collect::<Vec<_>>();
1197 assert_eq!(entry_ids.len(), 2);
1198 assert!(entry_ids.contains(&"session-1".to_string()));
1199 assert!(entry_ids.contains(&"session-2".to_string()));
1200
1201 let first_path_entries = store
1202 .entries_for_path(&first_paths)
1203 .map(|entry| entry.session_id.0.to_string())
1204 .collect::<Vec<_>>();
1205 assert!(first_path_entries.is_empty());
1206
1207 let second_path_entries = store
1208 .entries_for_path(&second_paths)
1209 .map(|entry| entry.session_id.0.to_string())
1210 .collect::<Vec<_>>();
1211 assert_eq!(second_path_entries.len(), 2);
1212 assert!(second_path_entries.contains(&"session-1".to_string()));
1213 assert!(second_path_entries.contains(&"session-2".to_string()));
1214 });
1215
1216 cx.update(|cx| {
1217 let store = ThreadMetadataStore::global(cx);
1218 store.update(cx, |store, cx| {
1219 store.delete(acp::SessionId::new("session-2"), cx);
1220 });
1221 });
1222
1223 cx.run_until_parked();
1224
1225 cx.update(|cx| {
1226 let store = ThreadMetadataStore::global(cx);
1227 let store = store.read(cx);
1228
1229 let entry_ids = store
1230 .entry_ids()
1231 .map(|session_id| session_id.0.to_string())
1232 .collect::<Vec<_>>();
1233 assert_eq!(entry_ids, vec!["session-1"]);
1234
1235 let second_path_entries = store
1236 .entries_for_path(&second_paths)
1237 .map(|entry| entry.session_id.0.to_string())
1238 .collect::<Vec<_>>();
1239 assert_eq!(second_path_entries, vec!["session-1"]);
1240 });
1241 }
1242
1243 #[gpui::test]
1244 async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
1245 init_test(cx);
1246
1247 let project_a_paths = PathList::new(&[Path::new("/project-a")]);
1248 let project_b_paths = PathList::new(&[Path::new("/project-b")]);
1249 let now = Utc::now();
1250
1251 let existing_metadata = ThreadMetadata {
1252 session_id: acp::SessionId::new("a-session-0"),
1253 agent_id: agent::ZED_AGENT_ID.clone(),
1254 title: "Existing Metadata".into(),
1255 updated_at: now - chrono::Duration::seconds(10),
1256 created_at: Some(now - chrono::Duration::seconds(10)),
1257 folder_paths: project_a_paths.clone(),
1258 main_worktree_paths: PathList::default(),
1259 archived: false,
1260 };
1261
1262 cx.update(|cx| {
1263 let store = ThreadMetadataStore::global(cx);
1264 store.update(cx, |store, cx| {
1265 store.save(existing_metadata, cx);
1266 });
1267 });
1268 cx.run_until_parked();
1269
1270 let threads_to_save = vec![
1271 (
1272 "a-session-0",
1273 "Thread A0 From Native Store",
1274 project_a_paths.clone(),
1275 now,
1276 ),
1277 (
1278 "a-session-1",
1279 "Thread A1",
1280 project_a_paths.clone(),
1281 now + chrono::Duration::seconds(1),
1282 ),
1283 (
1284 "b-session-0",
1285 "Thread B0",
1286 project_b_paths.clone(),
1287 now + chrono::Duration::seconds(2),
1288 ),
1289 (
1290 "projectless",
1291 "Projectless",
1292 PathList::default(),
1293 now + chrono::Duration::seconds(3),
1294 ),
1295 ];
1296
1297 for (session_id, title, paths, updated_at) in &threads_to_save {
1298 let save_task = cx.update(|cx| {
1299 let thread_store = ThreadStore::global(cx);
1300 let session_id = session_id.to_string();
1301 let title = title.to_string();
1302 let paths = paths.clone();
1303 thread_store.update(cx, |store, cx| {
1304 store.save_thread(
1305 acp::SessionId::new(session_id),
1306 make_db_thread(&title, *updated_at),
1307 paths,
1308 cx,
1309 )
1310 })
1311 });
1312 save_task.await.unwrap();
1313 cx.run_until_parked();
1314 }
1315
1316 cx.update(|cx| migrate_thread_metadata(cx));
1317 cx.run_until_parked();
1318
1319 let list = cx.update(|cx| {
1320 let store = ThreadMetadataStore::global(cx);
1321 store.read(cx).entries().cloned().collect::<Vec<_>>()
1322 });
1323
1324 assert_eq!(list.len(), 4);
1325 assert!(
1326 list.iter()
1327 .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
1328 );
1329
1330 let existing_metadata = list
1331 .iter()
1332 .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
1333 .unwrap();
1334 assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
1335 assert!(!existing_metadata.archived);
1336
1337 let migrated_session_ids = list
1338 .iter()
1339 .map(|metadata| metadata.session_id.0.as_ref())
1340 .collect::<Vec<_>>();
1341 assert!(migrated_session_ids.contains(&"a-session-1"));
1342 assert!(migrated_session_ids.contains(&"b-session-0"));
1343 assert!(migrated_session_ids.contains(&"projectless"));
1344
1345 let migrated_entries = list
1346 .iter()
1347 .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
1348 .collect::<Vec<_>>();
1349 assert!(migrated_entries.iter().all(|metadata| metadata.archived));
1350 }
1351
1352 #[gpui::test]
1353 async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
1354 cx: &mut TestAppContext,
1355 ) {
1356 init_test(cx);
1357
1358 let project_paths = PathList::new(&[Path::new("/project-a")]);
1359 let existing_updated_at = Utc::now();
1360
1361 let existing_metadata = ThreadMetadata {
1362 session_id: acp::SessionId::new("existing-session"),
1363 agent_id: agent::ZED_AGENT_ID.clone(),
1364 title: "Existing Metadata".into(),
1365 updated_at: existing_updated_at,
1366 created_at: Some(existing_updated_at),
1367 folder_paths: project_paths.clone(),
1368 main_worktree_paths: PathList::default(),
1369 archived: false,
1370 };
1371
1372 cx.update(|cx| {
1373 let store = ThreadMetadataStore::global(cx);
1374 store.update(cx, |store, cx| {
1375 store.save(existing_metadata, cx);
1376 });
1377 });
1378 cx.run_until_parked();
1379
1380 let save_task = cx.update(|cx| {
1381 let thread_store = ThreadStore::global(cx);
1382 thread_store.update(cx, |store, cx| {
1383 store.save_thread(
1384 acp::SessionId::new("existing-session"),
1385 make_db_thread(
1386 "Updated Native Thread Title",
1387 existing_updated_at + chrono::Duration::seconds(1),
1388 ),
1389 project_paths.clone(),
1390 cx,
1391 )
1392 })
1393 });
1394 save_task.await.unwrap();
1395 cx.run_until_parked();
1396
1397 cx.update(|cx| migrate_thread_metadata(cx));
1398 cx.run_until_parked();
1399
1400 let list = cx.update(|cx| {
1401 let store = ThreadMetadataStore::global(cx);
1402 store.read(cx).entries().cloned().collect::<Vec<_>>()
1403 });
1404
1405 assert_eq!(list.len(), 1);
1406 assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
1407 }
1408
1409 #[gpui::test]
1410 async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
1411 cx: &mut TestAppContext,
1412 ) {
1413 init_test(cx);
1414
1415 let project_a_paths = PathList::new(&[Path::new("/project-a")]);
1416 let project_b_paths = PathList::new(&[Path::new("/project-b")]);
1417 let now = Utc::now();
1418
1419 // Create 7 threads for project A and 3 for project B
1420 let mut threads_to_save = Vec::new();
1421 for i in 0..7 {
1422 threads_to_save.push((
1423 format!("a-session-{i}"),
1424 format!("Thread A{i}"),
1425 project_a_paths.clone(),
1426 now + chrono::Duration::seconds(i as i64),
1427 ));
1428 }
1429 for i in 0..3 {
1430 threads_to_save.push((
1431 format!("b-session-{i}"),
1432 format!("Thread B{i}"),
1433 project_b_paths.clone(),
1434 now + chrono::Duration::seconds(i as i64),
1435 ));
1436 }
1437
1438 for (session_id, title, paths, updated_at) in &threads_to_save {
1439 let save_task = cx.update(|cx| {
1440 let thread_store = ThreadStore::global(cx);
1441 let session_id = session_id.to_string();
1442 let title = title.to_string();
1443 let paths = paths.clone();
1444 thread_store.update(cx, |store, cx| {
1445 store.save_thread(
1446 acp::SessionId::new(session_id),
1447 make_db_thread(&title, *updated_at),
1448 paths,
1449 cx,
1450 )
1451 })
1452 });
1453 save_task.await.unwrap();
1454 cx.run_until_parked();
1455 }
1456
1457 cx.update(|cx| migrate_thread_metadata(cx));
1458 cx.run_until_parked();
1459
1460 let list = cx.update(|cx| {
1461 let store = ThreadMetadataStore::global(cx);
1462 store.read(cx).entries().cloned().collect::<Vec<_>>()
1463 });
1464
1465 assert_eq!(list.len(), 10);
1466
1467 // Project A: 5 most recent should be unarchived, 2 oldest should be archived
1468 let mut project_a_entries: Vec<_> = list
1469 .iter()
1470 .filter(|m| m.folder_paths == project_a_paths)
1471 .collect();
1472 assert_eq!(project_a_entries.len(), 7);
1473 project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
1474
1475 for entry in &project_a_entries[..5] {
1476 assert!(
1477 !entry.archived,
1478 "Expected {} to be unarchived (top 5 most recent)",
1479 entry.session_id.0
1480 );
1481 }
1482 for entry in &project_a_entries[5..] {
1483 assert!(
1484 entry.archived,
1485 "Expected {} to be archived (older than top 5)",
1486 entry.session_id.0
1487 );
1488 }
1489
1490 // Project B: all 3 should be unarchived (under the limit)
1491 let project_b_entries: Vec<_> = list
1492 .iter()
1493 .filter(|m| m.folder_paths == project_b_paths)
1494 .collect();
1495 assert_eq!(project_b_entries.len(), 3);
1496 assert!(project_b_entries.iter().all(|m| !m.archived));
1497 }
1498
1499 #[gpui::test]
1500 async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
1501 init_test(cx);
1502
1503 let fs = FakeFs::new(cx.executor());
1504 let project = Project::test(fs, None::<&Path>, cx).await;
1505 let connection = Rc::new(StubAgentConnection::new());
1506
1507 let thread = cx
1508 .update(|cx| {
1509 connection
1510 .clone()
1511 .new_session(project.clone(), PathList::default(), cx)
1512 })
1513 .await
1514 .unwrap();
1515 let session_id = cx.read(|cx| thread.read(cx).session_id().clone());
1516
1517 cx.update(|cx| {
1518 thread.update(cx, |thread, cx| {
1519 thread.set_title("Draft Thread".into(), cx).detach();
1520 });
1521 });
1522 cx.run_until_parked();
1523
1524 let metadata_ids = cx.update(|cx| {
1525 ThreadMetadataStore::global(cx)
1526 .read(cx)
1527 .entry_ids()
1528 .collect::<Vec<_>>()
1529 });
1530 assert!(
1531 metadata_ids.is_empty(),
1532 "expected empty draft thread title updates to be ignored"
1533 );
1534
1535 cx.update(|cx| {
1536 thread.update(cx, |thread, cx| {
1537 thread.push_user_content_block(None, "Hello".into(), cx);
1538 });
1539 });
1540 cx.run_until_parked();
1541
1542 let metadata_ids = cx.update(|cx| {
1543 ThreadMetadataStore::global(cx)
1544 .read(cx)
1545 .entry_ids()
1546 .collect::<Vec<_>>()
1547 });
1548 assert_eq!(metadata_ids, vec![session_id]);
1549 }
1550
1551 #[gpui::test]
1552 async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
1553 init_test(cx);
1554
1555 let fs = FakeFs::new(cx.executor());
1556 let project = Project::test(fs, None::<&Path>, cx).await;
1557 let connection = Rc::new(StubAgentConnection::new());
1558
1559 let thread = cx
1560 .update(|cx| {
1561 connection
1562 .clone()
1563 .new_session(project.clone(), PathList::default(), cx)
1564 })
1565 .await
1566 .unwrap();
1567 let session_id = cx.read(|cx| thread.read(cx).session_id().clone());
1568
1569 cx.update(|cx| {
1570 thread.update(cx, |thread, cx| {
1571 thread.push_user_content_block(None, "Hello".into(), cx);
1572 });
1573 });
1574 cx.run_until_parked();
1575
1576 let metadata_ids = cx.update(|cx| {
1577 ThreadMetadataStore::global(cx)
1578 .read(cx)
1579 .entry_ids()
1580 .collect::<Vec<_>>()
1581 });
1582 assert_eq!(metadata_ids, vec![session_id.clone()]);
1583
1584 drop(thread);
1585 cx.update(|_| {});
1586 cx.run_until_parked();
1587
1588 let metadata_ids = cx.update(|cx| {
1589 ThreadMetadataStore::global(cx)
1590 .read(cx)
1591 .entry_ids()
1592 .collect::<Vec<_>>()
1593 });
1594 assert_eq!(metadata_ids, vec![session_id]);
1595 }
1596
1597 #[gpui::test]
1598 async fn test_threads_without_project_association_are_archived_by_default(
1599 cx: &mut TestAppContext,
1600 ) {
1601 init_test(cx);
1602
1603 let fs = FakeFs::new(cx.executor());
1604 let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
1605 let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
1606 let connection = Rc::new(StubAgentConnection::new());
1607
1608 let thread_without_worktree = cx
1609 .update(|cx| {
1610 connection.clone().new_session(
1611 project_without_worktree.clone(),
1612 PathList::default(),
1613 cx,
1614 )
1615 })
1616 .await
1617 .unwrap();
1618 let session_without_worktree =
1619 cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());
1620
1621 cx.update(|cx| {
1622 thread_without_worktree.update(cx, |thread, cx| {
1623 thread.push_user_content_block(None, "content".into(), cx);
1624 thread.set_title("No Project Thread".into(), cx).detach();
1625 });
1626 });
1627 cx.run_until_parked();
1628
1629 let thread_with_worktree = cx
1630 .update(|cx| {
1631 connection.clone().new_session(
1632 project_with_worktree.clone(),
1633 PathList::default(),
1634 cx,
1635 )
1636 })
1637 .await
1638 .unwrap();
1639 let session_with_worktree =
1640 cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());
1641
1642 cx.update(|cx| {
1643 thread_with_worktree.update(cx, |thread, cx| {
1644 thread.push_user_content_block(None, "content".into(), cx);
1645 thread.set_title("Project Thread".into(), cx).detach();
1646 });
1647 });
1648 cx.run_until_parked();
1649
1650 cx.update(|cx| {
1651 let store = ThreadMetadataStore::global(cx);
1652 let store = store.read(cx);
1653
1654 let without_worktree = store
1655 .entry(&session_without_worktree)
1656 .expect("missing metadata for thread without project association");
1657 assert!(without_worktree.folder_paths.is_empty());
1658 assert!(
1659 without_worktree.archived,
1660 "expected thread without project association to be archived"
1661 );
1662
1663 let with_worktree = store
1664 .entry(&session_with_worktree)
1665 .expect("missing metadata for thread with project association");
1666 assert_eq!(
1667 with_worktree.folder_paths,
1668 PathList::new(&[Path::new("/project-a")])
1669 );
1670 assert!(
1671 !with_worktree.archived,
1672 "expected thread with project association to remain unarchived"
1673 );
1674 });
1675 }
1676
1677 #[gpui::test]
1678 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
1679 init_test(cx);
1680
1681 let fs = FakeFs::new(cx.executor());
1682 let project = Project::test(fs, None::<&Path>, cx).await;
1683 let connection = Rc::new(StubAgentConnection::new());
1684
1685 // Create a regular (non-subagent) AcpThread.
1686 let regular_thread = cx
1687 .update(|cx| {
1688 connection
1689 .clone()
1690 .new_session(project.clone(), PathList::default(), cx)
1691 })
1692 .await
1693 .unwrap();
1694
1695 let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());
1696
1697 // Set a title on the regular thread to trigger a save via handle_thread_update.
1698 cx.update(|cx| {
1699 regular_thread.update(cx, |thread, cx| {
1700 thread.push_user_content_block(None, "content".into(), cx);
1701 thread.set_title("Regular Thread".into(), cx).detach();
1702 });
1703 });
1704 cx.run_until_parked();
1705
1706 // Create a subagent AcpThread
1707 let subagent_session_id = acp::SessionId::new("subagent-session");
1708 let subagent_thread = cx.update(|cx| {
1709 let action_log = cx.new(|_| ActionLog::new(project.clone()));
1710 cx.new(|cx| {
1711 acp_thread::AcpThread::new(
1712 Some(regular_session_id.clone()),
1713 Some("Subagent Thread".into()),
1714 None,
1715 connection.clone(),
1716 project.clone(),
1717 action_log,
1718 subagent_session_id.clone(),
1719 watch::Receiver::constant(acp::PromptCapabilities::new()),
1720 cx,
1721 )
1722 })
1723 });
1724
1725 // Set a title on the subagent thread to trigger handle_thread_update.
1726 cx.update(|cx| {
1727 subagent_thread.update(cx, |thread, cx| {
1728 thread
1729 .set_title("Subagent Thread Title".into(), cx)
1730 .detach();
1731 });
1732 });
1733 cx.run_until_parked();
1734
1735 // List all metadata from the store cache.
1736 let list = cx.update(|cx| {
1737 let store = ThreadMetadataStore::global(cx);
1738 store.read(cx).entries().cloned().collect::<Vec<_>>()
1739 });
1740
1741 // The subagent thread should NOT appear in the sidebar metadata.
1742 // Only the regular thread should be listed.
1743 assert_eq!(
1744 list.len(),
1745 1,
1746 "Expected only the regular thread in sidebar metadata, \
1747 but found {} entries (subagent threads are leaking into the sidebar)",
1748 list.len(),
1749 );
1750 assert_eq!(list[0].session_id, regular_session_id);
1751 assert_eq!(list[0].title.as_ref(), "Regular Thread");
1752 }
1753
1754 #[test]
1755 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1756 let now = Utc::now();
1757
1758 let operations = vec![
1759 DbOperation::Upsert(make_metadata(
1760 "session-1",
1761 "First Thread",
1762 now,
1763 PathList::default(),
1764 )),
1765 DbOperation::Delete(acp::SessionId::new("session-1")),
1766 ];
1767
1768 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1769
1770 assert_eq!(deduped.len(), 1);
1771 assert_eq!(
1772 deduped[0],
1773 DbOperation::Delete(acp::SessionId::new("session-1"))
1774 );
1775 }
1776
1777 #[test]
1778 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1779 let now = Utc::now();
1780 let later = now + chrono::Duration::seconds(1);
1781
1782 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1783 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1784
1785 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1786 DbOperation::Upsert(old_metadata),
1787 DbOperation::Upsert(new_metadata.clone()),
1788 ]);
1789
1790 assert_eq!(deduped.len(), 1);
1791 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1792 }
1793
1794 #[test]
1795 fn test_dedup_db_operations_preserves_distinct_sessions() {
1796 let now = Utc::now();
1797
1798 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1799 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1800 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1801 DbOperation::Upsert(metadata1.clone()),
1802 DbOperation::Upsert(metadata2.clone()),
1803 ]);
1804
1805 assert_eq!(deduped.len(), 2);
1806 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1807 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1808 }
1809
1810 #[gpui::test]
1811 async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
1812 init_test(cx);
1813
1814 let paths = PathList::new(&[Path::new("/project-a")]);
1815 let now = Utc::now();
1816 let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
1817
1818 cx.update(|cx| {
1819 let store = ThreadMetadataStore::global(cx);
1820 store.update(cx, |store, cx| {
1821 store.save(metadata, cx);
1822 });
1823 });
1824
1825 cx.run_until_parked();
1826
1827 cx.update(|cx| {
1828 let store = ThreadMetadataStore::global(cx);
1829 let store = store.read(cx);
1830
1831 let path_entries = store
1832 .entries_for_path(&paths)
1833 .map(|e| e.session_id.0.to_string())
1834 .collect::<Vec<_>>();
1835 assert_eq!(path_entries, vec!["session-1"]);
1836
1837 let archived = store
1838 .archived_entries()
1839 .map(|e| e.session_id.0.to_string())
1840 .collect::<Vec<_>>();
1841 assert!(archived.is_empty());
1842 });
1843
1844 cx.update(|cx| {
1845 let store = ThreadMetadataStore::global(cx);
1846 store.update(cx, |store, cx| {
1847 store.archive(&acp::SessionId::new("session-1"), cx);
1848 });
1849 });
1850
1851 cx.run_until_parked();
1852
1853 cx.update(|cx| {
1854 let store = ThreadMetadataStore::global(cx);
1855 let store = store.read(cx);
1856
1857 let path_entries = store
1858 .entries_for_path(&paths)
1859 .map(|e| e.session_id.0.to_string())
1860 .collect::<Vec<_>>();
1861 assert!(path_entries.is_empty());
1862
1863 let archived = store.archived_entries().collect::<Vec<_>>();
1864 assert_eq!(archived.len(), 1);
1865 assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
1866 assert!(archived[0].archived);
1867 });
1868
1869 cx.update(|cx| {
1870 let store = ThreadMetadataStore::global(cx);
1871 store.update(cx, |store, cx| {
1872 store.unarchive(&acp::SessionId::new("session-1"), cx);
1873 });
1874 });
1875
1876 cx.run_until_parked();
1877
1878 cx.update(|cx| {
1879 let store = ThreadMetadataStore::global(cx);
1880 let store = store.read(cx);
1881
1882 let path_entries = store
1883 .entries_for_path(&paths)
1884 .map(|e| e.session_id.0.to_string())
1885 .collect::<Vec<_>>();
1886 assert_eq!(path_entries, vec!["session-1"]);
1887
1888 let archived = store
1889 .archived_entries()
1890 .map(|e| e.session_id.0.to_string())
1891 .collect::<Vec<_>>();
1892 assert!(archived.is_empty());
1893 });
1894 }
1895
1896 #[gpui::test]
1897 async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
1898 init_test(cx);
1899
1900 let paths = PathList::new(&[Path::new("/project-a")]);
1901 let now = Utc::now();
1902
1903 let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
1904 let metadata2 = make_metadata(
1905 "session-2",
1906 "Archived Thread",
1907 now - chrono::Duration::seconds(1),
1908 paths.clone(),
1909 );
1910
1911 cx.update(|cx| {
1912 let store = ThreadMetadataStore::global(cx);
1913 store.update(cx, |store, cx| {
1914 store.save(metadata1, cx);
1915 store.save(metadata2, cx);
1916 });
1917 });
1918
1919 cx.run_until_parked();
1920
1921 cx.update(|cx| {
1922 let store = ThreadMetadataStore::global(cx);
1923 store.update(cx, |store, cx| {
1924 store.archive(&acp::SessionId::new("session-2"), cx);
1925 });
1926 });
1927
1928 cx.run_until_parked();
1929
1930 cx.update(|cx| {
1931 let store = ThreadMetadataStore::global(cx);
1932 let store = store.read(cx);
1933
1934 let path_entries = store
1935 .entries_for_path(&paths)
1936 .map(|e| e.session_id.0.to_string())
1937 .collect::<Vec<_>>();
1938 assert_eq!(path_entries, vec!["session-1"]);
1939
1940 let all_entries = store
1941 .entries()
1942 .map(|e| e.session_id.0.to_string())
1943 .collect::<Vec<_>>();
1944 assert_eq!(all_entries.len(), 2);
1945 assert!(all_entries.contains(&"session-1".to_string()));
1946 assert!(all_entries.contains(&"session-2".to_string()));
1947
1948 let archived = store
1949 .archived_entries()
1950 .map(|e| e.session_id.0.to_string())
1951 .collect::<Vec<_>>();
1952 assert_eq!(archived, vec!["session-2"]);
1953 });
1954 }
1955
1956 #[gpui::test]
1957 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
1958 init_test(cx);
1959
1960 let paths = PathList::new(&[Path::new("/project-a")]);
1961 let now = Utc::now();
1962
1963 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
1964 let m2 = make_metadata(
1965 "session-2",
1966 "Thread Two",
1967 now - chrono::Duration::seconds(1),
1968 paths.clone(),
1969 );
1970 let m3 = make_metadata(
1971 "session-3",
1972 "Thread Three",
1973 now - chrono::Duration::seconds(2),
1974 paths,
1975 );
1976
1977 cx.update(|cx| {
1978 let store = ThreadMetadataStore::global(cx);
1979 store.update(cx, |store, cx| {
1980 store.save_all(vec![m1, m2, m3], cx);
1981 });
1982 });
1983
1984 cx.run_until_parked();
1985
1986 cx.update(|cx| {
1987 let store = ThreadMetadataStore::global(cx);
1988 let store = store.read(cx);
1989
1990 let all_entries = store
1991 .entries()
1992 .map(|e| e.session_id.0.to_string())
1993 .collect::<Vec<_>>();
1994 assert_eq!(all_entries.len(), 3);
1995 assert!(all_entries.contains(&"session-1".to_string()));
1996 assert!(all_entries.contains(&"session-2".to_string()));
1997 assert!(all_entries.contains(&"session-3".to_string()));
1998
1999 let entry_ids = store.entry_ids().collect::<Vec<_>>();
2000 assert_eq!(entry_ids.len(), 3);
2001 });
2002 }
2003
2004 #[gpui::test]
2005 async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
2006 init_test(cx);
2007
2008 let paths = PathList::new(&[Path::new("/project-a")]);
2009 let now = Utc::now();
2010 let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
2011
2012 cx.update(|cx| {
2013 let store = ThreadMetadataStore::global(cx);
2014 store.update(cx, |store, cx| {
2015 store.save(metadata, cx);
2016 });
2017 });
2018
2019 cx.run_until_parked();
2020
2021 cx.update(|cx| {
2022 let store = ThreadMetadataStore::global(cx);
2023 store.update(cx, |store, cx| {
2024 store.archive(&acp::SessionId::new("session-1"), cx);
2025 });
2026 });
2027
2028 cx.run_until_parked();
2029
2030 cx.update(|cx| {
2031 let store = ThreadMetadataStore::global(cx);
2032 store.update(cx, |store, cx| {
2033 let _ = store.reload(cx);
2034 });
2035 });
2036
2037 cx.run_until_parked();
2038
2039 cx.update(|cx| {
2040 let store = ThreadMetadataStore::global(cx);
2041 let store = store.read(cx);
2042
2043 let thread = store
2044 .entries()
2045 .find(|e| e.session_id.0.as_ref() == "session-1")
2046 .expect("thread should exist after reload");
2047 assert!(thread.archived);
2048
2049 let path_entries = store
2050 .entries_for_path(&paths)
2051 .map(|e| e.session_id.0.to_string())
2052 .collect::<Vec<_>>();
2053 assert!(path_entries.is_empty());
2054
2055 let archived = store
2056 .archived_entries()
2057 .map(|e| e.session_id.0.to_string())
2058 .collect::<Vec<_>>();
2059 assert_eq!(archived, vec!["session-1"]);
2060 });
2061 }
2062
2063 #[gpui::test]
2064 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2065 init_test(cx);
2066
2067 cx.run_until_parked();
2068
2069 cx.update(|cx| {
2070 let store = ThreadMetadataStore::global(cx);
2071 store.update(cx, |store, cx| {
2072 store.archive(&acp::SessionId::new("nonexistent"), cx);
2073 });
2074 });
2075
2076 cx.run_until_parked();
2077
2078 cx.update(|cx| {
2079 let store = ThreadMetadataStore::global(cx);
2080 let store = store.read(cx);
2081
2082 assert!(store.is_empty());
2083 assert_eq!(store.entries().count(), 0);
2084 assert_eq!(store.archived_entries().count(), 0);
2085 });
2086 }
2087
2088 #[gpui::test]
2089 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2090 init_test(cx);
2091
2092 let paths = PathList::new(&[Path::new("/project-a")]);
2093 let now = Utc::now();
2094 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2095 let session_id = metadata.session_id.clone();
2096
2097 cx.update(|cx| {
2098 let store = ThreadMetadataStore::global(cx);
2099 store.update(cx, |store, cx| {
2100 store.save(metadata.clone(), cx);
2101 store.archive(&session_id, cx);
2102 });
2103 });
2104
2105 cx.run_until_parked();
2106
2107 cx.update(|cx| {
2108 let store = ThreadMetadataStore::global(cx);
2109 let store = store.read(cx);
2110
2111 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2112 pretty_assertions::assert_eq!(
2113 entries,
2114 vec![ThreadMetadata {
2115 archived: true,
2116 ..metadata
2117 }]
2118 );
2119 });
2120 }
2121
2122 #[gpui::test]
2123 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2124 init_test(cx);
2125 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2126
2127 let id = store
2128 .read_with(cx, |store, cx| {
2129 store.create_archived_worktree(
2130 "/tmp/worktree".to_string(),
2131 "/home/user/repo".to_string(),
2132 Some("feature-branch".to_string()),
2133 "staged_aaa".to_string(),
2134 "unstaged_bbb".to_string(),
2135 "original_000".to_string(),
2136 cx,
2137 )
2138 })
2139 .await
2140 .unwrap();
2141
2142 store
2143 .read_with(cx, |store, cx| {
2144 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2145 })
2146 .await
2147 .unwrap();
2148
2149 let worktrees = store
2150 .read_with(cx, |store, cx| {
2151 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2152 })
2153 .await
2154 .unwrap();
2155
2156 assert_eq!(worktrees.len(), 1);
2157 let wt = &worktrees[0];
2158 assert_eq!(wt.id, id);
2159 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2160 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2161 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2162 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2163 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2164 assert_eq!(wt.original_commit_hash, "original_000");
2165 }
2166
2167 #[gpui::test]
2168 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2169 init_test(cx);
2170 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2171
2172 let id = store
2173 .read_with(cx, |store, cx| {
2174 store.create_archived_worktree(
2175 "/tmp/worktree".to_string(),
2176 "/home/user/repo".to_string(),
2177 Some("main".to_string()),
2178 "deadbeef".to_string(),
2179 "deadbeef".to_string(),
2180 "original_000".to_string(),
2181 cx,
2182 )
2183 })
2184 .await
2185 .unwrap();
2186
2187 store
2188 .read_with(cx, |store, cx| {
2189 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2190 })
2191 .await
2192 .unwrap();
2193
2194 store
2195 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2196 .await
2197 .unwrap();
2198
2199 let worktrees = store
2200 .read_with(cx, |store, cx| {
2201 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2202 })
2203 .await
2204 .unwrap();
2205 assert!(worktrees.is_empty());
2206 }
2207
2208 #[gpui::test]
2209 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2210 init_test(cx);
2211 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2212
2213 let id = store
2214 .read_with(cx, |store, cx| {
2215 store.create_archived_worktree(
2216 "/tmp/worktree".to_string(),
2217 "/home/user/repo".to_string(),
2218 None,
2219 "abc123".to_string(),
2220 "abc123".to_string(),
2221 "original_000".to_string(),
2222 cx,
2223 )
2224 })
2225 .await
2226 .unwrap();
2227
2228 store
2229 .read_with(cx, |store, cx| {
2230 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2231 })
2232 .await
2233 .unwrap();
2234
2235 store
2236 .read_with(cx, |store, cx| {
2237 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2238 })
2239 .await
2240 .unwrap();
2241
2242 let wt1 = store
2243 .read_with(cx, |store, cx| {
2244 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2245 })
2246 .await
2247 .unwrap();
2248
2249 let wt2 = store
2250 .read_with(cx, |store, cx| {
2251 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2252 })
2253 .await
2254 .unwrap();
2255
2256 assert_eq!(wt1.len(), 1);
2257 assert_eq!(wt2.len(), 1);
2258 assert_eq!(wt1[0].id, wt2[0].id);
2259 }
2260
2261 #[gpui::test]
2262 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2263 init_test(cx);
2264 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2265
2266 let id1 = store
2267 .read_with(cx, |store, cx| {
2268 store.create_archived_worktree(
2269 "/projects/worktree-a".to_string(),
2270 "/home/user/repo".to_string(),
2271 Some("branch-a".to_string()),
2272 "staged_a".to_string(),
2273 "unstaged_a".to_string(),
2274 "original_000".to_string(),
2275 cx,
2276 )
2277 })
2278 .await
2279 .unwrap();
2280
2281 let id2 = store
2282 .read_with(cx, |store, cx| {
2283 store.create_archived_worktree(
2284 "/projects/worktree-b".to_string(),
2285 "/home/user/repo".to_string(),
2286 Some("branch-b".to_string()),
2287 "staged_b".to_string(),
2288 "unstaged_b".to_string(),
2289 "original_000".to_string(),
2290 cx,
2291 )
2292 })
2293 .await
2294 .unwrap();
2295
2296 store
2297 .read_with(cx, |store, cx| {
2298 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2299 })
2300 .await
2301 .unwrap();
2302
2303 store
2304 .read_with(cx, |store, cx| {
2305 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2306 })
2307 .await
2308 .unwrap();
2309
2310 let worktrees = store
2311 .read_with(cx, |store, cx| {
2312 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2313 })
2314 .await
2315 .unwrap();
2316
2317 assert_eq!(worktrees.len(), 2);
2318
2319 let paths: Vec<&Path> = worktrees
2320 .iter()
2321 .map(|w| w.worktree_path.as_path())
2322 .collect();
2323 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2324 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2325 }
2326}