1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use futures::{FutureExt as _, future::Shared};
20use gpui::{AppContext as _, Entity, Global, Subscription, Task};
21use project::AgentId;
22use ui::{App, Context, SharedString};
23use util::ResultExt as _;
24use workspace::PathList;
25
26use crate::DEFAULT_THREAD_TITLE;
27
/// Initializes the global [`ThreadMetadataStore`] and then runs the one-time
/// migration from the native agent thread store. Order matters: the migration
/// reads the global installed on the first line.
pub fn init(cx: &mut App) {
    ThreadMetadataStore::init_global(cx);
    migrate_thread_metadata(cx);
}
32
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // Session ids already present in the metadata DB; any thread with one
        // of these ids was migrated on a previous run and is skipped below.
        let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();

        let is_first_migration = existing_entries.is_empty();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_entries.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        session_id: entry.id,
                        agent_id: ZED_AGENT_ID.clone(),
                        title: entry.title,
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        folder_paths: entry.folder_paths,
                        main_worktree_paths: PathList::default(),
                        // Migrated threads start archived; a subset per
                        // project is unarchived below on the first migration.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without a project stay archived.
                if entry.folder_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.folder_paths.clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
    .detach_and_log_err(cx);
}
109
/// Newtype wrapper installing the [`ThreadMetadataStore`] entity as an app global.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
112
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Unique id of the session this metadata describes.
    pub session_id: acp::SessionId,
    /// Agent that owns the thread; [`ZED_AGENT_ID`] for native threads.
    pub agent_id: AgentId,
    pub title: SharedString,
    pub updated_at: DateTime<Utc>,
    /// `None` for rows written before creation timestamps were recorded.
    pub created_at: Option<DateTime<Utc>>,
    /// Worktree paths the thread's project had open.
    pub folder_paths: PathList,
    /// Paths of the main repository when the thread ran in a linked worktree;
    /// empty otherwise.
    pub main_worktree_paths: PathList,
    /// Archived threads are hidden from the sidebar.
    pub archived: bool,
}
126
127impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
128 fn from(meta: &ThreadMetadata) -> Self {
129 Self {
130 session_id: meta.session_id.clone(),
131 work_dirs: Some(meta.folder_paths.clone()),
132 title: Some(meta.title.clone()),
133 updated_at: Some(meta.updated_at),
134 created_at: meta.created_at,
135 meta: None,
136 }
137 }
138}
139
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
//
// Public data-carrier type: derive the standard traits so callers can log,
// duplicate, and compare records (backward-compatible, purely additive).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
181
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Automatically listens to AcpThread events and updates metadata if it has changed.
pub struct ThreadMetadataStore {
    db: ThreadMetadataDb,
    /// Primary in-memory cache, keyed by session id.
    threads: HashMap<acp::SessionId, ThreadMetadata>,
    /// Index: `folder_paths` -> sessions, kept in sync with `threads`.
    threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// Index: `main_worktree_paths` -> sessions; only populated for non-empty
    /// main paths.
    threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// In-flight reload, if any; shared so multiple callers can await it.
    reload_task: Option<Shared<Task<()>>>,
    /// Per-session event subscriptions, dropped when the thread is released.
    session_subscriptions: HashMap<acp::SessionId, Subscription>,
    /// Queue of writes drained by `_db_operations_task`.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Background task that batches and persists queued operations.
    _db_operations_task: Task<()>,
}
195
/// A pending database write queued for the background persistence task.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update the row for the contained metadata's session.
    Upsert(ThreadMetadata),
    /// Delete the row for this session.
    Delete(acp::SessionId),
}
201
202impl DbOperation {
203 fn id(&self) -> &acp::SessionId {
204 match self {
205 DbOperation::Upsert(thread) => &thread.session_id,
206 DbOperation::Delete(session_id) => session_id,
207 }
208 }
209}
210
211impl ThreadMetadataStore {
    /// Installs the store as an app global (production build).
    /// Idempotent: does nothing if a global already exists.
    #[cfg(not(any(test, feature = "test-support")))]
    pub fn init_global(cx: &mut App) {
        if cx.has_global::<Self>() {
            return;
        }

        let db = ThreadMetadataDb::global(cx);
        let thread_store = cx.new(|cx| Self::new(db, cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
222
    /// Installs the store as an app global for tests, backed by a per-test
    /// database whose name is derived from the current thread's name so that
    /// parallel tests don't share state.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
232
233 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
234 cx.try_global::<GlobalThreadMetadataStore>()
235 .map(|store| store.0.clone())
236 }
237
    /// Returns the global store. Panics if [`Self::init_global`] hasn't run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }
241
    /// Whether the store currently holds no thread metadata.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }
245
    /// Returns all thread IDs.
    pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
        self.threads.keys().cloned()
    }
250
    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        self.threads.get(session_id)
    }
255
    /// Returns all threads, archived or not, in unspecified order.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
260
261 /// Returns all archived threads.
262 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
263 self.entries().filter(|t| t.archived)
264 }
265
266 /// Returns all threads for the given path list, excluding archived threads.
267 pub fn entries_for_path(
268 &self,
269 path_list: &PathList,
270 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
271 self.threads_by_paths
272 .get(path_list)
273 .into_iter()
274 .flatten()
275 .filter_map(|s| self.threads.get(s))
276 .filter(|s| !s.archived)
277 }
278
279 /// Returns threads whose `main_worktree_paths` matches the given path list,
280 /// excluding archived threads. This finds threads that were opened in a
281 /// linked worktree but are associated with the given main worktree.
282 pub fn entries_for_main_worktree_path(
283 &self,
284 path_list: &PathList,
285 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
286 self.threads_by_main_paths
287 .get(path_list)
288 .into_iter()
289 .flatten()
290 .filter_map(|s| self.threads.get(s))
291 .filter(|s| !s.archived)
292 }
293
    /// Replaces the in-memory caches with the current database contents and
    /// notifies observers. Returns a shareable task so callers can await
    /// completion; supersedes any reload already in flight.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop the previous reload task, if any; the new one replaces it.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the primary map and both path indexes from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();

                    for row in rows {
                        this.threads_by_paths
                            .entry(row.folder_paths.clone())
                            .or_default()
                            .insert(row.session_id.clone());
                        // The main-path index only tracks non-empty path lists.
                        if !row.main_worktree_paths.is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths.clone())
                                .or_default()
                                .insert(row.session_id.clone());
                        }
                        this.threads.insert(row.session_id.clone(), row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
334
335 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
336 for metadata in metadata {
337 self.save_internal(metadata);
338 }
339 cx.notify();
340 }
341
    /// Test-only public wrapper around the private `save`.
    #[cfg(any(test, feature = "test-support"))]
    pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save(metadata, cx)
    }
346
    /// Saves a single metadata entry and notifies observers.
    fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
351
    /// Updates the in-memory caches for `metadata` and queues an asynchronous
    /// upsert for the background DB task. Callers are responsible for
    /// `cx.notify()`.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the entry already exists and either path key changed, remove it
        // from the stale index buckets before re-inserting under the new keys.
        if let Some(thread) = self.threads.get(&metadata.session_id) {
            if thread.folder_paths != metadata.folder_paths {
                if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                    session_ids.remove(&metadata.session_id);
                }
            }
            if thread.main_worktree_paths != metadata.main_worktree_paths
                && !thread.main_worktree_paths.is_empty()
            {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&metadata.session_id);
                }
            }
        }

        self.threads
            .insert(metadata.session_id.clone(), metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths.clone())
            .or_default()
            .insert(metadata.session_id.clone());

        // The main-path index only tracks non-empty path lists.
        if !metadata.main_worktree_paths.is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths.clone())
                .or_default()
                .insert(metadata.session_id.clone());
        }

        // Persistence is async; the channel is unbounded, so try_send only
        // fails if the background task is gone (logged, not fatal).
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
390
391 pub fn update_working_directories(
392 &mut self,
393 session_id: &acp::SessionId,
394 work_dirs: PathList,
395 cx: &mut Context<Self>,
396 ) {
397 if let Some(thread) = self.threads.get(session_id) {
398 self.save_internal(ThreadMetadata {
399 folder_paths: work_dirs,
400 ..thread.clone()
401 });
402 cx.notify();
403 }
404 }
405
    /// Marks the thread as archived (hidden from the sidebar).
    pub fn archive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, true, cx);
    }
409
    /// Clears the archived flag, restoring the thread to the sidebar.
    pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, false, cx);
    }
413
    /// Records an archived git worktree in the database on a background task,
    /// resolving to the new row's id (used afterwards to link threads to it).
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }
437
    /// Associates a thread with an archived worktree row on a background task.
    pub fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
                .await
        })
    }
450
    /// Fetches all archived worktrees linked to the given thread, on a
    /// background task.
    pub fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
    }
459
    /// Deletes an archived worktree row (and its thread links) on a
    /// background task.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }
464
465 fn update_archived(
466 &mut self,
467 session_id: &acp::SessionId,
468 archived: bool,
469 cx: &mut Context<Self>,
470 ) {
471 if let Some(thread) = self.threads.get(session_id) {
472 self.save_internal(ThreadMetadata {
473 archived,
474 ..thread.clone()
475 });
476 cx.notify();
477 }
478 }
479
    /// Removes a thread from all in-memory caches and queues deletion of its
    /// database row.
    pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
        // Drop the thread from both path indexes before removing the entry.
        if let Some(thread) = self.threads.get(&session_id) {
            if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                session_ids.remove(&session_id);
            }
            if !thread.main_worktree_paths.is_empty() {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&session_id);
                }
            }
        }
        self.threads.remove(&session_id);
        // Row deletion happens asynchronously on the background DB task.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(session_id))
            .log_err();
        cx.notify();
    }
500
    /// Builds the store: observes newly-created [`acp_thread::AcpThread`]
    /// entities to track their events, spawns the background task that
    /// batches queued DB writes, and kicks off an initial reload.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
            // Don't track subagent threads in the sidebar.
            if thread.parent_session_id().is_some() {
                return;
            }

            let thread_entity = cx.entity();

            // Drop our event subscription when the thread entity is released,
            // so `session_subscriptions` doesn't accumulate dead entries.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |thread, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            let session_id = thread.session_id().clone();
                            store.session_subscriptions.remove(&session_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
                    this.session_subscriptions
                        .insert(thread.session_id().clone(), subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                // Drain the queue in batches: block for the first update, then
                // greedily pull whatever else is already queued so redundant
                // writes for the same session can be collapsed before hitting
                // the database.
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(session_id) => {
                                db.delete(session_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            reload_task: None,
            session_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            _db_operations_task,
        };
        // Populate the caches from the database immediately.
        let _ = this.reload(cx);
        this
    }
572
573 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
574 let mut ops = HashMap::default();
575 for operation in operations.into_iter().rev() {
576 if ops.contains_key(operation.id()) {
577 continue;
578 }
579 ops.insert(operation.id().clone(), operation);
580 }
581 ops.into_values().collect()
582 }
583
    /// Reacts to [`AcpThreadEvent`]s by snapshotting the thread's current
    /// state into [`ThreadMetadata`] and saving it. Events that can't change
    /// the sidebar-relevant state are ignored.
    fn handle_thread_event(
        &mut self,
        thread: Entity<acp_thread::AcpThread>,
        event: &AcpThreadEvent,
        cx: &mut Context<Self>,
    ) {
        // Don't track subagent threads in the sidebar.
        if thread.read(cx).parent_session_id().is_some() {
            return;
        }

        match event {
            AcpThreadEvent::NewEntry
            | AcpThreadEvent::TitleUpdated
            | AcpThreadEvent::EntryUpdated(_)
            | AcpThreadEvent::EntriesRemoved(_)
            | AcpThreadEvent::ToolAuthorizationRequested(_)
            | AcpThreadEvent::ToolAuthorizationReceived(_)
            | AcpThreadEvent::Retry(_)
            | AcpThreadEvent::Stopped(_)
            | AcpThreadEvent::Error
            | AcpThreadEvent::LoadError(_)
            | AcpThreadEvent::Refusal
            | AcpThreadEvent::WorkingDirectoriesUpdated => {
                let thread_ref = thread.read(cx);
                // Don't persist metadata for threads with no content yet.
                if thread_ref.entries().is_empty() {
                    return;
                }

                let existing_thread = self.threads.get(thread_ref.session_id());
                let session_id = thread_ref.session_id().clone();
                let title = thread_ref
                    .title()
                    .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());

                let updated_at = Utc::now();

                // Preserve the original creation time across updates; fall
                // back to "now" for threads seen for the first time.
                let created_at = existing_thread
                    .and_then(|t| t.created_at)
                    .unwrap_or_else(|| updated_at);

                let agent_id = thread_ref.connection().agent_id();

                // Snapshot the project's currently visible worktree roots.
                let folder_paths = {
                    let project = thread_ref.project().read(cx);
                    let paths: Vec<Arc<Path>> = project
                        .visible_worktrees(cx)
                        .map(|worktree| worktree.read(cx).abs_path())
                        .collect();
                    PathList::new(&paths)
                };

                let main_worktree_paths = thread_ref
                    .project()
                    .read(cx)
                    .project_group_key(cx)
                    .path_list()
                    .clone();

                // Threads without a folder path (e.g. started in an empty
                // window) are archived by default so they don't get lost,
                // because they won't show up in the sidebar. Users can reload
                // them from the archive.
                let archived = existing_thread
                    .map(|t| t.archived)
                    .unwrap_or(folder_paths.is_empty());

                let metadata = ThreadMetadata {
                    session_id,
                    agent_id,
                    title,
                    created_at: Some(created_at),
                    updated_at,
                    folder_paths,
                    main_worktree_paths,
                    archived,
                };

                self.save(metadata, cx);
            }
            // These events never affect sidebar metadata.
            AcpThreadEvent::TokenUsageUpdated
            | AcpThreadEvent::SubagentSpawned(_)
            | AcpThreadEvent::PromptCapabilitiesUpdated
            | AcpThreadEvent::AvailableCommandsUpdated(_)
            | AcpThreadEvent::ModeUpdated(_)
            | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
        }
    }
672}
673
// NOTE(review): `init_global` checks `cx.has_global::<Self>()`, which needs
// this impl; the entity itself is stored via `GlobalThreadMetadataStore` —
// confirm both Global impls are intentional.
impl Global for ThreadMetadataStore {}
675
/// SQLite-backed persistence layer for sidebar thread metadata and archived
/// git worktrees.
struct ThreadMetadataDb(ThreadSafeConnection);
677
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Migrations are applied in order and must be append-only: never edit or
    // remove an existing entry, only add new ones at the end.
    const MIGRATIONS: &[&str] = &[
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
    ];
}
715
// Generates the shared/global connection plumbing for `ThreadMetadataDb`
// (empty list: no parent domains).
db::static_connection!(ThreadMetadataDb, []);
717
718impl ThreadMetadataDb {
    /// Returns the session ids of all stored threads, most recently updated
    /// first.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }
725
    /// List all sidebar thread metadata, ordered by updated_at descending.
    ///
    /// Column order here must match `impl Column for ThreadMetadata`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }
734
    /// Upsert metadata for a thread.
    ///
    /// Encoding choices: the native Zed agent is stored as a NULL `agent_id`,
    /// and empty path lists are stored as NULL path/order column pairs; both
    /// are decoded back in `impl Column for ThreadMetadata`.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        // Timestamps are stored as RFC 3339 text so the TEXT columns sort
        // chronologically.
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths.serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths.serialize();
        let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
        {
            (None, None)
        } else {
            (Some(main_serialized.paths), Some(main_serialized.order))
        };
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
                       VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
                       ON CONFLICT(session_id) DO UPDATE SET \
                       agent_id = excluded.agent_id, \
                       title = excluded.title, \
                       updated_at = excluded.updated_at, \
                       created_at = excluded.created_at, \
                       folder_paths = excluded.folder_paths, \
                       folder_paths_order = excluded.folder_paths_order, \
                       archived = excluded.archived, \
                       main_worktree_paths = excluded.main_worktree_paths, \
                       main_worktree_paths_order = excluded.main_worktree_paths_order";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the VALUES placeholders above; each bind
            // returns the next parameter index.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            stmt.bind(&main_worktree_paths_order, i)?;
            stmt.exec()
        })
        .await
    }
789
    /// Delete metadata for a single thread.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
801
    /// Inserts an archived-worktree record and returns its auto-assigned
    /// row id (via `RETURNING id`).
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }
828
    /// Links a thread to an archived worktree row. Fails on duplicate links
    /// because of the table's composite primary key.
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }
846
    /// Returns every archived worktree linked to the given thread.
    ///
    /// Column order must match `impl Column for ArchivedGitWorktree`.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.session_id = ?1",
        )?(session_id)
    }
858
    /// Deletes an archived worktree row and all of its thread links.
    /// Links are removed first to satisfy the foreign-key reference.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
875}
876
impl Column for ThreadMetadata {
    /// Decodes one `sidebar_threads` row. The read order below must match the
    /// SELECT column order used in `ThreadMetadataDb::list`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
        let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (title, next): (String, i32) = Column::column(statement, next)?;
        let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
        let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (archived, next): (bool, i32) = Column::column(statement, next)?;
        let (main_worktree_paths_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (main_worktree_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;

        // NULL agent_id encodes the native Zed agent (see `save`).
        let agent_id = agent_id
            .map(|id| AgentId::new(id))
            .unwrap_or(ZED_AGENT_ID.clone());

        // Timestamps are persisted as RFC 3339 text.
        let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
        let created_at = created_at_str
            .as_deref()
            .map(DateTime::parse_from_rfc3339)
            .transpose()?
            .map(|dt| dt.with_timezone(&Utc));

        // NULL path columns decode to an empty PathList (see `save`).
        let folder_paths = folder_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: folder_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let main_worktree_paths = main_worktree_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: main_worktree_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        Ok((
            ThreadMetadata {
                session_id: acp::SessionId::new(id),
                agent_id,
                title: title.into(),
                updated_at,
                created_at,
                folder_paths,
                main_worktree_paths,
                archived,
            },
            next,
        ))
    }
}
937
impl Column for ArchivedGitWorktree {
    /// Decodes one row. The read order below must match the SELECT column
    /// order used in `ThreadMetadataDb::get_archived_worktrees_for_thread`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (i64, i32) = Column::column(statement, start_index)?;
        let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;

        Ok((
            ArchivedGitWorktree {
                id,
                worktree_path: PathBuf::from(worktree_path_str),
                main_repo_path: PathBuf::from(main_repo_path_str),
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            },
            next,
        ))
    }
}
962
963#[cfg(test)]
964mod tests {
965 use super::*;
966 use acp_thread::{AgentConnection, StubAgentConnection};
967 use action_log::ActionLog;
968 use agent::DbThread;
969 use agent_client_protocol as acp;
970
971 use gpui::TestAppContext;
972 use project::FakeFs;
973 use project::Project;
974 use std::path::Path;
975 use std::rc::Rc;
976
    /// Builds a minimal `DbThread` fixture with the given title and timestamp;
    /// all other fields are empty/default.
    fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
        DbThread {
            title: title.to_string().into(),
            messages: Vec::new(),
            updated_at,
            detailed_summary: None,
            initial_project_snapshot: None,
            cumulative_token_usage: Default::default(),
            request_token_usage: Default::default(),
            model: None,
            profile: None,
            imported: false,
            subagent_context: None,
            speed: None,
            thinking_enabled: false,
            thinking_effort: None,
            draft_prompt: None,
            ui_scroll_position: None,
        }
    }
997
    /// Builds an unarchived `ThreadMetadata` fixture for the native Zed agent,
    /// with `created_at == updated_at` and no main-worktree paths.
    fn make_metadata(
        session_id: &str,
        title: &str,
        updated_at: DateTime<Utc>,
        folder_paths: PathList,
    ) -> ThreadMetadata {
        ThreadMetadata {
            archived: false,
            session_id: acp::SessionId::new(session_id),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: title.to_string().into(),
            updated_at,
            created_at: Some(updated_at),
            folder_paths,
            main_worktree_paths: PathList::default(),
        }
    }
1015
    /// Installs settings plus the metadata and thread stores as globals, then
    /// waits for their initial async loads to settle.
    fn init_test(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
        });
        cx.run_until_parked();
    }
1025
    // Verifies that metadata rows written directly to the database BEFORE the
    // global store exists are loaded into the in-memory cache on store init,
    // and that per-path lookups reflect them.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test DB after the current thread so concurrently running
        // tests don't collide on the same database.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        // Seed two rows before the global store is created.
        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Deliberately not using `init_test` here: the store must boot
        // against the pre-populated database.
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Both seeded sessions should be present in the cache.
            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // Each session is indexed under its own project path.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1090
    // Exercises the cache through a save → move → delete lifecycle: saving
    // indexes entries by path, re-saving under new paths re-indexes them, and
    // deleting removes them from both the id and path indices.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // After the initial saves, each session is indexed under its path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under project B's paths, simulating the thread
        // moving to a different project.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        // session-1 should now appear only under the second path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Deleting session-2 removes it from every index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1210
    // The migration copies threads from the native `ThreadStore` into the
    // metadata store, skipping sessions that already have metadata. Since the
    // DB is non-empty beforehand, migrated entries must arrive archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for a-session-0; migration must NOT overwrite
        // it with the native store's copy.
        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            folder_paths: project_a_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Native-store threads: one colliding with the existing metadata, two
        // project-scoped ones, and one with no project association.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing entry keeps its original title and archived state.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // Not a first migration (DB had an entry), so all migrated threads
        // land archived.
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1319
    // When every native-store thread already has metadata, running the
    // migration must change nothing: no duplicates, no overwrites.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            folder_paths: project_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a newer native thread under the SAME session id; migration
        // should still skip it because metadata already exists for that id.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        // Still exactly one entry, unchanged session id.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1376
    // On a first migration (empty metadata DB), only the 5 most recently
    // updated threads per project stay unarchived; older ones get archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        // Persist each thread in the native store, awaiting each save so the
        // migration below observes all of them.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the first five are the most recent.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1466
    // A thread with no user content (e.g. only a draft title change) must not
    // produce a sidebar metadata entry; the first user message should.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Title-only update on an empty thread: should be ignored.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // Adding real user content should create the metadata entry.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1518
    // Dropping the live thread entity must not delete the metadata of a
    // thread that already has user content.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Give the thread real content so metadata gets created.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Release the entity and flush effects so any release observers run.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        // Metadata survives the release.
        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1564
    // Threads created in a project with no worktrees should be saved with
    // empty folder paths and archived by default; threads with a worktree
    // keep its path and remain unarchived.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Content + title trigger a metadata save for the worktree-less thread.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths.is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                with_worktree.folder_paths,
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
1644
    // Subagent threads (those constructed with a parent session id) must not
    // show up in the sidebar metadata; only top-level threads do.
    #[gpui::test]
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                // The Some(parent id) first argument is what marks this
                // thread as a subagent of the regular thread.
                acp_thread::AcpThread::new(
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
             but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
1721
1722 #[test]
1723 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1724 let now = Utc::now();
1725
1726 let operations = vec![
1727 DbOperation::Upsert(make_metadata(
1728 "session-1",
1729 "First Thread",
1730 now,
1731 PathList::default(),
1732 )),
1733 DbOperation::Delete(acp::SessionId::new("session-1")),
1734 ];
1735
1736 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1737
1738 assert_eq!(deduped.len(), 1);
1739 assert_eq!(
1740 deduped[0],
1741 DbOperation::Delete(acp::SessionId::new("session-1"))
1742 );
1743 }
1744
1745 #[test]
1746 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1747 let now = Utc::now();
1748 let later = now + chrono::Duration::seconds(1);
1749
1750 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1751 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1752
1753 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1754 DbOperation::Upsert(old_metadata),
1755 DbOperation::Upsert(new_metadata.clone()),
1756 ]);
1757
1758 assert_eq!(deduped.len(), 1);
1759 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1760 }
1761
1762 #[test]
1763 fn test_dedup_db_operations_preserves_distinct_sessions() {
1764 let now = Utc::now();
1765
1766 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1767 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1768 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1769 DbOperation::Upsert(metadata1.clone()),
1770 DbOperation::Upsert(metadata2.clone()),
1771 ]);
1772
1773 assert_eq!(deduped.len(), 2);
1774 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1775 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1776 }
1777
    // Archiving moves a thread out of the per-path index and into
    // `archived_entries`; unarchiving reverses that fully.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: visible under its path, not archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // After archive: gone from the path index, present in archived list.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // After unarchive: back in the path index, archived list empty again.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
1863
    // `entries_for_path` must hide archived threads, while `entries` (the
    // full listing) and `archived_entries` still expose them.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Path lookup only returns the active thread...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // ...but the full listing still contains both.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
1923
    // `save_all` should persist a batch of metadata entries in one call and
    // populate both the entry and id indices.
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
1971
    // The archived flag must round-trip through the database: after a
    // `reload`, an archived thread stays archived and excluded from paths.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // Drop the cache and re-read everything from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2030
2031 #[gpui::test]
2032 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2033 init_test(cx);
2034
2035 cx.run_until_parked();
2036
2037 cx.update(|cx| {
2038 let store = ThreadMetadataStore::global(cx);
2039 store.update(cx, |store, cx| {
2040 store.archive(&acp::SessionId::new("nonexistent"), cx);
2041 });
2042 });
2043
2044 cx.run_until_parked();
2045
2046 cx.update(|cx| {
2047 let store = ThreadMetadataStore::global(cx);
2048 let store = store.read(cx);
2049
2050 assert!(store.is_empty());
2051 assert_eq!(store.entries().count(), 0);
2052 assert_eq!(store.archived_entries().count(), 0);
2053 });
2054 }
2055
    // A save immediately followed by an archive — without letting the
    // executor park in between — must still end with the entry archived
    // (i.e. queued DB operations apply in order).
    #[gpui::test]
    async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths);
        let session_id = metadata.session_id.clone();

        // Both calls happen in the same update, before any parking.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata.clone(), cx);
                store.archive(&session_id, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
            pretty_assertions::assert_eq!(
                entries,
                vec![ThreadMetadata {
                    archived: true,
                    ..metadata
                }]
            );
        });
    }
2089
    // Round-trips an archived worktree record: create it, link it to a
    // thread, then fetch it back by thread id and verify every field.
    #[gpui::test]
    async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    Some("feature-branch".to_string()),
                    "staged_aaa".to_string(),
                    "unstaged_bbb".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
            })
            .await
            .unwrap();

        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
            })
            .await
            .unwrap();

        // Every field must survive the DB round-trip unchanged.
        assert_eq!(worktrees.len(), 1);
        let wt = &worktrees[0];
        assert_eq!(wt.id, id);
        assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
        assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
        assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
        assert_eq!(wt.staged_commit_hash, "staged_aaa");
        assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
        assert_eq!(wt.original_commit_hash, "original_000");
    }
2134
2135 #[gpui::test]
2136 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2137 init_test(cx);
2138 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2139
2140 let id = store
2141 .read_with(cx, |store, cx| {
2142 store.create_archived_worktree(
2143 "/tmp/worktree".to_string(),
2144 "/home/user/repo".to_string(),
2145 Some("main".to_string()),
2146 "deadbeef".to_string(),
2147 "deadbeef".to_string(),
2148 "original_000".to_string(),
2149 cx,
2150 )
2151 })
2152 .await
2153 .unwrap();
2154
2155 store
2156 .read_with(cx, |store, cx| {
2157 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2158 })
2159 .await
2160 .unwrap();
2161
2162 store
2163 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2164 .await
2165 .unwrap();
2166
2167 let worktrees = store
2168 .read_with(cx, |store, cx| {
2169 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2170 })
2171 .await
2172 .unwrap();
2173 assert!(worktrees.is_empty());
2174 }
2175
2176 #[gpui::test]
2177 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2178 init_test(cx);
2179 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2180
2181 let id = store
2182 .read_with(cx, |store, cx| {
2183 store.create_archived_worktree(
2184 "/tmp/worktree".to_string(),
2185 "/home/user/repo".to_string(),
2186 None,
2187 "abc123".to_string(),
2188 "abc123".to_string(),
2189 "original_000".to_string(),
2190 cx,
2191 )
2192 })
2193 .await
2194 .unwrap();
2195
2196 store
2197 .read_with(cx, |store, cx| {
2198 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2199 })
2200 .await
2201 .unwrap();
2202
2203 store
2204 .read_with(cx, |store, cx| {
2205 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2206 })
2207 .await
2208 .unwrap();
2209
2210 let wt1 = store
2211 .read_with(cx, |store, cx| {
2212 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2213 })
2214 .await
2215 .unwrap();
2216
2217 let wt2 = store
2218 .read_with(cx, |store, cx| {
2219 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2220 })
2221 .await
2222 .unwrap();
2223
2224 assert_eq!(wt1.len(), 1);
2225 assert_eq!(wt2.len(), 1);
2226 assert_eq!(wt1[0].id, wt2[0].id);
2227 }
2228
2229 #[gpui::test]
2230 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2231 init_test(cx);
2232 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2233
2234 let id1 = store
2235 .read_with(cx, |store, cx| {
2236 store.create_archived_worktree(
2237 "/projects/worktree-a".to_string(),
2238 "/home/user/repo".to_string(),
2239 Some("branch-a".to_string()),
2240 "staged_a".to_string(),
2241 "unstaged_a".to_string(),
2242 "original_000".to_string(),
2243 cx,
2244 )
2245 })
2246 .await
2247 .unwrap();
2248
2249 let id2 = store
2250 .read_with(cx, |store, cx| {
2251 store.create_archived_worktree(
2252 "/projects/worktree-b".to_string(),
2253 "/home/user/repo".to_string(),
2254 Some("branch-b".to_string()),
2255 "staged_b".to_string(),
2256 "unstaged_b".to_string(),
2257 "original_000".to_string(),
2258 cx,
2259 )
2260 })
2261 .await
2262 .unwrap();
2263
2264 store
2265 .read_with(cx, |store, cx| {
2266 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2267 })
2268 .await
2269 .unwrap();
2270
2271 store
2272 .read_with(cx, |store, cx| {
2273 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2274 })
2275 .await
2276 .unwrap();
2277
2278 let worktrees = store
2279 .read_with(cx, |store, cx| {
2280 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2281 })
2282 .await
2283 .unwrap();
2284
2285 assert_eq!(worktrees.len(), 2);
2286
2287 let paths: Vec<&Path> = worktrees
2288 .iter()
2289 .map(|w| w.worktree_path.as_path())
2290 .collect();
2291 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2292 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2293 }
2294}