1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 kvp::KeyValueStore,
14 sqlez::{
15 bindable::Column, domain::Domain, statement::Statement,
16 thread_safe_connection::ThreadSafeConnection,
17 },
18 sqlez_macros::sql,
19};
20use fs::Fs;
21use futures::{FutureExt, future::Shared};
22use gpui::{AppContext as _, Entity, Global, Subscription, Task};
23use project::AgentId;
24use remote::RemoteConnectionOptions;
25use ui::{App, Context, SharedString};
26use util::ResultExt as _;
27use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
28
29use crate::DEFAULT_THREAD_TITLE;
30
31const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
32
33pub fn init(cx: &mut App) {
34 ThreadMetadataStore::init_global(cx);
35 let migration_task = migrate_thread_metadata(cx);
36 migrate_thread_remote_connections(cx, migration_task);
37}
38
39/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
40/// We skip migrating threads that do not have a project.
41///
42/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    // Clone the DB handle so the spawned task can use it without the store.
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // IDs already present in the metadata DB. Used both to skip threads
        // that were migrated on a previous run and to detect a first-time
        // migration (empty DB).
        let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();

        let is_first_migration = existing_entries.is_empty();

        // Gather legacy native-agent threads that have no metadata row yet.
        // Everything starts archived; a subset is unarchived below.
        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_entries.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        session_id: entry.id,
                        agent_id: ZED_AGENT_ID.clone(),
                        title: entry.title,
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        // Legacy entries only carry folder paths, so
                        // main == folder for every migrated thread.
                        worktree_paths: ThreadWorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            // Group mutable references by folder-path list so each project's
            // threads can be ranked independently.
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads with no project stay archived unconditionally.
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Newest first, then unarchive the top five.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        // Best-effort: if the store was dropped (e.g. on shutdown), skip the reload.
        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
114
115fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
116 let store = ThreadMetadataStore::global(cx);
117 let db = store.read(cx).db.clone();
118 let kvp = KeyValueStore::global(cx);
119 let workspace_db = WorkspaceDb::global(cx);
120 let fs = <dyn Fs>::global(cx);
121
122 cx.spawn(async move |cx| -> anyhow::Result<()> {
123 migration_task.await?;
124
125 if kvp
126 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
127 .is_some()
128 {
129 return Ok(());
130 }
131
132 let recent_workspaces = workspace_db.recent_workspaces_on_disk(fs.as_ref()).await?;
133
134 let mut local_path_lists = HashSet::<PathList>::default();
135 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
136
137 recent_workspaces
138 .iter()
139 .filter(|(_, location, path_list, _)| {
140 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
141 })
142 .for_each(|(_, _, path_list, _)| {
143 local_path_lists.insert(path_list.clone());
144 });
145
146 for (_, location, path_list, _) in recent_workspaces {
147 match location {
148 SerializedWorkspaceLocation::Remote(remote_connection)
149 if !local_path_lists.contains(&path_list) =>
150 {
151 remote_path_lists
152 .entry(path_list)
153 .or_insert(remote_connection);
154 }
155 _ => {}
156 }
157 }
158
159 let mut reloaded = false;
160 for metadata in db.list()? {
161 if metadata.remote_connection.is_some() {
162 continue;
163 }
164
165 if let Some(remote_connection) = remote_path_lists
166 .get(metadata.folder_paths())
167 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
168 {
169 db.save(ThreadMetadata {
170 remote_connection: Some(remote_connection.clone()),
171 ..metadata
172 })
173 .await?;
174 reloaded = true;
175 }
176 }
177
178 let reloaded_task = reloaded
179 .then_some(store.update(cx, |store, cx| store.reload(cx)))
180 .unwrap_or(Task::ready(()).shared());
181
182 kvp.write_kvp(
183 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
184 "1".to_string(),
185 )
186 .await?;
187 reloaded_task.await;
188
189 Ok(())
190 })
191 .detach_and_log_err(cx);
192}
193
/// GPUI global wrapper holding the app-wide [`ThreadMetadataStore`] entity.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
196
197/// Paired worktree paths for a thread. Each folder path has a corresponding
198/// main worktree path at the same position. The two lists are always the
199/// same length and are modified together via `add_path` / `remove_main_path`.
200///
201/// For non-linked worktrees, the main path and folder path are identical.
202/// For linked worktrees, the main path is the original repo and the folder
203/// path is the linked worktree location.
204///
205/// Internally stores two `PathList`s with matching insertion order so that
206/// `ordered_paths()` on both yields positionally-paired results.
#[derive(Default, Debug, Clone)]
pub struct ThreadWorktreePaths {
    // Paths of the worktree folders as opened in the project.
    folder_paths: PathList,
    // Main repository path, paired positionally with each folder path.
    main_worktree_paths: PathList,
}
212
213impl PartialEq for ThreadWorktreePaths {
214 fn eq(&self, other: &Self) -> bool {
215 self.folder_paths == other.folder_paths
216 && self.main_worktree_paths == other.main_worktree_paths
217 }
218}
219
220impl ThreadWorktreePaths {
221 /// Build from a project's current state. Each visible worktree is paired
222 /// with its main repo path (resolved via git), falling back to the
223 /// worktree's own path if no git repo is found.
224 pub fn from_project(project: &project::Project, cx: &App) -> Self {
225 let (mains, folders): (Vec<PathBuf>, Vec<PathBuf>) = project
226 .visible_worktrees(cx)
227 .map(|worktree| {
228 let snapshot = worktree.read(cx).snapshot();
229 let folder_path = snapshot.abs_path().to_path_buf();
230 let main_path = snapshot
231 .root_repo_common_dir()
232 .and_then(|dir| Some(dir.parent()?.to_path_buf()))
233 .unwrap_or_else(|| folder_path.clone());
234 (main_path, folder_path)
235 })
236 .unzip();
237 Self {
238 folder_paths: PathList::new(&folders),
239 main_worktree_paths: PathList::new(&mains),
240 }
241 }
242
243 /// Build from two parallel `PathList`s that already share the same
244 /// insertion order. Used for deserialization from DB.
245 ///
246 /// Returns an error if the two lists have different lengths, which
247 /// indicates corrupted data from a prior migration bug.
248 pub fn from_path_lists(
249 main_worktree_paths: PathList,
250 folder_paths: PathList,
251 ) -> anyhow::Result<Self> {
252 anyhow::ensure!(
253 main_worktree_paths.paths().len() == folder_paths.paths().len(),
254 "main_worktree_paths has {} entries but folder_paths has {}",
255 main_worktree_paths.paths().len(),
256 folder_paths.paths().len(),
257 );
258 Ok(Self {
259 folder_paths,
260 main_worktree_paths,
261 })
262 }
263
264 /// Build for non-linked worktrees where main == folder for every path.
265 pub fn from_folder_paths(folder_paths: &PathList) -> Self {
266 Self {
267 folder_paths: folder_paths.clone(),
268 main_worktree_paths: folder_paths.clone(),
269 }
270 }
271
272 pub fn is_empty(&self) -> bool {
273 self.folder_paths.is_empty()
274 }
275
276 /// The folder paths (for workspace matching / `threads_by_paths` index).
277 pub fn folder_path_list(&self) -> &PathList {
278 &self.folder_paths
279 }
280
281 /// The main worktree paths (for group key / `threads_by_main_paths` index).
282 pub fn main_worktree_path_list(&self) -> &PathList {
283 &self.main_worktree_paths
284 }
285
286 /// Iterate the (main_worktree_path, folder_path) pairs in insertion order.
287 pub fn ordered_pairs(&self) -> impl Iterator<Item = (&PathBuf, &PathBuf)> {
288 self.main_worktree_paths
289 .ordered_paths()
290 .zip(self.folder_paths.ordered_paths())
291 }
292
293 /// Add a new path pair. If the exact (main, folder) pair already exists,
294 /// this is a no-op. Rebuilds both internal `PathList`s to maintain
295 /// consistent ordering.
296 pub fn add_path(&mut self, main_path: &Path, folder_path: &Path) {
297 let already_exists = self
298 .ordered_pairs()
299 .any(|(m, f)| m.as_path() == main_path && f.as_path() == folder_path);
300 if already_exists {
301 return;
302 }
303 let (mut mains, mut folders): (Vec<PathBuf>, Vec<PathBuf>) = self
304 .ordered_pairs()
305 .map(|(m, f)| (m.clone(), f.clone()))
306 .unzip();
307 mains.push(main_path.to_path_buf());
308 folders.push(folder_path.to_path_buf());
309 self.main_worktree_paths = PathList::new(&mains);
310 self.folder_paths = PathList::new(&folders);
311 }
312
313 /// Remove all pairs whose main worktree path matches the given path.
314 /// This removes the corresponding entries from both lists.
315 pub fn remove_main_path(&mut self, main_path: &Path) {
316 let (mains, folders): (Vec<PathBuf>, Vec<PathBuf>) = self
317 .ordered_pairs()
318 .filter(|(m, _)| m.as_path() != main_path)
319 .map(|(m, f)| (m.clone(), f.clone()))
320 .unzip();
321 self.main_worktree_paths = PathList::new(&mains);
322 self.folder_paths = PathList::new(&folders);
323 }
324}
325
326/// Lightweight metadata for any thread (native or ACP), enough to populate
327/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    // Unique ACP session id; primary key in the metadata DB.
    pub session_id: acp::SessionId,
    // Which agent (native Zed agent or an external ACP agent) owns the thread.
    pub agent_id: AgentId,
    pub title: SharedString,
    // Last activity time; used for sidebar ordering and migration ranking.
    pub updated_at: DateTime<Utc>,
    // `None` for legacy rows that never recorded a creation time.
    pub created_at: Option<DateTime<Utc>>,
    // Paired folder/main-worktree paths; see `ThreadWorktreePaths`.
    pub worktree_paths: ThreadWorktreePaths,
    // Set when the thread belongs to a remote (e.g. SSH) workspace.
    pub remote_connection: Option<RemoteConnectionOptions>,
    // Archived threads are hidden from the sidebar's per-path listings.
    pub archived: bool,
}
339
impl ThreadMetadata {
    /// The folder paths this thread was opened at (workspace-matching key).
    pub fn folder_paths(&self) -> &PathList {
        self.worktree_paths.folder_path_list()
    }
    /// The main worktree paths (project grouping key; maps linked worktrees
    /// back to their main repo).
    pub fn main_worktree_paths(&self) -> &PathList {
        self.worktree_paths.main_worktree_path_list()
    }
}
348
349impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
350 fn from(meta: &ThreadMetadata) -> Self {
351 Self {
352 session_id: meta.session_id.clone(),
353 work_dirs: Some(meta.folder_paths().clone()),
354 title: Some(meta.title.clone()),
355 updated_at: Some(meta.updated_at),
356 created_at: meta.created_at,
357 meta: None,
358 }
359 }
360}
361
362/// Record of a git worktree that was archived (deleted from disk) when its
363/// last thread was archived.
364pub struct ArchivedGitWorktree {
365 /// Auto-incrementing primary key.
366 pub id: i64,
367 /// Absolute path to the directory of the worktree before it was deleted.
368 /// Used when restoring, to put the recreated worktree back where it was.
369 /// If the path already exists on disk, the worktree is assumed to be
370 /// already restored and is used as-is.
371 pub worktree_path: PathBuf,
372 /// Absolute path of the main repository ("main worktree") that owned this worktree.
373 /// Used when restoring, to reattach the recreated worktree to the correct main repo.
374 /// If the main repo isn't found on disk, unarchiving fails because we only store
375 /// commit hashes, and without the actual git repo being available, we can't restore
376 /// the files.
377 pub main_repo_path: PathBuf,
378 /// Branch that was checked out in the worktree at archive time. `None` if
379 /// the worktree was in detached HEAD state, which isn't supported in Zed, but
380 /// could happen if the user made a detached one outside of Zed.
381 /// On restore, we try to switch to this branch. If that fails (e.g. it's
382 /// checked out elsewhere), we auto-generate a new one.
383 pub branch_name: Option<String>,
384 /// SHA of the WIP commit that captures files that were staged (but not yet
385 /// committed) at the time of archiving. This commit can be empty if the
386 /// user had no staged files at the time. It sits directly on top of whatever
387 /// the user's last actual commit was.
388 pub staged_commit_hash: String,
389 /// SHA of the WIP commit that captures files that were unstaged (including
390 /// untracked) at the time of archiving. This commit can be empty if the user
391 /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
392 /// After doing `git reset` past both of these commits, we're back in the state
393 /// we had before archiving, including what was staged, what was unstaged, and
394 /// what was committed.
395 pub unstaged_commit_hash: String,
396 /// SHA of the commit that HEAD pointed at before we created the two WIP
397 /// commits during archival. After resetting past the WIP commits during
398 /// restore, HEAD should land back on this commit. It also serves as a
399 /// pre-restore sanity check (abort if this commit no longer exists in the
400 /// repo) and as a fallback target if the WIP resets fail.
401 pub original_commit_hash: String,
402}
403
404/// The store holds all metadata needed to show threads in the sidebar/the archive.
405///
406/// Automatically listens to AcpThread events and updates metadata if it has changed.
407pub struct ThreadMetadataStore {
408 db: ThreadMetadataDb,
409 threads: HashMap<acp::SessionId, ThreadMetadata>,
410 threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
411 threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
412 reload_task: Option<Shared<Task<()>>>,
413 session_subscriptions: HashMap<acp::SessionId, Subscription>,
414 pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
415 in_flight_archives: HashMap<acp::SessionId, (Task<()>, smol::channel::Sender<()>)>,
416 _db_operations_task: Task<()>,
417}
418
/// A pending write destined for the metadata database, applied by the
/// background writer task spawned in `ThreadMetadataStore::new`.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update a thread's metadata row.
    Upsert(ThreadMetadata),
    /// Delete a thread's metadata row.
    Delete(acp::SessionId),
}
424
425impl DbOperation {
426 fn id(&self) -> &acp::SessionId {
427 match self {
428 DbOperation::Upsert(thread) => &thread.session_id,
429 DbOperation::Delete(session_id) => session_id,
430 }
431 }
432}
433
impl ThreadMetadataStore {
    /// Register the app-wide store (production build), backed by the on-disk
    /// metadata database.
    #[cfg(not(any(test, feature = "test-support")))]
    pub fn init_global(cx: &mut App) {
        // NOTE(review): this checks for `Self` as a global, but the store is
        // registered below as `GlobalThreadMetadataStore` — so this guard may
        // never fire on repeated calls; confirm which global type is intended.
        if cx.has_global::<Self>() {
            return;
        }

        let db = ThreadMetadataDb::global(cx);
        let thread_store = cx.new(|cx| Self::new(db, cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }

    /// Register the app-wide store (tests), backed by a per-test database
    /// named after the current thread so parallel tests don't collide.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }

    /// The global store, if one has been registered.
    pub fn try_global(cx: &App) -> Option<Entity<Self>> {
        cx.try_global::<GlobalThreadMetadataStore>()
            .map(|store| store.0.clone())
    }

    /// The global store. Panics if `init_global` hasn't run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }

    /// Whether any threads are known.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }

    /// Returns all thread IDs.
    pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
        self.threads.keys().cloned()
    }

    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        self.threads.get(session_id)
    }

    /// Returns all threads.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }

    /// Returns all archived threads.
    pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.entries().filter(|t| t.archived)
    }

    /// Returns all threads for the given path list, excluding archived threads.
    pub fn entries_for_path(
        &self,
        path_list: &PathList,
    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads_by_paths
            .get(path_list)
            .into_iter()
            .flatten()
            .filter_map(|s| self.threads.get(s))
            .filter(|s| !s.archived)
    }

    /// Returns threads whose `main_worktree_paths` matches the given path list,
    /// excluding archived threads. This finds threads that were opened in a
    /// linked worktree but are associated with the given main worktree.
    pub fn entries_for_main_worktree_path(
        &self,
        path_list: &PathList,
    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads_by_main_paths
            .get(path_list)
            .into_iter()
            .flatten()
            .filter_map(|s| self.threads.get(s))
            .filter(|s| !s.archived)
    }

    /// Replace all in-memory state from the database. Returns a shared task
    /// so multiple callers can await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous reload; the newest one wins.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the map and both indices from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();

                    for row in rows {
                        this.threads_by_paths
                            .entry(row.folder_paths().clone())
                            .or_default()
                            .insert(row.session_id.clone());
                        if !row.main_worktree_paths().is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths().clone())
                                .or_default()
                                .insert(row.session_id.clone());
                        }
                        this.threads.insert(row.session_id.clone(), row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }

    /// Save several entries, notifying observers once at the end.
    pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
        for metadata in metadata {
            self.save_internal(metadata);
        }
        cx.notify();
    }

    /// Test-only wrapper around the private `save`.
    #[cfg(any(test, feature = "test-support"))]
    pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save(metadata, cx)
    }

    /// Save a single entry and notify observers.
    fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }

    /// Update in-memory state and enqueue a DB upsert, without notifying.
    /// If the thread's paths changed, its stale index entries are removed
    /// first so the indices stay consistent with `threads`.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        if let Some(thread) = self.threads.get(&metadata.session_id) {
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(session_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    session_ids.remove(&metadata.session_id);
                }
            }
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    session_ids.remove(&metadata.session_id);
                }
            }
        }

        self.threads
            .insert(metadata.session_id.clone(), metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths().clone())
            .or_default()
            .insert(metadata.session_id.clone());

        if !metadata.main_worktree_paths().is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths().clone())
                .or_default()
                .insert(metadata.session_id.clone());
        }

        // Persistence is async and batched; see the writer loop in `new`.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }

    /// Replace a thread's folder paths (e.g. when the agent reports new
    /// working directories), keeping its main worktree paths when the two
    /// lists still pair up and falling back to main == folder otherwise.
    pub fn update_working_directories(
        &mut self,
        session_id: &acp::SessionId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id) {
            self.save_internal(ThreadMetadata {
                worktree_paths: ThreadWorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| ThreadWorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }

    /// Mark a thread archived. An optional in-flight `archive_job`
    /// (background task plus cancellation sender) is retained so it can be
    /// cancelled if the thread is unarchived before the job finishes.
    pub fn archive(
        &mut self,
        session_id: &acp::SessionId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(session_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(session_id.clone(), job);
        }
    }

    /// Clear a thread's archived flag and cancel any in-flight archive job.
    pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(session_id);
    }

    /// Drop the bookkeeping for an archive job that ran to completion.
    pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) {
        self.in_flight_archives.remove(session_id);
    }

    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    pub fn update_restored_worktree_paths(
        &mut self,
        session_id: &acp::SessionId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                // Only the first occurrence of each old path is replaced
                // (unlike `complete_worktree_restore`, which replaces all).
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: ThreadWorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| ThreadWorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }

    /// Apply `(old_path, new_path)` replacements to a thread's folder paths
    /// once a worktree restore has finished.
    ///
    /// NOTE(review): near-duplicate of `update_restored_worktree_paths`; the
    /// observable difference is that this replaces ALL occurrences of each
    /// old path, whereas that one replaces only the first. Confirm whether
    /// both variants are intentional.
    pub fn complete_worktree_restore(
        &mut self,
        session_id: &acp::SessionId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: ThreadWorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| ThreadWorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }

    /// Apply a mutation to the worktree paths of all threads whose current
    /// `main_worktree_paths` matches `current_main_paths`, then re-index.
    pub fn change_worktree_paths(
        &mut self,
        current_main_paths: &PathList,
        mutate: impl Fn(&mut ThreadWorktreePaths),
        cx: &mut Context<Self>,
    ) {
        // Collect ids up front so we can mutate `self.threads` while iterating.
        let session_ids: Vec<_> = self
            .threads_by_main_paths
            .get(current_main_paths)
            .into_iter()
            .flatten()
            .cloned()
            .collect();

        if session_ids.is_empty() {
            return;
        }

        for session_id in &session_ids {
            if let Some(thread) = self.threads.get_mut(session_id) {
                // De-index under the old keys before mutating the paths.
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(session_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(session_id);
                }

                mutate(&mut thread.worktree_paths);

                // Re-index under the (possibly changed) keys.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(session_id.clone());
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(session_id.clone());

                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }

    /// Insert a new `archived_git_worktrees` row; resolves to its row id.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Associate a thread with an archived worktree row.
    pub fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
                .await
        })
    }

    /// Fetch all archived worktrees linked to a thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
    }

    /// Delete an archived worktree row by id.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Remove all thread -> archived-worktree links for a session.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(session_id)
                .await
        })
    }

    /// Whether any thread still references the given archived worktree.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }

    /// Set the `archived` flag on a thread, if it exists.
    fn update_archived(
        &mut self,
        session_id: &acp::SessionId,
        archived: bool,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id) {
            self.save_internal(ThreadMetadata {
                archived,
                ..thread.clone()
            });
            cx.notify();
        }
    }

    /// Remove a thread from memory, both indices, and (asynchronously) the DB.
    pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&session_id) {
            if let Some(session_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                session_ids.remove(&session_id);
            }
            if !thread.main_worktree_paths().is_empty() {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    session_ids.remove(&session_id);
                }
            }
        }
        self.threads.remove(&session_id);
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(session_id))
            .log_err();
        cx.notify();
    }

    /// Build the store: subscribe to newly-created `AcpThread`s, start the
    /// batched background DB writer, and load existing rows from the DB.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Track every newly-created AcpThread so its events keep metadata
        // current.
        cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
            // Don't track subagent threads in the sidebar.
            if thread.parent_session_id().is_some() {
                return;
            }

            let thread_entity = cx.entity();

            // Drop our event subscription when the thread entity is released.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |thread, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            let session_id = thread.session_id().clone();
                            store.session_subscriptions.remove(&session_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
                    this.session_subscriptions
                        .insert(thread.session_id().clone(), subscription);
                })
                .ok();
        })
        .detach();

        // Background writer: drain bursts of pending operations, dedup them
        // per session, then apply them to the DB one at a time.
        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    // Opportunistically batch whatever else is already queued.
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(session_id) => {
                                db.delete(session_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            reload_task: None,
            session_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate from the DB immediately; callers can also await `reload`.
        let _ = this.reload(cx);
        this
    }

    /// Keep only the most recent operation per session id. Iterating in
    /// reverse makes the newest op win. The returned order is arbitrary
    /// (HashMap iteration), which is fine because the surviving ops target
    /// distinct sessions.
    fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
        let mut ops = HashMap::default();
        for operation in operations.into_iter().rev() {
            if ops.contains_key(operation.id()) {
                continue;
            }
            ops.insert(operation.id().clone(), operation);
        }
        ops.into_values().collect()
    }

    /// Derive fresh metadata from a thread when a meaningful event fires and
    /// persist it via `save`.
    fn handle_thread_event(
        &mut self,
        thread: Entity<acp_thread::AcpThread>,
        event: &AcpThreadEvent,
        cx: &mut Context<Self>,
    ) {
        // Don't track subagent threads in the sidebar.
        if thread.read(cx).parent_session_id().is_some() {
            return;
        }

        match event {
            // Events that can change the title, paths, or activity timestamp.
            AcpThreadEvent::NewEntry
            | AcpThreadEvent::TitleUpdated
            | AcpThreadEvent::EntryUpdated(_)
            | AcpThreadEvent::EntriesRemoved(_)
            | AcpThreadEvent::ToolAuthorizationRequested(_)
            | AcpThreadEvent::ToolAuthorizationReceived(_)
            | AcpThreadEvent::Retry(_)
            | AcpThreadEvent::Stopped(_)
            | AcpThreadEvent::Error
            | AcpThreadEvent::LoadError(_)
            | AcpThreadEvent::Refusal
            | AcpThreadEvent::WorkingDirectoriesUpdated => {
                let thread_ref = thread.read(cx);
                // Never persist metadata for a thread with no entries.
                if thread_ref.entries().is_empty() {
                    return;
                }

                let existing_thread = self.threads.get(thread_ref.session_id());
                let session_id = thread_ref.session_id().clone();
                let title = thread_ref
                    .title()
                    .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());

                let updated_at = Utc::now();

                // Preserve the original creation time across updates; first
                // save uses "now".
                let created_at = existing_thread
                    .and_then(|t| t.created_at)
                    .unwrap_or_else(|| updated_at);

                let agent_id = thread_ref.connection().agent_id();

                let project = thread_ref.project().read(cx);
                let worktree_paths = ThreadWorktreePaths::from_project(project, cx);

                let project_group_key = project.project_group_key(cx);
                let remote_connection = project_group_key.host();

                // Threads without a folder path (e.g. started in an empty
                // window) are archived by default so they don't get lost,
                // because they won't show up in the sidebar. Users can reload
                // them from the archive.
                let archived = existing_thread
                    .map(|t| t.archived)
                    .unwrap_or(worktree_paths.is_empty());

                let metadata = ThreadMetadata {
                    session_id,
                    agent_id,
                    title,
                    created_at: Some(created_at),
                    updated_at,
                    worktree_paths,
                    remote_connection,
                    archived,
                };

                self.save(metadata, cx);
            }
            // Events that don't affect sidebar metadata.
            AcpThreadEvent::TokenUsageUpdated
            | AcpThreadEvent::SubagentSpawned(_)
            | AcpThreadEvent::PromptCapabilitiesUpdated
            | AcpThreadEvent::AvailableCommandsUpdated(_)
            | AcpThreadEvent::ModeUpdated(_)
            | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
        }
    }
}
1040
1041impl Global for ThreadMetadataStore {}
1042
1043struct ThreadMetadataDb(ThreadSafeConnection);
1044
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Append-only migration list: entries are applied in order, so existing
    // entries must never be edited or reordered — only new ones appended.
    const MIGRATIONS: &[&str] = &[
        // Initial schema: one metadata row per agent session.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // Soft-delete flag backing the sidebar archive.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        // Serialized PathList (paths + order) of the main worktree roots.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Snapshots of archived git worktrees, plus a link table associating
        // them with sessions (many-to-many).
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        // JSON-serialized RemoteConnectionOptions for remote projects.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
    ];
}
1083
1084db::static_connection!(ThreadMetadataDb, []);
1085
impl ThreadMetadataDb {
    /// List the session ids of every stored thread, newest first.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    ///
    /// The column order here must match the decode order in
    /// `impl Column for ThreadMetadata` below.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }

    /// Upsert metadata for a thread.
    ///
    /// Values are normalized before writing: the built-in Zed agent id and
    /// empty path lists become NULL, timestamps become RFC 3339 text, and the
    /// remote connection is serialized as JSON.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        // NULL for the native agent; the Column impl maps NULL back to
        // ZED_AGENT_ID when reading.
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        // Empty path lists are stored as NULL rather than empty strings.
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11) \
                ON CONFLICT(session_id) DO UPDATE SET \
                    agent_id = excluded.agent_id, \
                    title = excluded.title, \
                    updated_at = excluded.updated_at, \
                    created_at = excluded.created_at, \
                    folder_paths = excluded.folder_paths, \
                    folder_paths_order = excluded.folder_paths_order, \
                    archived = excluded.archived, \
                    main_worktree_paths = excluded.main_worktree_paths, \
                    main_worktree_paths_order = excluded.main_worktree_paths_order, \
                    remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must follow the ?1..?11 placeholders above exactly.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert a snapshot row for an archived git worktree and return its
    /// generated row id (via SQLite RETURNING).
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Associate a thread session with an archived worktree snapshot.
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetch every archived worktree linked to `session_id`.
    ///
    /// Column order must match `impl Column for ArchivedGitWorktree`.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.session_id = ?1",
        )?(session_id)
    }

    /// Delete an archived worktree row along with any thread links to it.
    /// The link rows are removed first so the REFERENCES constraint on
    /// `thread_archived_worktrees` is never violated.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Remove every archived-worktree link for `session_id`; the worktree
    /// snapshot rows themselves are left in place.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE session_id = ?",
            )?;
            stmt.bind(&session_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Whether any thread still links to the given archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }
}
1277
1278impl Column for ThreadMetadata {
1279 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1280 let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
1281 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1282 let (title, next): (String, i32) = Column::column(statement, next)?;
1283 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1284 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1285 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1286 let (folder_paths_order_str, next): (Option<String>, i32) =
1287 Column::column(statement, next)?;
1288 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1289 let (main_worktree_paths_str, next): (Option<String>, i32) =
1290 Column::column(statement, next)?;
1291 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1292 Column::column(statement, next)?;
1293 let (remote_connection_json, next): (Option<String>, i32) =
1294 Column::column(statement, next)?;
1295
1296 let agent_id = agent_id
1297 .map(|id| AgentId::new(id))
1298 .unwrap_or(ZED_AGENT_ID.clone());
1299
1300 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1301 let created_at = created_at_str
1302 .as_deref()
1303 .map(DateTime::parse_from_rfc3339)
1304 .transpose()?
1305 .map(|dt| dt.with_timezone(&Utc));
1306
1307 let folder_paths = folder_paths_str
1308 .map(|paths| {
1309 PathList::deserialize(&util::path_list::SerializedPathList {
1310 paths,
1311 order: folder_paths_order_str.unwrap_or_default(),
1312 })
1313 })
1314 .unwrap_or_default();
1315
1316 let main_worktree_paths = main_worktree_paths_str
1317 .map(|paths| {
1318 PathList::deserialize(&util::path_list::SerializedPathList {
1319 paths,
1320 order: main_worktree_paths_order_str.unwrap_or_default(),
1321 })
1322 })
1323 .unwrap_or_default();
1324
1325 let remote_connection = remote_connection_json
1326 .as_deref()
1327 .map(serde_json::from_str::<RemoteConnectionOptions>)
1328 .transpose()
1329 .context("deserialize thread metadata remote connection")?;
1330
1331 let worktree_paths =
1332 ThreadWorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
1333 .unwrap_or_else(|_| ThreadWorktreePaths::default());
1334
1335 Ok((
1336 ThreadMetadata {
1337 session_id: acp::SessionId::new(id),
1338 agent_id,
1339 title: title.into(),
1340 updated_at,
1341 created_at,
1342 worktree_paths,
1343 remote_connection,
1344 archived,
1345 },
1346 next,
1347 ))
1348 }
1349}
1350
1351impl Column for ArchivedGitWorktree {
1352 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1353 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1354 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1355 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1356 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1357 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1358 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1359 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1360
1361 Ok((
1362 ArchivedGitWorktree {
1363 id,
1364 worktree_path: PathBuf::from(worktree_path_str),
1365 main_repo_path: PathBuf::from(main_repo_path_str),
1366 branch_name,
1367 staged_commit_hash,
1368 unstaged_commit_hash,
1369 original_commit_hash,
1370 },
1371 next,
1372 ))
1373 }
1374}
1375
1376#[cfg(test)]
1377mod tests {
1378 use super::*;
1379 use acp_thread::{AgentConnection, StubAgentConnection};
1380 use action_log::ActionLog;
1381 use agent::DbThread;
1382 use agent_client_protocol as acp;
1383
1384 use gpui::TestAppContext;
1385 use project::FakeFs;
1386 use project::Project;
1387 use remote::WslConnectionOptions;
1388 use std::path::Path;
1389 use std::rc::Rc;
1390
1391 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1392 DbThread {
1393 title: title.to_string().into(),
1394 messages: Vec::new(),
1395 updated_at,
1396 detailed_summary: None,
1397 initial_project_snapshot: None,
1398 cumulative_token_usage: Default::default(),
1399 request_token_usage: Default::default(),
1400 model: None,
1401 profile: None,
1402 imported: false,
1403 subagent_context: None,
1404 speed: None,
1405 thinking_enabled: false,
1406 thinking_effort: None,
1407 draft_prompt: None,
1408 ui_scroll_position: None,
1409 }
1410 }
1411
1412 fn make_metadata(
1413 session_id: &str,
1414 title: &str,
1415 updated_at: DateTime<Utc>,
1416 folder_paths: PathList,
1417 ) -> ThreadMetadata {
1418 ThreadMetadata {
1419 archived: false,
1420 session_id: acp::SessionId::new(session_id),
1421 agent_id: agent::ZED_AGENT_ID.clone(),
1422 title: title.to_string().into(),
1423 updated_at,
1424 created_at: Some(updated_at),
1425 worktree_paths: ThreadWorktreePaths::from_folder_paths(&folder_paths),
1426 remote_connection: None,
1427 }
1428 }
1429
    /// Install the globals the store depends on into a fresh test app:
    /// settings, a fake filesystem, and the metadata/thread stores. The
    /// settings store is installed first — presumably required by the stores'
    /// initialization (TODO confirm).
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
        });
        // Let any async initialization settle before the test proceeds.
        cx.run_until_parked();
    }
1441
1442 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1443 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1444 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1445 .unwrap();
1446 }
1447
    /// Run both startup migrations the way `init` does, after clearing the
    /// backfill marker so the remote-connection pass isn't skipped as
    /// already-done.
    fn run_thread_metadata_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        // Drive the spawned migration tasks to completion.
        cx.run_until_parked();
    }
1456
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        // Rows written directly to the database before the store exists
        // should be loaded into the store's in-memory cache (ids and the
        // per-path index) when the global store initializes.
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test db after the current test thread — presumably the
        // same name the store resolves on init, so both share one database.
        // TODO(review): confirm against `open_test_db`.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Initialize the global store only after the rows exist.
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Both saved sessions should be present in the cache.
            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // And each session should be indexed under its own folder paths.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1521
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        // The per-path index must track saves that move a thread between
        // projects, and deletes that remove a thread entirely.
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        // Phase 1: save two threads under two different projects.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Phase 2: re-save session-1 under project-b; it must move out of
        // project-a's index and into project-b's.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Phase 3: delete session-2; it must disappear from both the id list
        // and the per-path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1641
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        // Migration must copy native-store threads that have no metadata row
        // yet, leave the pre-existing row untouched (its title survives), and
        // — because a row already existed before the migration ran — mark all
        // newly migrated entries as archived.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // One session ("a-session-0") gets a metadata row up front; the
        // native store will later hold the same session with a different
        // title, which the migration must NOT overwrite.
        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            worktree_paths: ThreadWorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        // Populate the native thread store that the migration reads from.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing row kept its original title and archive state.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // All newly migrated entries are archived (not a first migration).
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1749
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        // When every native-store thread already has a metadata row, the
        // migration must add nothing: still exactly one row afterwards.
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            worktree_paths: ThreadWorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // The native store holds the same session with a newer title and
        // timestamp — the migration must still skip it.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1805
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        // A metadata row saved without a remote connection should get one
        // backfilled from the workspace database when a workspace with the
        // same folder paths points at a remote connection.
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Hand-craft workspace-db state: a WSL remote connection, and a
        // workspace whose paths match the thread's folder paths and which
        // references that connection.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Run only the remote-connection backfill (metadata migration is fed
        // a ready task), after clearing its completion marker.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
1881
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        // On a first migration, only the five most recently updated threads
        // per project stay unarchived; anything older is archived.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        // Seed the native thread store the migration reads from.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1970
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        // Events on a thread with no entries (e.g. a title-only draft) must
        // not create a metadata row; the first real entry must.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // A title update alone fires a thread event but leaves the thread
        // empty — handle_thread_event must ignore it.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // Pushing actual content makes the thread non-empty, so metadata
        // should now be persisted.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
2022
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        // Dropping a thread entity that already has content must not delete
        // its persisted metadata row.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Release the last handle; the empty update flushes entity drops so
        // any release observers run before we re-check the store.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
2068
    #[gpui::test]
    // Threads whose project has no worktree folders end up with empty folder
    // paths and are archived by default; threads attached to a worktree keep
    // its paths and stay unarchived.
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Push content + title so metadata is actually written for this thread.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // No worktree: empty folder paths and archived by default.
            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            // Worktree present: its path is stored and the thread stays active.
            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2148
    #[gpui::test]
    // Subagent threads (AcpThreads constructed with a parent session id) must
    // never appear in the sidebar's thread metadata store.
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread: the first argument is the parent
        // session id, which is what marks this thread as a subagent.
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                acp_thread::AcpThread::new(
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
             but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
2225
2226 #[test]
2227 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2228 let now = Utc::now();
2229
2230 let operations = vec![
2231 DbOperation::Upsert(make_metadata(
2232 "session-1",
2233 "First Thread",
2234 now,
2235 PathList::default(),
2236 )),
2237 DbOperation::Delete(acp::SessionId::new("session-1")),
2238 ];
2239
2240 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2241
2242 assert_eq!(deduped.len(), 1);
2243 assert_eq!(
2244 deduped[0],
2245 DbOperation::Delete(acp::SessionId::new("session-1"))
2246 );
2247 }
2248
2249 #[test]
2250 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2251 let now = Utc::now();
2252 let later = now + chrono::Duration::seconds(1);
2253
2254 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2255 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
2256
2257 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2258 DbOperation::Upsert(old_metadata),
2259 DbOperation::Upsert(new_metadata.clone()),
2260 ]);
2261
2262 assert_eq!(deduped.len(), 1);
2263 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2264 }
2265
2266 #[test]
2267 fn test_dedup_db_operations_preserves_distinct_sessions() {
2268 let now = Utc::now();
2269
2270 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2271 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2272 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2273 DbOperation::Upsert(metadata1.clone()),
2274 DbOperation::Upsert(metadata2.clone()),
2275 ]);
2276
2277 assert_eq!(deduped.len(), 2);
2278 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2279 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2280 }
2281
    #[gpui::test]
    // Round-trips a thread through archive and unarchive, checking that
    // `entries_for_path` and `archived_entries` stay mutually exclusive.
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: visible under its path, absent from the archive.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Archived threads disappear from the path-scoped listing...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            // ...and show up in the archive with the flag set.
            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores the original path-visible state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
2368
    #[gpui::test]
    // `entries_for_path` filters out archived threads, while `entries` still
    // returns everything and `archived_entries` returns only the archived set.
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Path-scoped listing hides the archived thread...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // ...but the unfiltered listing still contains both threads.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2428
    #[gpui::test]
    // `save_all` persists a whole batch of metadata entries in a single call.
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        // Stagger the timestamps so the three entries are distinguishable.
        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
2476
    #[gpui::test]
    // Archiving must be durable: after `reload` re-reads the store's state,
    // the archived flag is still set.
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        cx.run_until_parked();

        // Discard the in-memory cache and repopulate from persisted data.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // Archived state also affects the derived listings after reload.
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2535
    #[gpui::test]
    // Archiving an unknown session id must neither panic nor create an entry.
    async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
        init_test(cx);

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("nonexistent"), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // The store stays completely empty across all views.
            assert!(store.is_empty());
            assert_eq!(store.entries().count(), 0);
            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2560
    #[gpui::test]
    // Saving and archiving back-to-back in the same update — without letting
    // the executor park between the two — must still produce one archived
    // entry (i.e. the archive is not lost because the save hadn't flushed).
    async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths);
        let session_id = metadata.session_id.clone();

        // Both operations are issued before any async work gets to run.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata.clone(), cx);
                store.archive(&session_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
            pretty_assertions::assert_eq!(
                entries,
                vec![ThreadMetadata {
                    archived: true,
                    ..metadata
                }]
            );
        });
    }
2594
    #[gpui::test]
    // Creates an archived worktree record, links it to a thread, and verifies
    // every stored field round-trips intact.
    async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    Some("feature-branch".to_string()),
                    "staged_aaa".to_string(),
                    "unstaged_bbb".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
            })
            .await
            .unwrap();

        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
            })
            .await
            .unwrap();

        // Every field we passed to create_archived_worktree comes back as-is.
        assert_eq!(worktrees.len(), 1);
        let wt = &worktrees[0];
        assert_eq!(wt.id, id);
        assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
        assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
        assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
        assert_eq!(wt.staged_commit_hash, "staged_aaa");
        assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
        assert_eq!(wt.original_commit_hash, "original_000");
    }
2639
    #[gpui::test]
    // Deleting an archived worktree also removes it from the thread's lookup.
    async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    Some("main".to_string()),
                    "deadbeef".to_string(),
                    "deadbeef".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
            .await
            .unwrap();

        // The thread→worktree link is gone along with the record itself.
        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
            })
            .await
            .unwrap();
        assert!(worktrees.is_empty());
    }
2680
    #[gpui::test]
    // One archived worktree can be shared by multiple threads: both links
    // resolve to the same worktree record.
    async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    None,
                    "abc123".to_string(),
                    "abc123".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
            })
            .await
            .unwrap();

        let wt1 = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
            })
            .await
            .unwrap();

        let wt2 = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
            })
            .await
            .unwrap();

        // Each thread sees exactly one worktree, and it is the same record.
        assert_eq!(wt1.len(), 1);
        assert_eq!(wt2.len(), 1);
        assert_eq!(wt1[0].id, wt2[0].id);
    }
2733
    #[gpui::test]
    // `complete_worktree_restore` rewrites only the folder paths matched by
    // the replacement list; unrelated paths are kept in place.
    async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths = PathList::new(&[
            Path::new("/projects/worktree-a"),
            Path::new("/projects/worktree-b"),
            Path::new("/other/unrelated"),
        ]);
        let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);

        store.update(cx, |store, cx| {
            store.save_manually(meta, cx);
        });

        // (old path, restored path) pairs for the two worktrees being restored.
        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/restored/worktree-a"),
            ),
            (
                PathBuf::from("/projects/worktree-b"),
                PathBuf::from("/restored/worktree-b"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.complete_worktree_restore(
                &acp::SessionId::new("session-multi"),
                &replacements,
                cx,
            );
        });

        let entry = store.read_with(cx, |store, _cx| {
            store.entry(&acp::SessionId::new("session-multi")).cloned()
        });
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 3);
        assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
        // The path without a replacement survives unchanged.
        assert!(paths.contains(&PathBuf::from("/other/unrelated")));
    }
2779
    #[gpui::test]
    // Replacement pairs whose old path is not on the thread are ignored, and
    // thread paths without a replacement are preserved untouched.
    async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths =
            PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
        let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);

        store.update(cx, |store, cx| {
            store.save_manually(meta, cx);
        });

        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/new/worktree-a"),
            ),
            // This pair matches nothing on the thread and must be a no-op.
            (
                PathBuf::from("/nonexistent/path"),
                PathBuf::from("/should/not/appear"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.complete_worktree_restore(
                &acp::SessionId::new("session-partial"),
                &replacements,
                cx,
            );
        });

        let entry = store.read_with(cx, |store, _cx| {
            store
                .entry(&acp::SessionId::new("session-partial"))
                .cloned()
        });
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 2);
        assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/other/path")));
        assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
    }
2824
    #[gpui::test]
    // Same scenario as the complete_worktree_restore test above, but via
    // `update_restored_worktree_paths`: matched folder paths are rewritten,
    // unrelated ones are kept.
    async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths = PathList::new(&[
            Path::new("/projects/worktree-a"),
            Path::new("/projects/worktree-b"),
            Path::new("/other/unrelated"),
        ]);
        let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);

        store.update(cx, |store, cx| {
            store.save_manually(meta, cx);
        });

        // (old path, restored path) pairs for the two restored worktrees.
        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/restored/worktree-a"),
            ),
            (
                PathBuf::from("/projects/worktree-b"),
                PathBuf::from("/restored/worktree-b"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.update_restored_worktree_paths(
                &acp::SessionId::new("session-multi"),
                &replacements,
                cx,
            );
        });

        let entry = store.read_with(cx, |store, _cx| {
            store.entry(&acp::SessionId::new("session-multi")).cloned()
        });
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 3);
        assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
        // The path without a replacement survives unchanged.
        assert!(paths.contains(&PathBuf::from("/other/unrelated")));
    }
2870
    #[gpui::test]
    // `update_restored_worktree_paths` ignores replacement pairs whose old
    // path is not on the thread and leaves unmatched thread paths untouched.
    async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths =
            PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
        let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);

        store.update(cx, |store, cx| {
            store.save_manually(meta, cx);
        });

        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/new/worktree-a"),
            ),
            // This pair matches nothing on the thread and must be a no-op.
            (
                PathBuf::from("/nonexistent/path"),
                PathBuf::from("/should/not/appear"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.update_restored_worktree_paths(
                &acp::SessionId::new("session-partial"),
                &replacements,
                cx,
            );
        });

        let entry = store.read_with(cx, |store, _cx| {
            store
                .entry(&acp::SessionId::new("session-partial"))
                .cloned()
        });
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 2);
        assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/other/path")));
        assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
    }
2915
    #[gpui::test]
    // A single thread can reference several archived worktrees at once.
    async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id1 = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/projects/worktree-a".to_string(),
                    "/home/user/repo".to_string(),
                    Some("branch-a".to_string()),
                    "staged_a".to_string(),
                    "unstaged_a".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let id2 = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/projects/worktree-b".to_string(),
                    "/home/user/repo".to_string(),
                    Some("branch-b".to_string()),
                    "staged_b".to_string(),
                    "unstaged_b".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        // Link both worktrees to the same thread.
        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
            })
            .await
            .unwrap();

        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
            })
            .await
            .unwrap();

        assert_eq!(worktrees.len(), 2);

        // Both linked worktrees come back for the thread.
        let paths: Vec<&Path> = worktrees
            .iter()
            .map(|w| w.worktree_path.as_path())
            .collect();
        assert!(paths.contains(&Path::new("/projects/worktree-a")));
        assert!(paths.contains(&Path::new("/projects/worktree-b")));
    }
2981
2982 // ── ThreadWorktreePaths tests ──────────────────────────────────────
2983
2984 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
2985 fn make_worktree_paths(pairs: &[(&str, &str)]) -> ThreadWorktreePaths {
2986 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
2987 .iter()
2988 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
2989 .unzip();
2990 ThreadWorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders))
2991 .unwrap()
2992 }
2993
2994 #[test]
2995 fn test_thread_worktree_paths_full_add_then_remove_cycle() {
2996 // Full scenario from the issue:
2997 // 1. Start with linked worktree selectric → zed
2998 // 2. Add cloud
2999 // 3. Remove zed
3000
3001 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3002
3003 // Step 2: add cloud
3004 paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));
3005
3006 assert_eq!(paths.ordered_pairs().count(), 2);
3007 assert_eq!(
3008 paths.folder_path_list(),
3009 &PathList::new(&[
3010 Path::new("/worktrees/selectric/zed"),
3011 Path::new("/projects/cloud"),
3012 ])
3013 );
3014 assert_eq!(
3015 paths.main_worktree_path_list(),
3016 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
3017 );
3018
3019 // Step 3: remove zed
3020 paths.remove_main_path(Path::new("/projects/zed"));
3021
3022 assert_eq!(paths.ordered_pairs().count(), 1);
3023 assert_eq!(
3024 paths.folder_path_list(),
3025 &PathList::new(&[Path::new("/projects/cloud")])
3026 );
3027 assert_eq!(
3028 paths.main_worktree_path_list(),
3029 &PathList::new(&[Path::new("/projects/cloud")])
3030 );
3031 }
3032
3033 #[test]
3034 fn test_thread_worktree_paths_add_is_idempotent() {
3035 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3036
3037 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3038
3039 assert_eq!(paths.ordered_pairs().count(), 1);
3040 }
3041
3042 #[test]
3043 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3044 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3045
3046 paths.remove_main_path(Path::new("/projects/nonexistent"));
3047
3048 assert_eq!(paths.ordered_pairs().count(), 1);
3049 }
3050
3051 #[test]
3052 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3053 let folder = PathList::new(&[
3054 Path::new("/worktrees/selectric/zed"),
3055 Path::new("/projects/cloud"),
3056 ]);
3057 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3058
3059 let paths = ThreadWorktreePaths::from_path_lists(main, folder).unwrap();
3060
3061 let pairs: Vec<_> = paths
3062 .ordered_pairs()
3063 .map(|(m, f)| (m.clone(), f.clone()))
3064 .collect();
3065 assert_eq!(pairs.len(), 2);
3066 assert!(pairs.contains(&(
3067 PathBuf::from("/projects/zed"),
3068 PathBuf::from("/worktrees/selectric/zed")
3069 )));
3070 assert!(pairs.contains(&(
3071 PathBuf::from("/projects/cloud"),
3072 PathBuf::from("/projects/cloud")
3073 )));
3074 }
3075
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees sharing the same main repo. `ordered_pairs`
        // keeps both (main, folder) pairs. Whether `PathList` deduplicates
        // repeated inputs is not pinned down by this test: the final equality
        // below holds either way, because the expected value is built through
        // `PathList::new` with the same duplicated input.
        // NOTE(review): the two original comments here contradicted each other
        // (one claimed dedup, one claimed all entries are kept) — confirm
        // against `PathList::new` if the exact behavior ever matters.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // Both pairs survive even though they share a main path.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3101
3102 #[test]
3103 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3104 let folder = PathList::new(&[
3105 Path::new("/worktrees/selectric/zed"),
3106 Path::new("/projects/cloud"),
3107 ]);
3108 let main = PathList::new(&[Path::new("/projects/zed")]);
3109
3110 let result = ThreadWorktreePaths::from_path_lists(main, folder);
3111 assert!(result.is_err());
3112 }
3113}