1use std::{path::PathBuf, sync::Arc};
2
3use agent::{ThreadStore, ZED_AGENT_ID};
4use agent_client_protocol as acp;
5use anyhow::Context as _;
6use chrono::{DateTime, Utc};
7use collections::{HashMap, HashSet};
8use db::{
9 kvp::KeyValueStore,
10 sqlez::{
11 bindable::{Bind, Column},
12 domain::Domain,
13 statement::Statement,
14 thread_safe_connection::ThreadSafeConnection,
15 },
16 sqlez_macros::sql,
17};
18use fs::Fs;
19use futures::{FutureExt, future::Shared};
20use gpui::{AppContext as _, Entity, Global, Subscription, Task};
21use project::AgentId;
22pub use project::WorktreePaths;
23use remote::RemoteConnectionOptions;
24use ui::{App, Context, SharedString};
25use util::ResultExt as _;
26use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
27
28use crate::DEFAULT_THREAD_TITLE;
29
/// Stable, globally unique identifier for a thread, independent of any ACP
/// session ID (draft threads have a `ThreadId` before they have a session).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);
32
33impl ThreadId {
34 pub fn new() -> Self {
35 Self(uuid::Uuid::new_v4())
36 }
37}
38
impl Bind for ThreadId {
    /// Binds the inner UUID to the SQL statement by delegating to the UUID's
    /// own `Bind` impl; returns the next free parameter index.
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}
44
45impl Column for ThreadId {
46 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
47 let (uuid, next) = Column::column(statement, start_index)?;
48 Ok((ThreadId(uuid), next))
49 }
50}
51
// KVP keys that mark the one-shot backfill migrations below as complete, so
// each runs at most once across app launches.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";
54
/// Installs the global `ThreadMetadataStore` and kicks off the one-shot data
/// migrations. The remote-connection backfill is handed the metadata
/// migration's task so it only runs after the migrated rows exist.
pub fn init(cx: &mut App) {
    ThreadMetadataStore::init_global(cx);
    let migration_task = migrate_thread_metadata(cx);
    migrate_thread_remote_connections(cx, migration_task);
    migrate_thread_ids(cx);
}
61
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        let existing_list = db.list()?;
        let is_first_migration = existing_list.is_empty();
        // Sessions already present in the new store; these are skipped so a
        // re-run never migrates the same thread twice.
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Store empty/default titles as `None` so the UI
                        // substitutes DEFAULT_THREAD_TITLE at display time.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        // Migrated threads start archived; the first-migration
                        // block below unarchives a few recent ones per project.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without a project stay archived regardless.
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Newest first, then unarchive the top five.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
147
148fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
149 let store = ThreadMetadataStore::global(cx);
150 let db = store.read(cx).db.clone();
151 let kvp = KeyValueStore::global(cx);
152 let workspace_db = WorkspaceDb::global(cx);
153 let fs = <dyn Fs>::global(cx);
154
155 cx.spawn(async move |cx| -> anyhow::Result<()> {
156 migration_task.await?;
157
158 if kvp
159 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
160 .is_some()
161 {
162 return Ok(());
163 }
164
165 let recent_workspaces = workspace_db.recent_workspaces_on_disk(fs.as_ref()).await?;
166
167 let mut local_path_lists = HashSet::<PathList>::default();
168 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
169
170 recent_workspaces
171 .iter()
172 .filter(|(_, location, path_list, _)| {
173 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
174 })
175 .for_each(|(_, _, path_list, _)| {
176 local_path_lists.insert(path_list.clone());
177 });
178
179 for (_, location, path_list, _) in recent_workspaces {
180 match location {
181 SerializedWorkspaceLocation::Remote(remote_connection)
182 if !local_path_lists.contains(&path_list) =>
183 {
184 remote_path_lists
185 .entry(path_list)
186 .or_insert(remote_connection);
187 }
188 _ => {}
189 }
190 }
191
192 let mut reloaded = false;
193 for metadata in db.list()? {
194 if metadata.remote_connection.is_some() {
195 continue;
196 }
197
198 if let Some(remote_connection) = remote_path_lists
199 .get(metadata.folder_paths())
200 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
201 {
202 db.save(ThreadMetadata {
203 remote_connection: Some(remote_connection.clone()),
204 ..metadata
205 })
206 .await?;
207 reloaded = true;
208 }
209 }
210
211 let reloaded_task = reloaded
212 .then_some(store.update(cx, |store, cx| store.reload(cx)))
213 .unwrap_or(Task::ready(()).shared());
214
215 kvp.write_kvp(
216 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
217 "1".to_string(),
218 )
219 .await?;
220 reloaded_task.await;
221
222 Ok(())
223 })
224 .detach_and_log_err(cx);
225}
226
227fn migrate_thread_ids(cx: &mut App) {
228 let store = ThreadMetadataStore::global(cx);
229 let db = store.read(cx).db.clone();
230 let kvp = KeyValueStore::global(cx);
231
232 cx.spawn(async move |cx| -> anyhow::Result<()> {
233 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
234 return Ok(());
235 }
236
237 let mut reloaded = false;
238 for metadata in db.list()? {
239 db.save(metadata).await?;
240 reloaded = true;
241 }
242
243 let reloaded_task = reloaded
244 .then_some(store.update(cx, |store, cx| store.reload(cx)))
245 .unwrap_or(Task::ready(()).shared());
246
247 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
248 .await?;
249 reloaded_task.await;
250
251 Ok(())
252 })
253 .detach_and_log_err(cx);
254}
255
/// GPUI global wrapper holding the singleton `ThreadMetadataStore` entity.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
258
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    // Stable identifier for the thread, assigned when metadata is created.
    pub thread_id: ThreadId,
    // ACP session backing the thread; `None` while the thread is a draft
    // (see `is_draft`).
    pub session_id: Option<acp::SessionId>,
    pub agent_id: AgentId,
    // User-visible title; `None` means untitled (DEFAULT_THREAD_TITLE is
    // substituted by `display_title`).
    pub title: Option<SharedString>,
    pub updated_at: DateTime<Utc>,
    // Optional because older rows may predate this field — TODO confirm.
    pub created_at: Option<DateTime<Utc>>,
    pub worktree_paths: WorktreePaths,
    // Set when the thread belongs to a remote project (backfilled for old
    // rows by `migrate_thread_remote_connections`).
    pub remote_connection: Option<RemoteConnectionOptions>,
    // Archived threads are excluded from the per-path sidebar queries
    // (`entries_for_path` and friends).
    pub archived: bool,
}
273
274impl ThreadMetadata {
275 pub fn new_draft(
276 thread_id: ThreadId,
277 agent_id: AgentId,
278 title: Option<SharedString>,
279 worktree_paths: WorktreePaths,
280 remote_connection: Option<RemoteConnectionOptions>,
281 ) -> Self {
282 let now = Utc::now();
283 Self {
284 thread_id,
285 session_id: None,
286 agent_id,
287 title,
288 updated_at: now,
289 created_at: Some(now),
290 worktree_paths: worktree_paths.clone(),
291 remote_connection,
292 archived: worktree_paths.is_empty(),
293 }
294 }
295
296 pub fn is_draft(&self) -> bool {
297 self.session_id.is_none()
298 }
299
300 pub fn display_title(&self) -> SharedString {
301 self.title
302 .clone()
303 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
304 }
305
306 pub fn folder_paths(&self) -> &PathList {
307 self.worktree_paths.folder_path_list()
308 }
309 pub fn main_worktree_paths(&self) -> &PathList {
310 self.worktree_paths.main_worktree_path_list()
311 }
312}
313
314impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
315 fn from(meta: &ThreadMetadata) -> Self {
316 let session_id = meta
317 .session_id
318 .clone()
319 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
320 Self {
321 session_id,
322 work_dirs: Some(meta.folder_paths().clone()),
323 title: meta.title.clone(),
324 updated_at: Some(meta.updated_at),
325 created_at: meta.created_at,
326 meta: None,
327 }
328 }
329}
330
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// Rows live in the `archived_git_worktrees` table and are linked to threads
/// through the `thread_archived_worktrees` join table (see the `Domain`
/// migrations at the bottom of this file).
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
372
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    db: ThreadMetadataDb,
    // Primary map: every known thread, keyed by its stable ID.
    threads: HashMap<ThreadId, ThreadMetadata>,
    // Secondary index: thread IDs grouped by `folder_paths()`.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    // Secondary index: thread IDs grouped by `main_worktree_paths()`; threads
    // with empty main paths are not indexed here.
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    // Secondary index: thread ID by ACP session ID (drafts are absent).
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    // In-flight DB reload, shared so concurrent callers await the same task.
    reload_task: Option<Shared<Task<()>>>,
    // Per-ConversationView subscriptions; removed when the view is released.
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    // Queue of pending DB writes, drained in batches by `_db_operations_task`.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    // Background archive jobs; dropping the sender cancels the job (see
    // `unarchive`).
    in_flight_archives: HashMap<ThreadId, (Task<()>, smol::channel::Sender<()>)>,
    // Background task that batches, dedups, and applies queued DB operations.
    _db_operations_task: Task<()>,
}
388
/// A write queued for the background DB task.
#[derive(Debug, PartialEq)]
enum DbOperation {
    // Insert or update the thread's metadata row.
    Upsert(ThreadMetadata),
    // Remove the thread's metadata row.
    Delete(ThreadId),
}
394
395impl DbOperation {
396 fn id(&self) -> ThreadId {
397 match self {
398 DbOperation::Upsert(thread) => thread.thread_id,
399 DbOperation::Delete(thread_id) => *thread_id,
400 }
401 }
402}
403
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}
412
#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// Resolves the test DB name: the explicit global override when one was
    /// set, otherwise a name derived from the current thread's name.
    pub fn global(cx: &App) -> String {
        match cx.try_global::<Self>() {
            Some(override_name) => override_name.0.clone(),
            None => {
                let current_thread = std::thread::current();
                let test_name = current_thread.name().unwrap_or("unknown_test");
                format!("THREAD_METADATA_DB_{}", test_name)
            }
        }
    }
}
425
impl ThreadMetadataStore {
    /// Installs the store as a GPUI global backed by the on-disk DB, if it
    /// hasn't been installed already.
    #[cfg(not(any(test, feature = "test-support")))]
    pub fn init_global(cx: &mut App) {
        // NOTE(review): this checks whether `Self` is a global, but below
        // (and everywhere visible in this file) only
        // `GlobalThreadMetadataStore` is ever set as a global — so this guard
        // may never fire and repeated calls would recreate the store. Confirm
        // whether this should be `has_global::<GlobalThreadMetadataStore>()`.
        if cx.has_global::<Self>() {
            return;
        }

        let db = ThreadMetadataDb::global(cx);
        let thread_store = cx.new(|cx| Self::new(db, cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }

    /// Test variant: always (re)creates the store against a per-test DB whose
    /// name comes from `TestMetadataDbName` (or the current thread's name).
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let db_name = TestMetadataDbName::global(cx);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }

    /// Returns the global store if `init_global` has run.
    pub fn try_global(cx: &App) -> Option<Entity<Self>> {
        cx.try_global::<GlobalThreadMetadataStore>()
            .map(|store| store.0.clone())
    }

    /// Returns the global store; panics if `init_global` hasn't run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }

    /// Whether the store has no threads at all (archived or not).
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }

    /// Returns all thread IDs.
    pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
        self.threads.keys().copied()
    }

    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
        self.threads.get(&thread_id)
    }

    /// Returns the metadata for a thread identified by its ACP session ID.
    pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        let thread_id = self.threads_by_session.get(session_id)?;
        self.threads.get(thread_id)
    }

    /// Returns all threads.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }

    /// Returns all archived threads.
    pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.entries().filter(|t| t.archived)
    }

    /// Returns all threads for the given path list, excluding archived threads.
    pub fn entries_for_path(
        &self,
        path_list: &PathList,
    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads_by_paths
            .get(path_list)
            .into_iter()
            .flatten()
            .filter_map(|s| self.threads.get(s))
            .filter(|s| !s.archived)
    }

    /// Returns threads whose `main_worktree_paths` matches the given path list,
    /// excluding archived threads. This finds threads that were opened in a
    /// linked worktree but are associated with the given main worktree.
    pub fn entries_for_main_worktree_path(
        &self,
        path_list: &PathList,
    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads_by_main_paths
            .get(path_list)
            .into_iter()
            .flatten()
            .filter_map(|s| self.threads.get(s))
            .filter(|s| !s.archived)
    }

    /// Replaces the in-memory state with a fresh read of the DB, rebuilding
    /// every secondary index. The returned task is `Shared` so concurrent
    /// callers can all await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous reload; the new one supersedes it.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        if let Some(sid) = &row.session_id {
                            this.threads_by_session.insert(sid.clone(), row.thread_id);
                        }
                        this.threads_by_paths
                            .entry(row.folder_paths().clone())
                            .or_default()
                            .insert(row.thread_id);
                        // Threads with empty main paths are not indexed here,
                        // mirroring `save_internal`.
                        if !row.main_worktree_paths().is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths().clone())
                                .or_default()
                                .insert(row.thread_id);
                        }
                        this.threads.insert(row.thread_id, row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }

    /// Saves several metadata entries, notifying observers once at the end.
    pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
        for metadata in metadata {
            self.save_internal(metadata);
        }
        cx.notify();
    }

    /// Saves one metadata entry and notifies observers.
    pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }

    /// Upserts `metadata` into the in-memory maps, keeps the path and session
    /// indexes consistent, and queues an asynchronous DB write. Does not
    /// notify — callers do.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the thread already exists and its paths changed, remove it from
        // the stale index buckets before re-inserting below.
        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        if let Some(sid) = &metadata.session_id {
            self.threads_by_session
                .insert(sid.clone(), metadata.thread_id);
        }

        self.threads.insert(metadata.thread_id, metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths().clone())
            .or_default()
            .insert(metadata.thread_id);

        if !metadata.main_worktree_paths().is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths().clone())
                .or_default()
                .insert(metadata.thread_id);
        }

        // Persist asynchronously; the background task batches and dedups
        // (see `dedup_db_operations`).
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }

    /// Updates a thread's working directories while keeping its main worktree
    /// paths; falls back to deriving everything from `work_dirs` when the two
    /// path lists can't be combined.
    pub fn update_working_directories(
        &mut self,
        thread_id: ThreadId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id) {
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }

    /// Replaces the worktree paths of all given threads wholesale, skipping
    /// threads that already have the target paths; notifies once if anything
    /// changed.
    pub fn update_worktree_paths(
        &mut self,
        thread_ids: &[ThreadId],
        worktree_paths: WorktreePaths,
        cx: &mut Context<Self>,
    ) {
        let mut changed = false;
        for &thread_id in thread_ids {
            let Some(thread) = self.threads.get(&thread_id) else {
                continue;
            };
            if thread.worktree_paths == worktree_paths {
                continue;
            }
            self.save_internal(ThreadMetadata {
                worktree_paths: worktree_paths.clone(),
                ..thread.clone()
            });
            changed = true;
        }
        if changed {
            cx.notify();
        }
    }

    /// Marks the thread archived and, when given, tracks the background
    /// archive job so `unarchive` can cancel it.
    pub fn archive(
        &mut self,
        thread_id: ThreadId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(thread_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(thread_id, job);
        }
    }

    /// Marks the thread unarchived and cancels any in-flight archive job.
    pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        self.update_archived(thread_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(&thread_id);
    }

    /// Drops the bookkeeping for an archive job that ran to completion.
    pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
        self.in_flight_archives.remove(&thread_id);
    }

    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    //
    // NOTE(review): this replaces only the FIRST occurrence of each old path,
    // while `complete_worktree_restore` below replaces ALL occurrences —
    // otherwise the two methods are identical. Confirm whether the asymmetry
    // is intentional or whether they should be merged.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }

    /// Applies `(old_path, new_path)` replacements to a thread's folder paths
    /// once a worktree restore has finished; unlike
    /// `update_restored_worktree_paths`, every occurrence of an old path is
    /// replaced.
    pub fn complete_worktree_restore(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }

    /// Apply a mutation to the worktree paths of all threads whose current
    /// `folder_paths` matches `current_folder_paths`, then re-index.
    /// When `remote_connection` is provided, only threads with a matching
    /// remote connection are affected.
    pub fn change_worktree_paths(
        &mut self,
        current_folder_paths: &PathList,
        remote_connection: Option<&RemoteConnectionOptions>,
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        let thread_ids: Vec<_> = self
            .threads_by_paths
            .get(current_folder_paths)
            .into_iter()
            .flatten()
            .filter(|id| {
                remote_connection.is_none()
                    || self
                        .threads
                        .get(id)
                        .and_then(|t| t.remote_connection.as_ref())
                        == remote_connection
            })
            .copied()
            .collect();

        self.mutate_thread_paths(&thread_ids, mutate, cx);
    }

    /// Like `change_worktree_paths`, but looks up threads by their
    /// `main_worktree_paths` instead of `folder_paths`. Used when
    /// migrating threads for project group key changes where the
    /// lookup key is the group key's main paths.
    /// When `remote_connection` is provided, only threads with a matching
    /// remote connection are affected.
    pub fn change_worktree_paths_by_main(
        &mut self,
        current_main_paths: &PathList,
        remote_connection: Option<&RemoteConnectionOptions>,
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        let thread_ids: Vec<_> = self
            .threads_by_main_paths
            .get(current_main_paths)
            .into_iter()
            .flatten()
            .filter(|id| {
                remote_connection.is_none()
                    || self
                        .threads
                        .get(id)
                        .and_then(|t| t.remote_connection.as_ref())
                        == remote_connection
            })
            .copied()
            .collect();

        self.mutate_thread_paths(&thread_ids, mutate, cx);
    }

    /// Runs `mutate` over each thread's `worktree_paths` in place, keeping
    /// both path indexes consistent and queueing a DB upsert per thread;
    /// notifies once at the end.
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // De-index under the old paths before mutating...
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                // ...then re-index under the new ones.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }

    /// Inserts a new `archived_git_worktrees` row on the background executor;
    /// resolves to the new row's ID.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Records that `thread_id` references the given archived worktree row.
    pub fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
                .await
        })
    }

    /// Fetches all archived-worktree records linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
    }

    /// Deletes an archived-worktree record by its row ID.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Removes every link between the given thread and archived worktrees.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(thread_id)
                .await
        })
    }

    /// Whether any thread still links to the given archived worktree.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }

    /// Sets the thread's `archived` flag (preserving everything else) and
    /// notifies observers.
    fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            self.save_internal(ThreadMetadata {
                archived,
                ..thread.clone()
            });
            cx.notify();
        }
    }

    /// Removes a thread from every in-memory map and queues its DB deletion.
    /// Notifies even when the thread wasn't present.
    pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            if let Some(sid) = &thread.session_id {
                self.threads_by_session.remove(sid);
            }
            if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                thread_ids.remove(&thread_id);
            }
            if !thread.main_worktree_paths().is_empty() {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&thread_id);
                }
            }
        }
        self.threads.remove(&thread_id);
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(thread_id))
            .log_err();
        cx.notify();
    }

    /// Builds the store: wires up per-ConversationView subscriptions, starts
    /// the background DB-write batcher, and kicks off the initial reload.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Subscribe to every ConversationView as it is created, and clean the
        // subscription up when the view is released.
        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                // Batch: block on the first queued op, then drain whatever
                // else has accumulated, dedup, and apply.
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        let _ = this.reload(cx);
        this
    }

    /// Collapses a batch of queued operations to the LAST operation per
    /// thread ID (earlier ops are superseded). Note the result's ordering is
    /// arbitrary (HashMap iteration), which is safe because at most one
    /// operation per thread survives.
    fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
        let mut ops = HashMap::default();
        // Walk newest-to-oldest and keep only the first (i.e. latest) op
        // seen for each thread.
        for operation in operations.into_iter().rev() {
            if ops.contains_key(&operation.id()) {
                continue;
            }
            ops.insert(operation.id(), operation);
        }
        ops.into_values().collect()
    }

    /// Refreshes a thread's metadata from its ConversationView whenever the
    /// root thread changes; no-ops for views without a root ACP thread or
    /// with an empty thread.
    fn handle_conversation_event(
        &mut self,
        conversation_view: Entity<crate::ConversationView>,
        _event: &crate::conversation_view::RootThreadUpdated,
        cx: &mut Context<Self>,
    ) {
        let view = conversation_view.read(cx);
        let thread_id = view.thread_id;
        let Some(thread) = view.root_acp_thread(cx) else {
            return;
        };

        let thread_ref = thread.read(cx);
        // Don't persist metadata for threads with no content yet.
        if thread_ref.entries().is_empty() {
            return;
        }

        let existing_thread = self.entry(thread_id);
        let session_id = Some(thread_ref.session_id().clone());
        let title = thread_ref.title();

        let updated_at = Utc::now();

        // Keep the original creation time if we've seen this thread before.
        let created_at = existing_thread
            .and_then(|t| t.created_at)
            .unwrap_or_else(|| updated_at);

        let agent_id = thread_ref.connection().agent_id();

        let project = thread_ref.project().read(cx);
        let worktree_paths = project.worktree_paths(cx);

        let remote_connection = project.remote_connection_options(cx);

        // Threads without a folder path (e.g. started in an empty
        // window) are archived by default so they don't get lost,
        // because they won't show up in the sidebar. Users can reload
        // them from the archive.
        let archived = existing_thread
            .map(|t| t.archived)
            .unwrap_or(worktree_paths.is_empty());

        let metadata = ThreadMetadata {
            thread_id,
            session_id,
            agent_id,
            title,
            created_at: Some(created_at),
            updated_at,
            worktree_paths,
            remote_connection,
            archived,
        };

        self.save(metadata, cx);
    }
}
1088
// Marker impl: allows a single `ThreadMetadataStore` to be stored and
// retrieved as a `gpui` global (see `init_global` / `global` usages below).
impl Global for ThreadMetadataStore {}
1090
/// Newtype over a thread-safe SQLite connection holding the sidebar thread
/// metadata tables (`sidebar_threads`, `archived_git_worktrees`, …).
struct ThreadMetadataDb(ThreadSafeConnection);
1092
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Migrations run in order and are append-only: never reorder or edit an
    // entry that has shipped — add a new one instead.
    const MIGRATIONS: &[&str] = &[
        // v1: original schema, keyed by ACP session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // v2-v4: additive columns for archiving and main worktree paths.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // v5: archived git worktrees plus a session-keyed join table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // v8: re-key both tables from session_id to thread_id. SQLite can't
        // change a primary key in place, so each table is rebuilt as a _v2
        // copy, data is migrated across, and the copy is renamed back.
        sql!(
            // Backfill: rows predating the thread_id column get random ids.
            // NOTE(review): randomblob(16) is 16 random bytes, not a
            // version/variant-tagged UUID — presumably fine since ids are
            // opaque here; confirm nothing parses them as RFC 4122 UUIDs.
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
    ];
}
1171
// Registers a process-wide static connection for `ThreadMetadataDb`
// (no parent domains), via the `db` crate's connection machinery.
db::static_connection!(ThreadMetadataDb, []);
1173
impl ThreadMetadataDb {
    /// List only the thread ids, newest first.
    #[allow(dead_code)]
    pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
        self.select::<ThreadId>(
            "SELECT thread_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    // Column order here must match the decode order in
    // `impl Column for ThreadMetadata` below.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }

    /// Upsert metadata for a thread.
    ///
    /// Encoding choices: the built-in Zed agent is stored as NULL;
    /// timestamps are RFC 3339 TEXT; empty path lists become NULL rather
    /// than empty strings; the remote connection is serialized as JSON.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let session_id = row.session_id.as_ref().map(|s| s.0.clone());
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row
            .title
            .as_ref()
            .map(|t| t.to_string())
            .unwrap_or_default();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let thread_id = row.thread_id;
        let archived = row.archived;

        self.write(move |conn| {
            // Bind order below must track the column list here exactly.
            let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) \
                ON CONFLICT(thread_id) DO UPDATE SET \
                    session_id = excluded.session_id, \
                    agent_id = excluded.agent_id, \
                    title = excluded.title, \
                    updated_at = excluded.updated_at, \
                    created_at = excluded.created_at, \
                    folder_paths = excluded.folder_paths, \
                    folder_paths_order = excluded.folder_paths_order, \
                    archived = excluded.archived, \
                    main_worktree_paths = excluded.main_worktree_paths, \
                    main_worktree_paths_order = excluded.main_worktree_paths_order, \
                    remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            let mut i = stmt.bind(&thread_id, 1)?;
            i = stmt.bind(&session_id, i)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    // NOTE(review): does not touch thread_archived_worktrees links — callers
    // presumably handle those separately; confirm.
    pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert an archived git worktree record and return its new rowid
    /// (via SQLite's RETURNING clause).
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Associate a thread with an archived worktree (many-to-many link).
    pub async fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&thread_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetch all archived worktrees linked to the given thread.
    // NOTE(review): body contains no awaits; `async` appears to exist only
    // for signature symmetry with the other accessors — confirm.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<ThreadId, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.thread_id = ?1",
        )?(thread_id)
    }

    /// Delete an archived worktree record and any thread links pointing at
    /// it. Links are removed first so the FK reference never dangles.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Remove every archived-worktree link for a thread (the worktree
    /// records themselves are left in place).
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
            )?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// True if at least one thread still links to this archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }
}
1372
1373impl Column for ThreadMetadata {
1374 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1375 let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
1376 let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
1377 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1378 let (title, next): (String, i32) = Column::column(statement, next)?;
1379 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1380 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1381 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1382 let (folder_paths_order_str, next): (Option<String>, i32) =
1383 Column::column(statement, next)?;
1384 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1385 let (main_worktree_paths_str, next): (Option<String>, i32) =
1386 Column::column(statement, next)?;
1387 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1388 Column::column(statement, next)?;
1389 let (remote_connection_json, next): (Option<String>, i32) =
1390 Column::column(statement, next)?;
1391
1392 let agent_id = agent_id
1393 .map(|id| AgentId::new(id))
1394 .unwrap_or(ZED_AGENT_ID.clone());
1395
1396 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1397 let created_at = created_at_str
1398 .as_deref()
1399 .map(DateTime::parse_from_rfc3339)
1400 .transpose()?
1401 .map(|dt| dt.with_timezone(&Utc));
1402
1403 let folder_paths = folder_paths_str
1404 .map(|paths| {
1405 PathList::deserialize(&util::path_list::SerializedPathList {
1406 paths,
1407 order: folder_paths_order_str.unwrap_or_default(),
1408 })
1409 })
1410 .unwrap_or_default();
1411
1412 let main_worktree_paths = main_worktree_paths_str
1413 .map(|paths| {
1414 PathList::deserialize(&util::path_list::SerializedPathList {
1415 paths,
1416 order: main_worktree_paths_order_str.unwrap_or_default(),
1417 })
1418 })
1419 .unwrap_or_default();
1420
1421 let remote_connection = remote_connection_json
1422 .as_deref()
1423 .map(serde_json::from_str::<RemoteConnectionOptions>)
1424 .transpose()
1425 .context("deserialize thread metadata remote connection")?;
1426
1427 let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
1428 .unwrap_or_else(|_| WorktreePaths::default());
1429
1430 let thread_id = ThreadId(thread_id_uuid);
1431
1432 Ok((
1433 ThreadMetadata {
1434 thread_id,
1435 session_id: id.map(acp::SessionId::new),
1436 agent_id,
1437 title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
1438 None
1439 } else {
1440 Some(title.into())
1441 },
1442 updated_at,
1443 created_at,
1444 worktree_paths,
1445 remote_connection,
1446 archived,
1447 },
1448 next,
1449 ))
1450 }
1451}
1452
1453impl Column for ArchivedGitWorktree {
1454 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1455 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1456 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1457 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1458 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1459 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1460 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1461 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1462
1463 Ok((
1464 ArchivedGitWorktree {
1465 id,
1466 worktree_path: PathBuf::from(worktree_path_str),
1467 main_repo_path: PathBuf::from(main_repo_path_str),
1468 branch_name,
1469 staged_commit_hash,
1470 unstaged_commit_hash,
1471 original_commit_hash,
1472 },
1473 next,
1474 ))
1475 }
1476}
1477
1478#[cfg(test)]
1479mod tests {
1480 use super::*;
1481 use acp_thread::StubAgentConnection;
1482 use action_log::ActionLog;
1483 use agent::DbThread;
1484 use agent_client_protocol as acp;
1485
1486 use gpui::{TestAppContext, VisualTestContext};
1487 use project::FakeFs;
1488 use project::Project;
1489 use remote::WslConnectionOptions;
1490 use std::path::Path;
1491 use std::rc::Rc;
1492 use workspace::MultiWorkspace;
1493
1494 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1495 DbThread {
1496 title: title.to_string().into(),
1497 messages: Vec::new(),
1498 updated_at,
1499 detailed_summary: None,
1500 initial_project_snapshot: None,
1501 cumulative_token_usage: Default::default(),
1502 request_token_usage: Default::default(),
1503 model: None,
1504 profile: None,
1505 imported: false,
1506 subagent_context: None,
1507 speed: None,
1508 thinking_enabled: false,
1509 thinking_effort: None,
1510 draft_prompt: None,
1511 ui_scroll_position: None,
1512 }
1513 }
1514
1515 fn make_metadata(
1516 session_id: &str,
1517 title: &str,
1518 updated_at: DateTime<Utc>,
1519 folder_paths: PathList,
1520 ) -> ThreadMetadata {
1521 ThreadMetadata {
1522 thread_id: ThreadId::new(),
1523 archived: false,
1524 session_id: Some(acp::SessionId::new(session_id)),
1525 agent_id: agent::ZED_AGENT_ID.clone(),
1526 title: if title.is_empty() {
1527 None
1528 } else {
1529 Some(title.to_string().into())
1530 },
1531 updated_at,
1532 created_at: Some(updated_at),
1533 worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
1534 remote_connection: None,
1535 }
1536 }
1537
    /// Common test setup: installs the settings store, theme/editor/release
    /// channel/prompt-store globals, a fake filesystem, and the thread
    /// metadata + thread store globals. Initialization order mirrors app
    /// startup and matters — keep it.
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init("0.0.0".parse().unwrap(), cx);
            prompt_store::init(cx);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
            language_model::LanguageModelRegistry::test(cx);
        });
        // Drain any async work kicked off by the init calls above.
        cx.run_until_parked();
    }
1554
    /// Open a test window hosting the given project and construct an
    /// `AgentPanel` inside its workspace, returning the panel and the
    /// visual test context driving the window.
    fn setup_panel_with_project(
        project: Entity<Project>,
        cx: &mut TestAppContext,
    ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace_entity = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();
        let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
        let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
            cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
        });
        (panel, vcx)
    }
1570
1571 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1572 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1573 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1574 .unwrap();
1575 }
1576
    /// Run both thread-metadata migrations as production `init` would,
    /// after clearing the remote-connection backfill marker so the second
    /// migration isn't skipped, then wait for the async work to settle.
    fn run_thread_metadata_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        cx.run_until_parked();
    }
1585
    // Seeds the metadata DB directly with two rows, then verifies that
    // initializing the global store populates its in-memory caches (entry
    // ids, session index, and per-path index) from the persisted rows.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test DB after the running test thread so parallel tests
        // don't collide on the same database.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        // Let the store's async reload finish before inspecting the caches.
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // Each path bucket should contain exactly its own thread.
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1654
    // Exercises the store's in-memory indices across three phases:
    // (1) two saves land in their respective path buckets, (2) re-saving a
    // thread with new paths moves it between buckets, (3) deleting a thread
    // removes it from both the id set and the path index.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );
        let session1_thread_id = initial_metadata.thread_id;

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );
        let session2_thread_id = second_metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // Phase 1: each thread indexed under its own path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under project-b's paths (same thread id, so this
        // is an update, not an insert).
        let moved_metadata = ThreadMetadata {
            thread_id: session1_thread_id,
            session_id: Some(acp::SessionId::new("session-1")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("First Thread".into()),
            updated_at: updated_time,
            created_at: Some(updated_time),
            worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        // Phase 2: session-1 left the first bucket and joined the second.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(first_path_entries.is_empty());

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(session2_thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Phase 3: only session-1 remains after deleting session-2.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 1);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1781
    // Verifies the native-store migration: threads already present in the
    // metadata store are left untouched (title and archived flag preserved),
    // while threads known only to the native ThreadStore are imported and
    // arrive archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for a-session-0: the migration must not
        // overwrite this row with the native store's version.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Native-store threads: one colliding with the existing row, two
        // fresh project threads, and one with no project at all.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // 1 pre-existing + 3 newly migrated = 4 total, all on the Zed agent.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing row kept its title and unarchived state.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // Everything the migration imported arrives archived.
        let migrated_entries: Vec<_> = list
            .iter()
            .filter(|metadata| {
                !metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .collect();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1900
    // When every native-store thread already has a metadata row, the
    // migration must change nothing: no duplicate rows, and the native
    // store's (newer) title must not clobber the existing metadata.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save the same session into the native store with a newer title and
        // timestamp — the migration should still skip it.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
1960
    // Verifies the remote-connection backfill: a metadata row whose folder
    // paths match a workspace that has a remote connection in the workspace
    // DB should pick up that connection's options during migration.
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Hand-craft a workspace row pointing at the same folder paths with
        // a WSL remote connection (user "anth", distro "Ubuntu"), writing
        // directly against the workspace DB schema.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Clear the backfill marker so the migration actually runs; pass a
        // ready task since the metadata migration isn't under test here.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2036
    // Verifies the migration's per-project archiving cap: for each project,
    // only the five most recently updated imported threads stay unarchived;
    // anything older is archived. Projects under the cap are untouched.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the slice boundaries below line up with the cap.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2125
    /// Events from a thread with no entries must not update sidebar
    /// metadata: the panel's initial entry keeps `session_id: None` until
    /// the thread actually has content.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Initial metadata was created by the panel with session_id: None.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(
                store.entry(thread_id).unwrap().session_id.is_none(),
                "expected initial panel metadata to have no session_id"
            );
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), leaving session_id as None.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert!(
                store.entry(thread_id).unwrap().session_id.is_none(),
                "expected title updates on empty thread to be ignored by event handler"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2182
    /// Dropping the panel (which releases the ConversationView and its
    /// thread) must not remove metadata for a thread that has content.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let session_id = crate::test_support::active_session_id(&panel, &vcx);
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());

        // Give the thread content so it is no longer considered empty.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });

        // Dropping the panel releases the ConversationView and its thread.
        drop(panel);
        // Run an empty update cycle so the release is actually processed.
        cx.update(|_| {});
        cx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });
    }
2219
    /// Threads opened in a project with no worktrees get empty
    /// `folder_paths` and should default to archived; threads tied to a
    /// real worktree keep its paths and remain unarchived.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        // Push content so the thread is non-empty and its metadata gets
        // written by the event handler.
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2290
2291 #[gpui::test]
2292 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2293 init_test(cx);
2294
2295 let fs = FakeFs::new(cx.executor());
2296 let project = Project::test(fs, None::<&Path>, cx).await;
2297 let connection = Rc::new(StubAgentConnection::new());
2298
2299 // Create a regular (non-subagent) thread through the panel.
2300 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2301 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2302
2303 let regular_thread =
2304 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2305 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2306
2307 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2308 thread.push_user_content_block(None, "content".into(), cx);
2309 thread.set_title("Regular Thread".into(), cx).detach();
2310 });
2311 vcx.run_until_parked();
2312
2313 // Create a standalone subagent AcpThread (not wrapped in a
2314 // ConversationView). The ThreadMetadataStore only observes
2315 // ConversationView events, so this thread's events should
2316 // have no effect on sidebar metadata.
2317 let subagent_session_id = acp::SessionId::new("subagent-session");
2318 let subagent_thread = cx.update(|cx| {
2319 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2320 cx.new(|cx| {
2321 acp_thread::AcpThread::new(
2322 Some(regular_session_id.clone()),
2323 Some("Subagent Thread".into()),
2324 None,
2325 connection.clone(),
2326 project.clone(),
2327 action_log,
2328 subagent_session_id.clone(),
2329 watch::Receiver::constant(acp::PromptCapabilities::new()),
2330 cx,
2331 )
2332 })
2333 });
2334
2335 cx.update(|cx| {
2336 subagent_thread.update(cx, |thread, cx| {
2337 thread
2338 .set_title("Subagent Thread Title".into(), cx)
2339 .detach();
2340 });
2341 });
2342 cx.run_until_parked();
2343
2344 // Only the regular thread should appear in sidebar metadata.
2345 // The subagent thread is excluded because the metadata store
2346 // only observes ConversationView events.
2347 let list = cx.update(|cx| {
2348 let store = ThreadMetadataStore::global(cx);
2349 store.read(cx).entries().cloned().collect::<Vec<_>>()
2350 });
2351
2352 assert_eq!(
2353 list.len(),
2354 1,
2355 "Expected only the regular thread in sidebar metadata, \
2356 but found {} entries (subagent threads are leaking into the sidebar)",
2357 list.len(),
2358 );
2359 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2360 assert_eq!(list[0].display_title(), "Regular Thread");
2361 }
2362
2363 #[test]
2364 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2365 let now = Utc::now();
2366
2367 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2368 let thread_id = meta.thread_id;
2369 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2370
2371 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2372
2373 assert_eq!(deduped.len(), 1);
2374 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2375 }
2376
2377 #[test]
2378 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2379 let now = Utc::now();
2380 let later = now + chrono::Duration::seconds(1);
2381
2382 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2383 let shared_thread_id = old_metadata.thread_id;
2384 let new_metadata = ThreadMetadata {
2385 thread_id: shared_thread_id,
2386 ..make_metadata("session-1", "New Title", later, PathList::default())
2387 };
2388
2389 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2390 DbOperation::Upsert(old_metadata),
2391 DbOperation::Upsert(new_metadata.clone()),
2392 ]);
2393
2394 assert_eq!(deduped.len(), 1);
2395 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2396 }
2397
2398 #[test]
2399 fn test_dedup_db_operations_preserves_distinct_sessions() {
2400 let now = Utc::now();
2401
2402 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2403 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2404 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2405 DbOperation::Upsert(metadata1.clone()),
2406 DbOperation::Upsert(metadata2.clone()),
2407 ]);
2408
2409 assert_eq!(deduped.len(), 2);
2410 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2411 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2412 }
2413
    /// Archiving hides a thread from `entries_for_path` and surfaces it in
    /// `archived_entries`; unarchiving restores the original visibility.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: visible for its path, nothing archived yet.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Archived threads no longer show up for their path...
            let path_entries: Vec<_> = store
                .entries_for_path(&paths)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            // ...but are reported by archived_entries with the flag set.
            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores visibility for the project path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2496
    /// `entries_for_path` must exclude archived threads, while `entries`
    /// still counts them and `archived_entries` reports exactly the
    /// archived ones.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let session2_thread_id = metadata2.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(session2_thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Only the unarchived thread is visible for the path...
            let path_entries: Vec<_> = store
                .entries_for_path(&paths)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            // ...but both threads still exist in the store.
            assert_eq!(store.entries().count(), 2);

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2551
2552 #[gpui::test]
2553 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
2554 init_test(cx);
2555
2556 let paths = PathList::new(&[Path::new("/project-a")]);
2557 let now = Utc::now();
2558
2559 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
2560 let m2 = make_metadata(
2561 "session-2",
2562 "Thread Two",
2563 now - chrono::Duration::seconds(1),
2564 paths.clone(),
2565 );
2566 let m3 = make_metadata(
2567 "session-3",
2568 "Thread Three",
2569 now - chrono::Duration::seconds(2),
2570 paths,
2571 );
2572
2573 cx.update(|cx| {
2574 let store = ThreadMetadataStore::global(cx);
2575 store.update(cx, |store, cx| {
2576 store.save_all(vec![m1, m2, m3], cx);
2577 });
2578 });
2579
2580 cx.run_until_parked();
2581
2582 cx.update(|cx| {
2583 let store = ThreadMetadataStore::global(cx);
2584 let store = store.read(cx);
2585
2586 assert_eq!(store.entries().count(), 3);
2587 assert!(
2588 store
2589 .entry_by_session(&acp::SessionId::new("session-1"))
2590 .is_some()
2591 );
2592 assert!(
2593 store
2594 .entry_by_session(&acp::SessionId::new("session-2"))
2595 .is_some()
2596 );
2597 assert!(
2598 store
2599 .entry_by_session(&acp::SessionId::new("session-3"))
2600 .is_some()
2601 );
2602
2603 assert_eq!(store.entry_ids().count(), 3);
2604 });
2605 }
2606
    /// The archived flag must survive a `reload` of the store — i.e. it is
    /// written through rather than held only in memory.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Reload the store (presumably re-reading persisted state —
        // see ThreadMetadataStore::reload); the archived flag must survive.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2665
2666 #[gpui::test]
2667 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2668 init_test(cx);
2669
2670 cx.run_until_parked();
2671
2672 cx.update(|cx| {
2673 let store = ThreadMetadataStore::global(cx);
2674 store.update(cx, |store, cx| {
2675 store.archive(ThreadId::new(), None, cx);
2676 });
2677 });
2678
2679 cx.run_until_parked();
2680
2681 cx.update(|cx| {
2682 let store = ThreadMetadataStore::global(cx);
2683 let store = store.read(cx);
2684
2685 assert!(store.is_empty());
2686 assert_eq!(store.entries().count(), 0);
2687 assert_eq!(store.archived_entries().count(), 0);
2688 });
2689 }
2690
2691 #[gpui::test]
2692 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2693 init_test(cx);
2694
2695 let paths = PathList::new(&[Path::new("/project-a")]);
2696 let now = Utc::now();
2697 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2698 let thread_id = metadata.thread_id;
2699
2700 cx.update(|cx| {
2701 let store = ThreadMetadataStore::global(cx);
2702 store.update(cx, |store, cx| {
2703 store.save(metadata.clone(), cx);
2704 store.archive(thread_id, None, cx);
2705 });
2706 });
2707
2708 cx.run_until_parked();
2709
2710 cx.update(|cx| {
2711 let store = ThreadMetadataStore::global(cx);
2712 let store = store.read(cx);
2713
2714 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2715 pretty_assertions::assert_eq!(
2716 entries,
2717 vec![ThreadMetadata {
2718 archived: true,
2719 ..metadata
2720 }]
2721 );
2722 });
2723 }
2724
2725 #[gpui::test]
2726 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2727 init_test(cx);
2728 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2729
2730 let id = store
2731 .read_with(cx, |store, cx| {
2732 store.create_archived_worktree(
2733 "/tmp/worktree".to_string(),
2734 "/home/user/repo".to_string(),
2735 Some("feature-branch".to_string()),
2736 "staged_aaa".to_string(),
2737 "unstaged_bbb".to_string(),
2738 "original_000".to_string(),
2739 cx,
2740 )
2741 })
2742 .await
2743 .unwrap();
2744
2745 let thread_id_1 = ThreadId::new();
2746
2747 store
2748 .read_with(cx, |store, cx| {
2749 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
2750 })
2751 .await
2752 .unwrap();
2753
2754 let worktrees = store
2755 .read_with(cx, |store, cx| {
2756 store.get_archived_worktrees_for_thread(thread_id_1, cx)
2757 })
2758 .await
2759 .unwrap();
2760
2761 assert_eq!(worktrees.len(), 1);
2762 let wt = &worktrees[0];
2763 assert_eq!(wt.id, id);
2764 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2765 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2766 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2767 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2768 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2769 assert_eq!(wt.original_commit_hash, "original_000");
2770 }
2771
2772 #[gpui::test]
2773 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2774 init_test(cx);
2775 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2776
2777 let id = store
2778 .read_with(cx, |store, cx| {
2779 store.create_archived_worktree(
2780 "/tmp/worktree".to_string(),
2781 "/home/user/repo".to_string(),
2782 Some("main".to_string()),
2783 "deadbeef".to_string(),
2784 "deadbeef".to_string(),
2785 "original_000".to_string(),
2786 cx,
2787 )
2788 })
2789 .await
2790 .unwrap();
2791
2792 let thread_id_1 = ThreadId::new();
2793
2794 store
2795 .read_with(cx, |store, cx| {
2796 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
2797 })
2798 .await
2799 .unwrap();
2800
2801 store
2802 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2803 .await
2804 .unwrap();
2805
2806 let worktrees = store
2807 .read_with(cx, |store, cx| {
2808 store.get_archived_worktrees_for_thread(thread_id_1, cx)
2809 })
2810 .await
2811 .unwrap();
2812 assert!(worktrees.is_empty());
2813 }
2814
2815 #[gpui::test]
2816 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2817 init_test(cx);
2818 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2819
2820 let id = store
2821 .read_with(cx, |store, cx| {
2822 store.create_archived_worktree(
2823 "/tmp/worktree".to_string(),
2824 "/home/user/repo".to_string(),
2825 None,
2826 "abc123".to_string(),
2827 "abc123".to_string(),
2828 "original_000".to_string(),
2829 cx,
2830 )
2831 })
2832 .await
2833 .unwrap();
2834
2835 let thread_id_1 = ThreadId::new();
2836 let thread_id_2 = ThreadId::new();
2837
2838 store
2839 .read_with(cx, |store, cx| {
2840 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
2841 })
2842 .await
2843 .unwrap();
2844
2845 store
2846 .read_with(cx, |store, cx| {
2847 store.link_thread_to_archived_worktree(thread_id_2, id, cx)
2848 })
2849 .await
2850 .unwrap();
2851
2852 let wt1 = store
2853 .read_with(cx, |store, cx| {
2854 store.get_archived_worktrees_for_thread(thread_id_1, cx)
2855 })
2856 .await
2857 .unwrap();
2858
2859 let wt2 = store
2860 .read_with(cx, |store, cx| {
2861 store.get_archived_worktrees_for_thread(thread_id_2, cx)
2862 })
2863 .await
2864 .unwrap();
2865
2866 assert_eq!(wt1.len(), 1);
2867 assert_eq!(wt2.len(), 1);
2868 assert_eq!(wt1[0].id, wt2[0].id);
2869 }
2870
2871 #[gpui::test]
2872 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
2873 init_test(cx);
2874 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2875
2876 let original_paths = PathList::new(&[
2877 Path::new("/projects/worktree-a"),
2878 Path::new("/projects/worktree-b"),
2879 Path::new("/other/unrelated"),
2880 ]);
2881 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2882 let thread_id = meta.thread_id;
2883
2884 store.update(cx, |store, cx| {
2885 store.save(meta, cx);
2886 });
2887
2888 let replacements = vec![
2889 (
2890 PathBuf::from("/projects/worktree-a"),
2891 PathBuf::from("/restored/worktree-a"),
2892 ),
2893 (
2894 PathBuf::from("/projects/worktree-b"),
2895 PathBuf::from("/restored/worktree-b"),
2896 ),
2897 ];
2898
2899 store.update(cx, |store, cx| {
2900 store.complete_worktree_restore(thread_id, &replacements, cx);
2901 });
2902
2903 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
2904 let entry = entry.unwrap();
2905 let paths = entry.folder_paths().paths();
2906 assert_eq!(paths.len(), 3);
2907 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2908 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2909 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2910 }
2911
2912 #[gpui::test]
2913 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
2914 init_test(cx);
2915 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2916
2917 let original_paths =
2918 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2919 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2920 let thread_id = meta.thread_id;
2921
2922 store.update(cx, |store, cx| {
2923 store.save(meta, cx);
2924 });
2925
2926 let replacements = vec![
2927 (
2928 PathBuf::from("/projects/worktree-a"),
2929 PathBuf::from("/new/worktree-a"),
2930 ),
2931 (
2932 PathBuf::from("/nonexistent/path"),
2933 PathBuf::from("/should/not/appear"),
2934 ),
2935 ];
2936
2937 store.update(cx, |store, cx| {
2938 store.complete_worktree_restore(thread_id, &replacements, cx);
2939 });
2940
2941 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
2942 let entry = entry.unwrap();
2943 let paths = entry.folder_paths().paths();
2944 assert_eq!(paths.len(), 2);
2945 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2946 assert!(paths.contains(&PathBuf::from("/other/path")));
2947 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2948 }
2949
2950 #[gpui::test]
2951 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
2952 init_test(cx);
2953 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2954
2955 let original_paths = PathList::new(&[
2956 Path::new("/projects/worktree-a"),
2957 Path::new("/projects/worktree-b"),
2958 Path::new("/other/unrelated"),
2959 ]);
2960 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2961 let thread_id = meta.thread_id;
2962
2963 store.update(cx, |store, cx| {
2964 store.save(meta, cx);
2965 });
2966
2967 let replacements = vec![
2968 (
2969 PathBuf::from("/projects/worktree-a"),
2970 PathBuf::from("/restored/worktree-a"),
2971 ),
2972 (
2973 PathBuf::from("/projects/worktree-b"),
2974 PathBuf::from("/restored/worktree-b"),
2975 ),
2976 ];
2977
2978 store.update(cx, |store, cx| {
2979 store.update_restored_worktree_paths(thread_id, &replacements, cx);
2980 });
2981
2982 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
2983 let entry = entry.unwrap();
2984 let paths = entry.folder_paths().paths();
2985 assert_eq!(paths.len(), 3);
2986 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2987 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2988 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2989 }
2990
2991 #[gpui::test]
2992 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
2993 init_test(cx);
2994 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2995
2996 let original_paths =
2997 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2998 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2999 let thread_id = meta.thread_id;
3000
3001 store.update(cx, |store, cx| {
3002 store.save(meta, cx);
3003 });
3004
3005 let replacements = vec![
3006 (
3007 PathBuf::from("/projects/worktree-a"),
3008 PathBuf::from("/new/worktree-a"),
3009 ),
3010 (
3011 PathBuf::from("/nonexistent/path"),
3012 PathBuf::from("/should/not/appear"),
3013 ),
3014 ];
3015
3016 store.update(cx, |store, cx| {
3017 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3018 });
3019
3020 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3021 let entry = entry.unwrap();
3022 let paths = entry.folder_paths().paths();
3023 assert_eq!(paths.len(), 2);
3024 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3025 assert!(paths.contains(&PathBuf::from("/other/path")));
3026 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3027 }
3028
3029 #[gpui::test]
3030 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
3031 init_test(cx);
3032 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3033
3034 let id1 = store
3035 .read_with(cx, |store, cx| {
3036 store.create_archived_worktree(
3037 "/projects/worktree-a".to_string(),
3038 "/home/user/repo".to_string(),
3039 Some("branch-a".to_string()),
3040 "staged_a".to_string(),
3041 "unstaged_a".to_string(),
3042 "original_000".to_string(),
3043 cx,
3044 )
3045 })
3046 .await
3047 .unwrap();
3048
3049 let id2 = store
3050 .read_with(cx, |store, cx| {
3051 store.create_archived_worktree(
3052 "/projects/worktree-b".to_string(),
3053 "/home/user/repo".to_string(),
3054 Some("branch-b".to_string()),
3055 "staged_b".to_string(),
3056 "unstaged_b".to_string(),
3057 "original_000".to_string(),
3058 cx,
3059 )
3060 })
3061 .await
3062 .unwrap();
3063
3064 let thread_id_1 = ThreadId::new();
3065
3066 store
3067 .read_with(cx, |store, cx| {
3068 store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
3069 })
3070 .await
3071 .unwrap();
3072
3073 store
3074 .read_with(cx, |store, cx| {
3075 store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
3076 })
3077 .await
3078 .unwrap();
3079
3080 let worktrees = store
3081 .read_with(cx, |store, cx| {
3082 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3083 })
3084 .await
3085 .unwrap();
3086
3087 assert_eq!(worktrees.len(), 2);
3088
3089 let paths: Vec<&Path> = worktrees
3090 .iter()
3091 .map(|w| w.worktree_path.as_path())
3092 .collect();
3093 assert!(paths.contains(&Path::new("/projects/worktree-a")));
3094 assert!(paths.contains(&Path::new("/projects/worktree-b")));
3095 }
3096
3097 // ── Migration tests ────────────────────────────────────────────────
3098
    // Verifies migration 7 (the thread_id primary-key migration): rows with a
    // NULL thread_id get a freshly generated 16-byte id, rows that already had
    // one keep it, and dependent join-table rows are rewritten to reference
    // the backfilled ids. Exercised against an in-memory sqlite database by
    // first applying only the pre-migration schema, seeding data, then
    // applying the full migration list.
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id, two without.
        // The blob literal is the fixed byte sequence 0x01..0x10 so we can
        // later assert the pre-existing id survived unchanged.
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at, thread_id) \
                 VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                 (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                 (session_id, archived_worktree_id) \
                 VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
            .unwrap();

        // Run all migrations (0-7). sqlez skips 0-6 and runs only migration 7.
        connection
            .migrate(
                ThreadMetadataDb::NAME,
                ThreadMetadataDb::MIGRATIONS,
                &mut |_, _, _| false,
            )
            .expect("new migration should succeed");

        // All 3 rows should survive with non-NULL thread_ids.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
            .unwrap()
            .unwrap();
        assert_eq!(
            original_tid,
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        // (16 bytes matches the size of a serialized UUID — see ThreadId.)
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
            .unwrap()
            .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
            .unwrap()
            .unwrap();
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3260
3261 // ── ThreadWorktreePaths tests ──────────────────────────────────────
3262
3263 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3264 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3265 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3266 .iter()
3267 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3268 .unzip();
3269 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3270 }
3271
3272 #[test]
3273 fn test_thread_worktree_paths_full_add_then_remove_cycle() {
3274 // Full scenario from the issue:
3275 // 1. Start with linked worktree selectric → zed
3276 // 2. Add cloud
3277 // 3. Remove zed
3278
3279 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3280
3281 // Step 2: add cloud
3282 paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));
3283
3284 assert_eq!(paths.ordered_pairs().count(), 2);
3285 assert_eq!(
3286 paths.folder_path_list(),
3287 &PathList::new(&[
3288 Path::new("/worktrees/selectric/zed"),
3289 Path::new("/projects/cloud"),
3290 ])
3291 );
3292 assert_eq!(
3293 paths.main_worktree_path_list(),
3294 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
3295 );
3296
3297 // Step 3: remove zed
3298 paths.remove_main_path(Path::new("/projects/zed"));
3299
3300 assert_eq!(paths.ordered_pairs().count(), 1);
3301 assert_eq!(
3302 paths.folder_path_list(),
3303 &PathList::new(&[Path::new("/projects/cloud")])
3304 );
3305 assert_eq!(
3306 paths.main_worktree_path_list(),
3307 &PathList::new(&[Path::new("/projects/cloud")])
3308 );
3309 }
3310
3311 #[test]
3312 fn test_thread_worktree_paths_add_is_idempotent() {
3313 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3314
3315 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3316
3317 assert_eq!(paths.ordered_pairs().count(), 1);
3318 }
3319
3320 #[test]
3321 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3322 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3323
3324 paths.remove_main_path(Path::new("/projects/nonexistent"));
3325
3326 assert_eq!(paths.ordered_pairs().count(), 1);
3327 }
3328
3329 #[test]
3330 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3331 let folder = PathList::new(&[
3332 Path::new("/worktrees/selectric/zed"),
3333 Path::new("/projects/cloud"),
3334 ]);
3335 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3336
3337 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3338
3339 let pairs: Vec<_> = paths
3340 .ordered_pairs()
3341 .map(|(m, f)| (m.clone(), f.clone()))
3342 .collect();
3343 assert_eq!(pairs.len(), 2);
3344 assert!(pairs.contains(&(
3345 PathBuf::from("/projects/zed"),
3346 PathBuf::from("/worktrees/selectric/zed")
3347 )));
3348 assert!(pairs.contains(&(
3349 PathBuf::from("/projects/cloud"),
3350 PathBuf::from("/projects/cloud")
3351 )));
3352 }
3353
3354 #[test]
3355 fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
3356 // Two linked worktrees of the same main repo: the main_worktree_path_list
3357 // deduplicates because PathList stores unique sorted paths, but
3358 // ordered_pairs still has both entries.
3359 let paths = make_worktree_paths(&[
3360 ("/projects/zed", "/worktrees/selectric/zed"),
3361 ("/projects/zed", "/worktrees/feature/zed"),
3362 ]);
3363
3364 // main_worktree_path_list has the duplicate main path twice
3365 // (PathList keeps all entries from its input)
3366 assert_eq!(paths.ordered_pairs().count(), 2);
3367 assert_eq!(
3368 paths.folder_path_list(),
3369 &PathList::new(&[
3370 Path::new("/worktrees/selectric/zed"),
3371 Path::new("/worktrees/feature/zed"),
3372 ])
3373 );
3374 assert_eq!(
3375 paths.main_worktree_path_list(),
3376 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
3377 );
3378 }
3379
3380 #[test]
3381 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3382 let folder = PathList::new(&[
3383 Path::new("/worktrees/selectric/zed"),
3384 Path::new("/projects/cloud"),
3385 ]);
3386 let main = PathList::new(&[Path::new("/projects/zed")]);
3387
3388 let result = WorktreePaths::from_path_lists(main, folder);
3389 assert!(result.is_err());
3390 }
3391}