1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent::{ThreadStore, ZED_AGENT_ID};
7use agent_client_protocol::schema as acp;
8use anyhow::Context as _;
9use chrono::{DateTime, Utc};
10use collections::{HashMap, HashSet};
11use db::{
12 kvp::KeyValueStore,
13 sqlez::{
14 bindable::{Bind, Column},
15 domain::Domain,
16 statement::Statement,
17 thread_safe_connection::ThreadSafeConnection,
18 },
19 sqlez_macros::sql,
20};
21use fs::Fs;
22use futures::{FutureExt, future::Shared};
23use gpui::{AppContext as _, Entity, Global, Subscription, Task};
24pub use project::WorktreePaths;
25use project::{AgentId, linked_worktree_short_name};
26use remote::{RemoteConnectionOptions, same_remote_connection_identity};
27use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
28use util::{ResultExt as _, debug_panic};
29use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
30
31use crate::DEFAULT_THREAD_TITLE;
32
/// Stable, globally-unique identifier for a sidebar thread, independent of
/// any ACP session id. Backed by a UUID and stored directly in SQLite.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);

impl ThreadId {
    /// Creates a fresh random (v4) thread id.
    pub fn new() -> Self {
        Self(uuid::Uuid::new_v4())
    }
}

// `Bind`/`Column` let `ThreadId` be written to and read from SQLite
// statements directly, delegating to the inner `Uuid`'s encoding.
impl Bind for ThreadId {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}

impl Column for ThreadId {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (uuid, next) = Column::column(statement, start_index)?;
        Ok((ThreadId(uuid), next))
    }
}
54
// KVP keys recording that the corresponding one-shot migration has already
// run; each migration writes "1" under its key once it completes.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";
57
58/// List all sidebar thread metadata from an arbitrary SQLite connection.
59///
60/// This is used to read thread metadata from another release channel's
61/// database without opening a full `ThreadSafeConnection`.
62pub(crate) fn list_thread_metadata_from_connection(
63 connection: &db::sqlez::connection::Connection,
64) -> anyhow::Result<Vec<ThreadMetadata>> {
65 connection.select::<ThreadMetadata>(ThreadMetadataDb::LIST_QUERY)?()
66}
67
/// Run the `ThreadMetadataDb` migrations on a raw connection.
///
/// This is used in tests to set up the sidebar_threads schema in a
/// temporary database.
#[cfg(test)]
pub(crate) fn run_thread_metadata_migrations(connection: &db::sqlez::connection::Connection) {
    let result = connection.migrate(
        ThreadMetadataDb::NAME,
        ThreadMetadataDb::MIGRATIONS,
        &mut |_, _, _| false,
    );
    result.expect("thread metadata migrations should succeed");
}
82
83pub fn init(cx: &mut App) {
84 ThreadMetadataStore::init_global(cx);
85 let migration_task = migrate_thread_metadata(cx);
86 migrate_thread_remote_connections(cx, migration_task);
87 migrate_thread_ids(cx);
88}
89
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();
    let thread_store = ThreadStore::global(cx);
    let thread_store_ready = thread_store.read(cx).reload_task();

    cx.spawn(async move |cx| {
        // Wait for `ThreadStore`'s initial reload to complete. Without this,
        // reading `entries()` races with the store's async population from
        // disk and usually observes an empty iterator, silently skipping the
        // migration on every launch. The regression test
        // `test_migration_awaits_thread_store_reload` pins this behavior.
        thread_store_ready.await;

        // Skip entries that were already migrated, matched by ACP session id.
        let existing_list = db.list()?;
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        let mut to_migrate = thread_store.read_with(cx, |store, _cx| {
            store
                .entries()
                .filter_map(|entry| {
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Persist `None` instead of an empty/placeholder
                        // title so `display_title` supplies the default.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        interacted_at: None,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        // Default to archived; the rescue pass below
                        // unarchives the most recent few per project.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // For each batch of newly-migrated threads, keep the 5 most recent
        // per project unarchived. Previously this was gated on
        // `is_first_migration` (an empty `sidebar_threads`), which meant any
        // subsequent batch of newly-discovered legacy threads got migrated as
        // fully archived. Running the rescue per-batch keeps the behavior
        // idempotent across partial migrations and re-runs.
        let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
        for entry in &mut to_migrate {
            if entry.worktree_paths.is_empty() {
                continue;
            }
            per_project
                .entry(entry.worktree_paths.folder_path_list().clone())
                .or_default()
                .push(entry);
        }
        for entries in per_project.values_mut() {
            // Most-recently-updated first; only the first 5 stay unarchived.
            entries.sort_by_key(|entry| std::cmp::Reverse(entry.updated_at));
            for entry in entries.iter_mut().take(5) {
                entry.archived = false;
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
185
186fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
187 let store = ThreadMetadataStore::global(cx);
188 let db = store.read(cx).db.clone();
189 let kvp = KeyValueStore::global(cx);
190 let workspace_db = WorkspaceDb::global(cx);
191 let fs = <dyn Fs>::global(cx);
192
193 cx.spawn(async move |cx| -> anyhow::Result<()> {
194 migration_task.await?;
195
196 if kvp
197 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
198 .is_some()
199 {
200 return Ok(());
201 }
202
203 let recent_workspaces = workspace_db.recent_project_workspaces(fs.as_ref()).await?;
204
205 let mut local_path_lists = HashSet::<PathList>::default();
206 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
207
208 recent_workspaces
209 .iter()
210 .filter(|(_, location, path_list, _)| {
211 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
212 })
213 .for_each(|(_, _, path_list, _)| {
214 local_path_lists.insert(path_list.clone());
215 });
216
217 for (_, location, path_list, _) in recent_workspaces {
218 match location {
219 SerializedWorkspaceLocation::Remote(remote_connection)
220 if !local_path_lists.contains(&path_list) =>
221 {
222 remote_path_lists
223 .entry(path_list)
224 .or_insert(remote_connection);
225 }
226 _ => {}
227 }
228 }
229
230 let mut reloaded = false;
231 for metadata in db.list()? {
232 if metadata.remote_connection.is_some() {
233 continue;
234 }
235
236 if let Some(remote_connection) = remote_path_lists
237 .get(metadata.folder_paths())
238 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
239 {
240 db.save(ThreadMetadata {
241 remote_connection: Some(remote_connection.clone()),
242 ..metadata
243 })
244 .await?;
245 reloaded = true;
246 }
247 }
248
249 let reloaded_task = reloaded
250 .then_some(store.update(cx, |store, cx| store.reload(cx)))
251 .unwrap_or(Task::ready(()).shared());
252
253 kvp.write_kvp(
254 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
255 "1".to_string(),
256 )
257 .await?;
258 reloaded_task.await;
259
260 Ok(())
261 })
262 .detach_and_log_err(cx);
263}
264
265fn migrate_thread_ids(cx: &mut App) {
266 let store = ThreadMetadataStore::global(cx);
267 let db = store.read(cx).db.clone();
268 let kvp = KeyValueStore::global(cx);
269
270 cx.spawn(async move |cx| -> anyhow::Result<()> {
271 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
272 return Ok(());
273 }
274
275 let mut reloaded = false;
276 for metadata in db.list()? {
277 db.save(metadata).await?;
278 reloaded = true;
279 }
280
281 let reloaded_task = reloaded
282 .then_some(store.update(cx, |store, cx| store.reload(cx)))
283 .unwrap_or(Task::ready(()).shared());
284
285 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
286 .await?;
287 reloaded_task.await;
288
289 Ok(())
290 })
291 .detach_and_log_err(cx);
292}
293
/// Newtype wrapper registering the store entity as the GPUI global.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
296
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier for this thread in the metadata store.
    pub thread_id: ThreadId,
    /// ACP session id, when one exists; `save_internal` rejects metadata
    /// without one.
    pub session_id: Option<acp::SessionId>,
    /// The agent that owns the thread.
    pub agent_id: AgentId,
    /// User-visible title; `None` means "use the default title".
    pub title: Option<SharedString>,
    pub updated_at: DateTime<Utc>,
    pub created_at: Option<DateTime<Utc>>,
    /// When a user last interacted to send a message (including queueing).
    /// Doesn't include the time when a queued message is fired.
    pub interacted_at: Option<DateTime<Utc>>,
    /// The thread's folder paths together with their main-worktree pairing.
    pub worktree_paths: WorktreePaths,
    /// Present when the thread belongs to a remote project.
    pub remote_connection: Option<RemoteConnectionOptions>,
    /// Archived threads are excluded from the per-path entry queries.
    pub archived: bool,
}
314
315impl ThreadMetadata {
316 pub fn display_title(&self) -> SharedString {
317 self.title
318 .clone()
319 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
320 }
321
322 pub fn folder_paths(&self) -> &PathList {
323 self.worktree_paths.folder_path_list()
324 }
325 pub fn main_worktree_paths(&self) -> &PathList {
326 self.worktree_paths.main_worktree_path_list()
327 }
328}
329
/// Derives worktree display info from a thread's stored path list.
///
/// For each path in the thread's `folder_paths`, produces a
/// [`ThreadItemWorktreeInfo`] with a short display name, full path, and whether
/// the worktree is the main checkout or a linked git worktree. When
/// multiple main paths exist and a linked worktree's short name alone
/// wouldn't identify which main project it belongs to, the main project
/// name is prefixed for disambiguation (e.g. `project:feature`).
pub fn worktree_info_from_thread_paths<S: std::hash::BuildHasher>(
    worktree_paths: &WorktreePaths,
    branch_names: &std::collections::HashMap<PathBuf, SharedString, S>,
) -> Vec<ThreadItemWorktreeInfo> {
    let mut infos: Vec<ThreadItemWorktreeInfo> = Vec::new();
    // Parallel list of (short_name, main_project_name) for linked
    // worktrees, pushed in the same order as the `Linked` entries in
    // `infos`; the disambiguation pass below relies on this 1:1 zip.
    let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new();
    // Set of distinct main worktree paths (despite the `_count` name).
    let mut unique_main_count = HashSet::default();

    for (main_path, folder_path) in worktree_paths.ordered_pairs() {
        unique_main_count.insert(main_path.clone());
        // A folder that differs from its main path is a linked git worktree.
        let is_linked = main_path != folder_path;

        if is_linked {
            let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default();
            let project_name = main_path
                .file_name()
                .map(|n| SharedString::from(n.to_string_lossy().to_string()))
                .unwrap_or_default();
            linked_short_names.push((short_name.clone(), project_name));
            infos.push(ThreadItemWorktreeInfo {
                worktree_name: Some(short_name),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Linked,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        } else {
            // Paths without a final component (e.g. `/`) are skipped.
            let Some(name) = folder_path.file_name() else {
                continue;
            };
            infos.push(ThreadItemWorktreeInfo {
                worktree_name: Some(SharedString::from(name.to_string_lossy().to_string())),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Main,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        }
    }

    // When the group has multiple main worktree paths and the thread's
    // folder paths don't all share the same short name, prefix each
    // linked worktree chip with its main project name so the user knows
    // which project it belongs to.
    let all_same_name = infos.len() > 1
        && infos
            .iter()
            .all(|i| i.worktree_name == infos[0].worktree_name);

    // NOTE(review): `all_same_name` *suppresses* prefixing when every chip
    // already shares one name — confirm that's intended, since
    // identically-named chips from different projects stay ambiguous in
    // that case.
    if unique_main_count.len() > 1 && !all_same_name {
        for (info, (_short_name, project_name)) in infos
            .iter_mut()
            .filter(|i| i.kind == WorktreeKind::Linked)
            .zip(linked_short_names.iter())
        {
            if let Some(name) = &info.worktree_name {
                info.worktree_name = Some(SharedString::from(format!("{}:{}", project_name, name)));
            }
        }
    }

    infos
}
401
402impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
403 fn from(meta: &ThreadMetadata) -> Self {
404 let session_id = meta
405 .session_id
406 .clone()
407 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
408 Self {
409 session_id,
410 work_dirs: Some(meta.folder_paths().clone()),
411 title: meta.title.clone(),
412 updated_at: Some(meta.updated_at),
413 created_at: meta.created_at,
414 meta: None,
415 }
416 }
417}
418
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// Rows are created via `ThreadMetadataStore::create_archived_worktree` and
/// associated with threads via `link_thread_to_archived_worktree`.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
460
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    /// Persistent backing store; reloads and migrations read/write it
    /// directly, regular saves go through `pending_thread_ops_tx`.
    db: ThreadMetadataDb,
    /// Primary in-memory cache, keyed by thread id.
    threads: HashMap<ThreadId, ThreadMetadata>,
    /// Secondary index: folder path list -> thread ids.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Secondary index: main-worktree path list -> thread ids.
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Secondary index: ACP session id -> thread id.
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    /// In-progress (shared) reload from the database, if any.
    reload_task: Option<Shared<Task<()>>>,
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    /// Queue of pending database writes — presumably drained by
    /// `_db_operations_task` (drain loop not visible in this chunk).
    pending_thread_ops_tx: async_channel::Sender<DbOperation>,
    /// Background archive jobs; dropping the Sender cancels the job.
    in_flight_archives: HashMap<ThreadId, (Task<()>, async_channel::Sender<()>)>,
    _db_operations_task: Task<()>,
}
476
/// A pending database write queued on `pending_thread_ops_tx`.
#[derive(Debug, PartialEq)]
enum DbOperation {
    Upsert(ThreadMetadata),
    Delete(ThreadId),
}
482
483impl DbOperation {
484 fn id(&self) -> ThreadId {
485 match self {
486 DbOperation::Upsert(thread) => thread.thread_id,
487 DbOperation::Delete(thread_id) => *thread_id,
488 }
489 }
490}
491
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}

#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// Returns the override if set, otherwise derives a name from the
    /// current thread's name (which normally carries the test name).
    pub fn global(cx: &App) -> String {
        match cx.try_global::<Self>() {
            Some(name_override) => name_override.0.clone(),
            None => {
                let current = std::thread::current();
                let test_name = current.name().unwrap_or("unknown_test");
                format!("THREAD_METADATA_DB_{}", test_name)
            }
        }
    }
}
513
514impl ThreadMetadataStore {
515 #[cfg(not(any(test, feature = "test-support")))]
516 pub fn init_global(cx: &mut App) {
517 if cx.has_global::<Self>() {
518 return;
519 }
520
521 let db = ThreadMetadataDb::global(cx);
522 let thread_store = cx.new(|cx| Self::new(db, cx));
523 cx.set_global(GlobalThreadMetadataStore(thread_store));
524 }
525
    /// Test variant: always (re)creates the store against a per-test
    /// database named via `TestMetadataDbName`.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let db_name = TestMetadataDbName::global(cx);
        let db = gpui::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
533
534 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
535 cx.try_global::<GlobalThreadMetadataStore>()
536 .map(|store| store.0.clone())
537 }
538
539 pub fn global(cx: &App) -> Entity<Self> {
540 cx.global::<GlobalThreadMetadataStore>().0.clone()
541 }
542
543 pub fn is_empty(&self) -> bool {
544 self.threads.is_empty()
545 }
546
547 /// Returns all thread IDs.
548 pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
549 self.threads.keys().copied()
550 }
551
552 /// Returns the metadata for a specific thread, if it exists.
553 pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
554 self.threads.get(&thread_id)
555 }
556
557 /// Returns the metadata for a thread identified by its ACP session ID.
558 pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
559 let thread_id = self.threads_by_session.get(session_id)?;
560 self.threads.get(thread_id)
561 }
562
563 /// Returns all threads.
564 pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
565 self.threads.values()
566 }
567
568 /// Returns all archived threads.
569 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
570 self.entries().filter(|t| t.archived)
571 }
572
573 /// Returns all threads for the given path list and remote connection,
574 /// excluding archived threads.
575 ///
576 /// When `remote_connection` is `Some`, only threads whose persisted
577 /// `remote_connection` matches by normalized identity are returned.
578 /// When `None`, only local (non-remote) threads are returned.
579 pub fn entries_for_path<'a>(
580 &'a self,
581 path_list: &PathList,
582 remote_connection: Option<&'a RemoteConnectionOptions>,
583 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
584 self.threads_by_paths
585 .get(path_list)
586 .into_iter()
587 .flatten()
588 .filter_map(|s| self.threads.get(s))
589 .filter(|s| !s.archived)
590 .filter(move |s| {
591 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
592 })
593 }
594
595 /// Returns threads whose `main_worktree_paths` matches the given path list
596 /// and remote connection, excluding archived threads. This finds threads
597 /// that were opened in a linked worktree but are associated with the given
598 /// main worktree.
599 ///
600 /// When `remote_connection` is `Some`, only threads whose persisted
601 /// `remote_connection` matches by normalized identity are returned.
602 /// When `None`, only local (non-remote) threads are returned.
603 pub fn entries_for_main_worktree_path<'a>(
604 &'a self,
605 path_list: &PathList,
606 remote_connection: Option<&'a RemoteConnectionOptions>,
607 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
608 self.threads_by_main_paths
609 .get(path_list)
610 .into_iter()
611 .flatten()
612 .filter_map(|s| self.threads.get(s))
613 .filter(|s| !s.archived)
614 .filter(move |s| {
615 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
616 })
617 }
618
    /// Replaces the in-memory caches with a fresh read of the database.
    ///
    /// Returns a `Shared` task so multiple callers can await the same
    /// reload; the handle is also stored in `self.reload_task`.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previously-stored reload handle; a new one replaces it.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild every index from scratch so stale entries
                    // can't survive a reload.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        this.cache_thread_metadata(row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
650
    /// Saves several threads, notifying observers once at the end.
    pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
        for metadata in metadata {
            self.save_internal(metadata);
        }
        cx.notify();
    }

    /// Saves a single thread and notifies observers.
    pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }

    /// Updates the in-memory caches/indexes for `metadata` and queues a
    /// database upsert. Metadata without a `session_id` is rejected
    /// (debug-panics and returns).
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        if metadata.session_id.is_none() {
            debug_panic!("cannot store thread metadata without a session_id");
            return;
        };

        // If the thread's paths changed, unlink it from the indexes keyed
        // by the *old* paths before re-caching under the new ones.
        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        self.cache_thread_metadata(metadata.clone());
        // Persist asynchronously via the DB-operation channel.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
692
693 fn cache_thread_metadata(&mut self, metadata: ThreadMetadata) {
694 let Some(session_id) = metadata.session_id.as_ref() else {
695 debug_panic!("cannot store thread metadata without a session_id");
696 return;
697 };
698
699 self.threads_by_session
700 .insert(session_id.clone(), metadata.thread_id);
701
702 self.threads.insert(metadata.thread_id, metadata.clone());
703
704 self.threads_by_paths
705 .entry(metadata.folder_paths().clone())
706 .or_default()
707 .insert(metadata.thread_id);
708
709 if !metadata.main_worktree_paths().is_empty() {
710 self.threads_by_main_paths
711 .entry(metadata.main_worktree_paths().clone())
712 .or_default()
713 .insert(metadata.thread_id);
714 }
715 }
716
    /// Replaces a thread's working directories with `work_dirs`, keeping the
    /// thread's existing main-worktree pairing when it can be rebuilt.
    /// No-op for unknown thread ids; debug-asserts the thread isn't archived.
    pub fn update_working_directories(
        &mut self,
        thread_id: ThreadId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id) {
            debug_assert!(
                !thread.archived,
                "update_working_directories called on archived thread"
            );
            self.save_internal(ThreadMetadata {
                // Fall back to plain folder paths if the main/folder
                // pairing can't be reconstructed for the new dirs.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }
739
740 pub fn update_worktree_paths(
741 &mut self,
742 thread_ids: &[ThreadId],
743 worktree_paths: WorktreePaths,
744 cx: &mut Context<Self>,
745 ) {
746 let mut changed = false;
747 for &thread_id in thread_ids {
748 let Some(thread) = self.threads.get(&thread_id) else {
749 continue;
750 };
751 if thread.worktree_paths == worktree_paths {
752 continue;
753 }
754 // Don't overwrite paths for archived threads — the
755 // project may no longer include the worktree that was
756 // removed during the archive flow.
757 if thread.archived {
758 continue;
759 }
760 self.save_internal(ThreadMetadata {
761 worktree_paths: worktree_paths.clone(),
762 ..thread.clone()
763 });
764 changed = true;
765 }
766 if changed {
767 cx.notify();
768 }
769 }
770
771 pub fn update_interacted_at(
772 &mut self,
773 thread_id: &ThreadId,
774 time: DateTime<Utc>,
775 cx: &mut Context<Self>,
776 ) {
777 if let Some(thread) = self.threads.get(thread_id) {
778 self.save_internal(ThreadMetadata {
779 interacted_at: Some(time),
780 ..thread.clone()
781 });
782 cx.notify();
783 };
784 }
785
786 pub fn archive(
787 &mut self,
788 thread_id: ThreadId,
789 archive_job: Option<(Task<()>, async_channel::Sender<()>)>,
790 cx: &mut Context<Self>,
791 ) {
792 self.update_archived(thread_id, true, cx);
793
794 if let Some(job) = archive_job {
795 self.in_flight_archives.insert(thread_id, job);
796 }
797
798 cx.emit(ThreadMetadataStoreEvent::ThreadArchived(thread_id));
799 }
800
801 pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
802 self.update_archived(thread_id, false, cx);
803 // Dropping the Sender triggers cancellation in the background task.
804 self.in_flight_archives.remove(&thread_id);
805 }
806
807 pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
808 self.in_flight_archives.remove(&thread_id);
809 }
810
    /// Returns `true` if any unarchived thread other than `thread_id`
    /// (under the same remote-connection identity) references `path` in its
    /// folder paths. Used to determine whether a worktree can safely be
    /// removed from disk.
    pub fn path_is_referenced_by_other_unarchived_threads(
        &self,
        thread_id: ThreadId,
        path: &Path,
        remote_connection: Option<&RemoteConnectionOptions>,
    ) -> bool {
        self.entries().any(|thread| {
            thread.thread_id != thread_id
                && !thread.archived
                && same_remote_connection_identity(
                    thread.remote_connection.as_ref(),
                    remote_connection,
                )
                && thread
                    .folder_paths()
                    .paths()
                    .iter()
                    .any(|other_path| other_path.as_path() == path)
        })
    }
834
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    ///
    /// Note: replaces only the *first* occurrence of each `old_path`;
    /// `complete_worktree_restore` replaces every occurrence.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Re-pair with the existing main-worktree list; fall back
                // to plain folder paths if the pairing can't be rebuilt.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }

    /// Finalizes a worktree restore by applying `(old_path, new_path)`
    /// replacements to the thread's stored folder paths.
    ///
    /// NOTE(review): near-duplicate of `update_restored_worktree_paths`;
    /// the only behavioral difference is that this replaces *every*
    /// occurrence of each `old_path` rather than just the first. Consider
    /// consolidating the two once the intended semantics are confirmed.
    pub fn complete_worktree_restore(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
892
893 /// Apply a mutation to the worktree paths of all threads whose current
894 /// `folder_paths` matches `current_folder_paths`, then re-index.
895 /// When `remote_connection` is provided, only threads with a matching
896 /// remote connection are affected.
897 pub fn change_worktree_paths(
898 &mut self,
899 current_folder_paths: &PathList,
900 remote_connection: Option<&RemoteConnectionOptions>,
901 mutate: impl Fn(&mut WorktreePaths),
902 cx: &mut Context<Self>,
903 ) {
904 let thread_ids: Vec<_> = self
905 .threads_by_paths
906 .get(current_folder_paths)
907 .into_iter()
908 .flatten()
909 .filter(|id| {
910 self.threads.get(id).is_some_and(|t| {
911 !t.archived
912 && same_remote_connection_identity(
913 t.remote_connection.as_ref(),
914 remote_connection,
915 )
916 })
917 })
918 .copied()
919 .collect();
920
921 self.mutate_thread_paths(&thread_ids, mutate, cx);
922 }
923
    /// Applies `mutate` to each listed thread's `WorktreePaths` in place,
    /// keeping the path-keyed indexes consistent and queueing a DB upsert
    /// for each touched thread. Returns without notifying when the id list
    /// is empty.
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // Unlink from the indexes keyed by the current (old)
                // paths before mutating...
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                // ...then re-link under the new paths.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                // Persist the mutated metadata asynchronously.
                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }
965
    /// Persists a record describing how to restore a deleted git worktree;
    /// resolves to the new record's id.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Associates a thread with an archived-worktree record.
    pub fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
                .await
        })
    }

    /// Fetches the archived-worktree records linked to a thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
    }

    /// Deletes an archived-worktree record by id.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Removes all links between a thread and archived-worktree records.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(thread_id)
                .await
        })
    }

    /// Whether any thread still links to the given archived-worktree record.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }

    /// Loads archived branch names, grouped by thread and worktree path.
    pub fn get_all_archived_branch_names(
        &self,
        cx: &App,
    ) -> Task<anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_all_archived_branch_names() })
    }
1048
1049 fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
1050 if let Some(thread) = self.threads.get(&thread_id) {
1051 self.save_internal(ThreadMetadata {
1052 archived,
1053 ..thread.clone()
1054 });
1055 cx.notify();
1056 }
1057 }
1058
1059 pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
1060 if let Some(thread) = self.threads.get(&thread_id) {
1061 if let Some(sid) = &thread.session_id {
1062 self.threads_by_session.remove(sid);
1063 }
1064 if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
1065 thread_ids.remove(&thread_id);
1066 }
1067 if !thread.main_worktree_paths().is_empty() {
1068 if let Some(thread_ids) = self
1069 .threads_by_main_paths
1070 .get_mut(thread.main_worktree_paths())
1071 {
1072 thread_ids.remove(&thread_id);
1073 }
1074 }
1075 }
1076 self.threads.remove(&thread_id);
1077 self.pending_thread_ops_tx
1078 .try_send(DbOperation::Delete(thread_id))
1079 .log_err();
1080 cx.notify();
1081 }
1082
    /// Builds the store: wires up global observation of newly created
    /// `ConversationView`s and starts the background task that drains
    /// queued database operations, then loads the caches via `reload`.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Subscribe to every conversation view created in the app so that
        // thread metadata can be kept in sync with its root thread.
        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            // Remove the stored subscription once the view is released, so
            // the map does not accumulate entries for dead views.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        // Database writes are funneled through an unbounded channel; the
        // background task drains whatever has accumulated, dedups the batch,
        // then applies the surviving operations one by one.
        let (tx, rx) = async_channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    // Batch everything currently queued behind the first item.
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate the in-memory caches from the database.
        let _ = this.reload(cx);
        this
    }
1151
1152 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
1153 let mut ops = HashMap::default();
1154 for operation in operations.into_iter().rev() {
1155 if ops.contains_key(&operation.id()) {
1156 continue;
1157 }
1158 ops.insert(operation.id(), operation);
1159 }
1160 ops.into_values().collect()
1161 }
1162
1163 fn handle_conversation_event(
1164 &mut self,
1165 conversation_view: Entity<crate::ConversationView>,
1166 _event: &crate::conversation_view::RootThreadUpdated,
1167 cx: &mut Context<Self>,
1168 ) {
1169 let view = conversation_view.read(cx);
1170 let thread_id = view.thread_id;
1171 let Some(thread) = view.root_thread(cx) else {
1172 return;
1173 };
1174
1175 let thread_ref = thread.read(cx);
1176 if thread_ref.is_draft_thread() || thread_ref.project().read(cx).is_via_collab() {
1177 return;
1178 }
1179
1180 let existing_thread = self.entry(thread_id);
1181 let session_id = Some(thread_ref.session_id().clone());
1182 let title = thread_ref.title();
1183
1184 let updated_at = Utc::now();
1185
1186 let created_at = existing_thread
1187 .and_then(|t| t.created_at)
1188 .unwrap_or_else(|| updated_at);
1189
1190 let interacted_at = existing_thread
1191 .map(|t| t.interacted_at)
1192 .unwrap_or(Some(updated_at));
1193
1194 let agent_id = thread_ref.connection().agent_id();
1195
1196 // Preserve project-dependent fields for archived threads.
1197 // The worktree may already have been removed from the
1198 // project as part of the archive flow, so re-evaluating
1199 // these from the current project state would yield
1200 // empty/incorrect results.
1201 let (worktree_paths, remote_connection) =
1202 if let Some(existing) = existing_thread.filter(|t| t.archived) {
1203 (
1204 existing.worktree_paths.clone(),
1205 existing.remote_connection.clone(),
1206 )
1207 } else {
1208 let project = thread_ref.project().read(cx);
1209 let worktree_paths = project.worktree_paths(cx);
1210 let remote_connection = project.remote_connection_options(cx);
1211
1212 (worktree_paths, remote_connection)
1213 };
1214
1215 // Threads without a folder path (e.g. started in an empty
1216 // window) are archived by default so they don't get lost,
1217 // because they won't show up in the sidebar. Users can reload
1218 // them from the archive.
1219 let archived = existing_thread
1220 .map(|t| t.archived)
1221 .unwrap_or(worktree_paths.is_empty());
1222
1223 let metadata = ThreadMetadata {
1224 thread_id,
1225 session_id,
1226 agent_id,
1227 title,
1228 created_at: Some(created_at),
1229 interacted_at,
1230 updated_at,
1231 worktree_paths,
1232 remote_connection,
1233 archived,
1234 };
1235
1236 self.save(metadata, cx);
1237 }
1238}
1239
impl Global for ThreadMetadataStore {}

/// Events emitted by the store for interested observers.
#[derive(Clone, Debug)]
pub enum ThreadMetadataStoreEvent {
    /// A thread was archived.
    ThreadArchived(ThreadId),
}

impl gpui::EventEmitter<ThreadMetadataStoreEvent> for ThreadMetadataStore {}

/// SQLite-backed persistence layer for sidebar thread metadata and
/// archived git worktrees.
struct ThreadMetadataDb(ThreadSafeConnection);
1250
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Migrations run in order; new entries should only ever be appended.
    const MIGRATIONS: &[&str] = &[
        // Original schema: one row per sidebar thread, keyed by session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // Archiving support.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        // Track main worktree paths separately from folder paths.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Archived git worktrees, plus a session-keyed link table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        // Remember the remote connection a thread was created under.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        // Introduce a stable, session-independent thread id.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // Re-key both tables on thread_id instead of session_id, carrying
        // all existing rows over into the _v2 replacements.
        sql!(
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
        // Purge rows without a session id and any archived worktrees that
        // are no longer referenced afterwards.
        sql!(
            DELETE FROM thread_archived_worktrees
            WHERE thread_id IN (
                SELECT thread_id FROM sidebar_threads WHERE session_id IS NULL
            );

            DELETE FROM sidebar_threads WHERE session_id IS NULL;

            DELETE FROM archived_git_worktrees
            WHERE id NOT IN (
                SELECT archived_worktree_id FROM thread_archived_worktrees
            );
        ),
        // Track when the user last interacted with a thread.
        sql!(
            ALTER TABLE sidebar_threads ADD COLUMN interacted_at TEXT;
        ),
    ];
}
1345
// Registers `ThreadMetadataDb` as a statically accessible database connection.
db::static_connection!(ThreadMetadataDb, []);
1347
impl ThreadMetadataDb {
    /// List the ids of all threads that have a session, most recently
    /// updated first.
    #[allow(dead_code)]
    pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
        self.select::<ThreadId>(
            "SELECT thread_id FROM sidebar_threads \
            WHERE session_id IS NOT NULL \
            ORDER BY updated_at DESC",
        )?()
    }

    // NOTE: the column order here must stay in sync with the
    // `Column for ThreadMetadata` implementation below.
    const LIST_QUERY: &str = "SELECT thread_id, session_id, agent_id, title, updated_at, \
        created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, \
        main_worktree_paths_order, remote_connection \
        FROM sidebar_threads \
        WHERE session_id IS NOT NULL \
        ORDER BY updated_at DESC";

    /// List all sidebar thread metadata, ordered by updated_at descending.
    ///
    /// Only returns threads that have a `session_id`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(Self::LIST_QUERY)?()
    }

    /// Upsert metadata for a thread.
    ///
    /// Fails fast if the metadata has no `session_id`, mirroring the
    /// `session_id IS NOT NULL` filter used by the list queries.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        anyhow::ensure!(
            row.session_id.is_some(),
            "refusing to persist thread metadata without a session_id"
        );

        let session_id = row.session_id.as_ref().map(|s| s.0.clone());
        // The built-in Zed agent is stored as NULL; the read path restores
        // the default (see `Column for ThreadMetadata`).
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row
            .title
            .as_ref()
            .map(|t| t.to_string())
            .unwrap_or_default();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let interacted_at = row.interacted_at.map(|dt| dt.to_rfc3339());
        // Empty path lists are stored as NULL rather than as empty strings.
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let thread_id = row.thread_id;
        let archived = row.archived;

        self.write(move |conn| {
            // Upsert keyed on thread_id; every other column is overwritten.
            let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) \
                ON CONFLICT(thread_id) DO UPDATE SET \
                session_id = excluded.session_id, \
                agent_id = excluded.agent_id, \
                title = excluded.title, \
                updated_at = excluded.updated_at, \
                created_at = excluded.created_at, \
                interacted_at = excluded.interacted_at, \
                folder_paths = excluded.folder_paths, \
                folder_paths_order = excluded.folder_paths_order, \
                archived = excluded.archived, \
                main_worktree_paths = excluded.main_worktree_paths, \
                main_worktree_paths_order = excluded.main_worktree_paths_order, \
                remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order matches the ?1..?13 placeholders above.
            let mut i = stmt.bind(&thread_id, 1)?;
            i = stmt.bind(&session_id, i)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&interacted_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert a record of an archived git worktree and return its row id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // RETURNING yields exactly one row containing the new id.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Link a thread to an archived worktree in the join table.
    pub async fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
                VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&thread_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetch all archived worktrees linked to the given thread.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        // Column order must match `Column for ArchivedGitWorktree`.
        self.select_bound::<ThreadId, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
            FROM archived_git_worktrees a \
            JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
            WHERE t.thread_id = ?1",
        )?(thread_id)
    }

    /// Delete an archived worktree record along with any thread links to it.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Remove link rows first, then the worktree record itself.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Remove every archived-worktree link for the given thread; the
    /// worktree records themselves are left untouched.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
            )?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Whether any thread still links to the given archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }

    /// Collect, per thread, the branch names of its archived worktrees
    /// keyed by worktree path. Rows are ordered by id ascending, so a
    /// later archive of the same path overwrites an earlier one.
    pub fn get_all_archived_branch_names(
        &self,
    ) -> anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>> {
        let rows = self.select::<(ThreadId, String, String)>(
            "SELECT t.thread_id, a.worktree_path, a.branch_name \
            FROM thread_archived_worktrees t \
            JOIN archived_git_worktrees a ON a.id = t.archived_worktree_id \
            WHERE a.branch_name IS NOT NULL \
            ORDER BY a.id ASC",
        )?()?;

        let mut result: HashMap<ThreadId, HashMap<PathBuf, String>> = HashMap::default();
        for (thread_id, worktree_path, branch_name) in rows {
            result
                .entry(thread_id)
                .or_default()
                .insert(PathBuf::from(worktree_path), branch_name);
        }
        Ok(result)
    }
}
1581
// Decodes a `ThreadMetadata` row. The column order below must match
// `ThreadMetadataDb::LIST_QUERY` exactly.
impl Column for ThreadMetadata {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
        let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
        let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (title, next): (String, i32) = Column::column(statement, next)?;
        let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
        let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (interacted_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (archived, next): (bool, i32) = Column::column(statement, next)?;
        let (main_worktree_paths_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (main_worktree_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (remote_connection_json, next): (Option<String>, i32) =
            Column::column(statement, next)?;

        // NULL agent_id means the built-in Zed agent (see `save`).
        let agent_id = agent_id
            .map(|id| AgentId::new(id))
            .unwrap_or(ZED_AGENT_ID.clone());

        // Timestamps are persisted as RFC 3339 and normalized to UTC.
        let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
        let created_at = created_at_str
            .as_deref()
            .map(DateTime::parse_from_rfc3339)
            .transpose()?
            .map(|dt| dt.with_timezone(&Utc));

        let interacted_at = interacted_at_str
            .as_deref()
            .map(DateTime::parse_from_rfc3339)
            .transpose()?
            .map(|dt| dt.with_timezone(&Utc));

        // NULL path columns deserialize to empty path lists.
        let folder_paths = folder_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: folder_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let main_worktree_paths = main_worktree_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: main_worktree_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let remote_connection = remote_connection_json
            .as_deref()
            .map(serde_json::from_str::<RemoteConnectionOptions>)
            .transpose()
            .context("deserialize thread metadata remote connection")?;

        // Fall back to empty paths if the stored lists can't be combined.
        let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
            .unwrap_or_else(|_| WorktreePaths::default());

        let thread_id = ThreadId(thread_id_uuid);

        Ok((
            ThreadMetadata {
                thread_id,
                session_id: id.map(acp::SessionId::new),
                agent_id,
                // Empty or default titles are surfaced as "no title".
                title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
                    None
                } else {
                    Some(title.into())
                },
                updated_at,
                created_at,
                interacted_at,
                worktree_paths,
                remote_connection,
                archived,
            },
            next,
        ))
    }
}
1669
1670impl Column for ArchivedGitWorktree {
1671 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1672 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1673 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1674 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1675 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1676 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1677 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1678 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1679
1680 Ok((
1681 ArchivedGitWorktree {
1682 id,
1683 worktree_path: PathBuf::from(worktree_path_str),
1684 main_repo_path: PathBuf::from(main_repo_path_str),
1685 branch_name,
1686 staged_commit_hash,
1687 unstaged_commit_hash,
1688 original_commit_hash,
1689 },
1690 next,
1691 ))
1692 }
1693}
1694
1695#[cfg(test)]
1696mod tests {
1697 use super::*;
1698 use acp_thread::StubAgentConnection;
1699 use action_log::ActionLog;
1700 use agent::DbThread;
1701 use agent_client_protocol::schema as acp;
1702 use gpui::{TestAppContext, VisualTestContext};
1703 use project::FakeFs;
1704 use project::Project;
1705 use remote::WslConnectionOptions;
1706 use std::path::Path;
1707 use std::rc::Rc;
1708 use workspace::MultiWorkspace;
1709
1710 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1711 DbThread {
1712 title: title.to_string().into(),
1713 messages: Vec::new(),
1714 updated_at,
1715 detailed_summary: None,
1716 initial_project_snapshot: None,
1717 cumulative_token_usage: Default::default(),
1718 request_token_usage: Default::default(),
1719 model: None,
1720 profile: None,
1721 imported: false,
1722 subagent_context: None,
1723 speed: None,
1724 thinking_enabled: false,
1725 thinking_effort: None,
1726 draft_prompt: None,
1727 ui_scroll_position: None,
1728 }
1729 }
1730
1731 fn make_metadata(
1732 session_id: &str,
1733 title: &str,
1734 updated_at: DateTime<Utc>,
1735 folder_paths: PathList,
1736 ) -> ThreadMetadata {
1737 ThreadMetadata {
1738 thread_id: ThreadId::new(),
1739 archived: false,
1740 session_id: Some(acp::SessionId::new(session_id)),
1741 agent_id: agent::ZED_AGENT_ID.clone(),
1742 title: if title.is_empty() {
1743 None
1744 } else {
1745 Some(title.to_string().into())
1746 },
1747 updated_at,
1748 created_at: Some(updated_at),
1749 interacted_at: None,
1750 worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
1751 remote_connection: None,
1752 }
1753 }
1754
1755 fn init_test(cx: &mut TestAppContext) {
1756 let fs = FakeFs::new(cx.executor());
1757 cx.update(|cx| {
1758 let settings_store = settings::SettingsStore::test(cx);
1759 cx.set_global(settings_store);
1760 theme_settings::init(theme::LoadThemes::JustBase, cx);
1761 editor::init(cx);
1762 release_channel::init("0.0.0".parse().unwrap(), cx);
1763 prompt_store::init(cx);
1764 <dyn Fs>::set_global(fs, cx);
1765 ThreadMetadataStore::init_global(cx);
1766 ThreadStore::init_global(cx);
1767 language_model::LanguageModelRegistry::test(cx);
1768 });
1769 cx.run_until_parked();
1770 }
1771
1772 fn setup_panel_with_project(
1773 project: Entity<Project>,
1774 cx: &mut TestAppContext,
1775 ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
1776 let multi_workspace =
1777 cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
1778 let workspace_entity = multi_workspace
1779 .read_with(cx, |mw, _cx| mw.workspace().clone())
1780 .unwrap();
1781 let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
1782 let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
1783 cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
1784 });
1785 (panel, vcx)
1786 }
1787
1788 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1789 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1790 gpui::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1791 .unwrap();
1792 }
1793
1794 fn run_store_migrations(cx: &mut TestAppContext) {
1795 clear_thread_metadata_remote_connection_backfill(cx);
1796 cx.update(|cx| {
1797 let migration_task = migrate_thread_metadata(cx);
1798 migrate_thread_remote_connections(cx, migration_task);
1799 });
1800 cx.run_until_parked();
1801 }
1802
1803 #[gpui::test]
1804 async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
1805 let first_paths = PathList::new(&[Path::new("/project-a")]);
1806 let second_paths = PathList::new(&[Path::new("/project-b")]);
1807 let now = Utc::now();
1808 let older = now - chrono::Duration::seconds(1);
1809
1810 let thread = std::thread::current();
1811 let test_name = thread.name().unwrap_or("unknown_test");
1812 let db_name = format!("THREAD_METADATA_DB_{}", test_name);
1813 let db = ThreadMetadataDb(gpui::block_on(db::open_test_db::<ThreadMetadataDb>(
1814 &db_name,
1815 )));
1816
1817 db.save(make_metadata(
1818 "session-1",
1819 "First Thread",
1820 now,
1821 first_paths.clone(),
1822 ))
1823 .await
1824 .unwrap();
1825 db.save(make_metadata(
1826 "session-2",
1827 "Second Thread",
1828 older,
1829 second_paths.clone(),
1830 ))
1831 .await
1832 .unwrap();
1833
1834 cx.update(|cx| {
1835 let settings_store = settings::SettingsStore::test(cx);
1836 cx.set_global(settings_store);
1837 ThreadMetadataStore::init_global(cx);
1838 });
1839
1840 cx.run_until_parked();
1841
1842 cx.update(|cx| {
1843 let store = ThreadMetadataStore::global(cx);
1844 let store = store.read(cx);
1845
1846 assert_eq!(store.entry_ids().count(), 2);
1847 assert!(
1848 store
1849 .entry_by_session(&acp::SessionId::new("session-1"))
1850 .is_some()
1851 );
1852 assert!(
1853 store
1854 .entry_by_session(&acp::SessionId::new("session-2"))
1855 .is_some()
1856 );
1857
1858 let first_path_entries: Vec<_> = store
1859 .entries_for_path(&first_paths, None)
1860 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1861 .collect();
1862 assert_eq!(first_path_entries, vec!["session-1"]);
1863
1864 let second_path_entries: Vec<_> = store
1865 .entries_for_path(&second_paths, None)
1866 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1867 .collect();
1868 assert_eq!(second_path_entries, vec!["session-2"]);
1869 });
1870 }
1871
1872 #[gpui::test]
1873 async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
1874 init_test(cx);
1875
1876 let first_paths = PathList::new(&[Path::new("/project-a")]);
1877 let second_paths = PathList::new(&[Path::new("/project-b")]);
1878 let initial_time = Utc::now();
1879 let updated_time = initial_time + chrono::Duration::seconds(1);
1880
1881 let initial_metadata = make_metadata(
1882 "session-1",
1883 "First Thread",
1884 initial_time,
1885 first_paths.clone(),
1886 );
1887 let session1_thread_id = initial_metadata.thread_id;
1888
1889 let second_metadata = make_metadata(
1890 "session-2",
1891 "Second Thread",
1892 initial_time,
1893 second_paths.clone(),
1894 );
1895 let session2_thread_id = second_metadata.thread_id;
1896
1897 cx.update(|cx| {
1898 let store = ThreadMetadataStore::global(cx);
1899 store.update(cx, |store, cx| {
1900 store.save(initial_metadata, cx);
1901 store.save(second_metadata, cx);
1902 });
1903 });
1904
1905 cx.run_until_parked();
1906
1907 cx.update(|cx| {
1908 let store = ThreadMetadataStore::global(cx);
1909 let store = store.read(cx);
1910
1911 let first_path_entries: Vec<_> = store
1912 .entries_for_path(&first_paths, None)
1913 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1914 .collect();
1915 assert_eq!(first_path_entries, vec!["session-1"]);
1916
1917 let second_path_entries: Vec<_> = store
1918 .entries_for_path(&second_paths, None)
1919 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1920 .collect();
1921 assert_eq!(second_path_entries, vec!["session-2"]);
1922 });
1923
1924 let moved_metadata = ThreadMetadata {
1925 thread_id: session1_thread_id,
1926 session_id: Some(acp::SessionId::new("session-1")),
1927 agent_id: agent::ZED_AGENT_ID.clone(),
1928 title: Some("First Thread".into()),
1929 updated_at: updated_time,
1930 created_at: Some(updated_time),
1931 interacted_at: None,
1932 worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
1933 remote_connection: None,
1934 archived: false,
1935 };
1936
1937 cx.update(|cx| {
1938 let store = ThreadMetadataStore::global(cx);
1939 store.update(cx, |store, cx| {
1940 store.save(moved_metadata, cx);
1941 });
1942 });
1943
1944 cx.run_until_parked();
1945
1946 cx.update(|cx| {
1947 let store = ThreadMetadataStore::global(cx);
1948 let store = store.read(cx);
1949
1950 assert_eq!(store.entry_ids().count(), 2);
1951 assert!(
1952 store
1953 .entry_by_session(&acp::SessionId::new("session-1"))
1954 .is_some()
1955 );
1956 assert!(
1957 store
1958 .entry_by_session(&acp::SessionId::new("session-2"))
1959 .is_some()
1960 );
1961
1962 let first_path_entries: Vec<_> = store
1963 .entries_for_path(&first_paths, None)
1964 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1965 .collect();
1966 assert!(first_path_entries.is_empty());
1967
1968 let second_path_entries: Vec<_> = store
1969 .entries_for_path(&second_paths, None)
1970 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1971 .collect();
1972 assert_eq!(second_path_entries.len(), 2);
1973 assert!(second_path_entries.contains(&"session-1".to_string()));
1974 assert!(second_path_entries.contains(&"session-2".to_string()));
1975 });
1976
1977 cx.update(|cx| {
1978 let store = ThreadMetadataStore::global(cx);
1979 store.update(cx, |store, cx| {
1980 store.delete(session2_thread_id, cx);
1981 });
1982 });
1983
1984 cx.run_until_parked();
1985
1986 cx.update(|cx| {
1987 let store = ThreadMetadataStore::global(cx);
1988 let store = store.read(cx);
1989
1990 assert_eq!(store.entry_ids().count(), 1);
1991
1992 let second_path_entries: Vec<_> = store
1993 .entries_for_path(&second_paths, None)
1994 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
1995 .collect();
1996 assert_eq!(second_path_entries, vec!["session-1"]);
1997 });
1998 }
1999
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        // Verifies the legacy->sidebar migration is incremental: a thread
        // that already has a metadata row ("a-session-0") is left untouched,
        // while legacy threads without a row are backfilled.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata row for "a-session-0"; its title must survive
        // the migration even though the legacy store has a newer title below.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Seed the legacy thread store: one duplicate of the existing row,
        // two project-scoped threads, and one thread with no folder paths.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // 1 pre-existing + 3 newly migrated rows; all attributed to Zed's agent.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The duplicate session kept its original metadata title, proving the
        // migration did not overwrite the existing row.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // The per-batch top-5 rescue applies: each migrated thread that has
        // a project becomes the most-recent-in-its-project within this batch
        // and is unarchived. Only the projectless thread stays archived,
        // because the rescue only applies to threads with a folder path.
        let migrated_by_session: HashMap<String, &ThreadMetadata> = list
            .iter()
            .filter_map(|metadata| {
                let session_id = metadata.session_id.as_ref()?.0.to_string();
                (session_id != "a-session-0").then_some((session_id, metadata))
            })
            .collect();
        assert!(!migrated_by_session["a-session-1"].archived);
        assert!(!migrated_by_session["b-session-0"].archived);
        assert!(migrated_by_session["projectless"].archived);
    }
2123
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        // When every legacy thread already has a metadata row, the migration
        // must be a complete no-op: no new rows, no overwritten fields.
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save the same session into the legacy store with a newer title and
        // timestamp — tempting the migration to "update" the existing row.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Still exactly one row, keyed by the pre-existing session id.
        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
2184
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        // The remote-connection backfill matches metadata rows to workspaces
        // by folder paths and copies the workspace's remote connection
        // (here a WSL connection) onto the thread metadata.
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Insert a WSL remote connection row and point a workspace at the
        // same folder paths directly via SQL, bypassing the workspace API.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Clear the backfill marker so the migration runs again for this test.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        // The distro/user from the SQL fixture must round-trip into the
        // typed RemoteConnectionOptions on the metadata row.
        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2260
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        // The migration keeps at most the 5 most recently updated threads
        // per project unarchived; anything older is archived. The cap is
        // per-project, so project B's 3 threads are unaffected.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Archiving never drops rows: all 10 threads are still present.
        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the first 5 are the rescue set.
        project_a_entries.sort_by_key(|entry| std::cmp::Reverse(entry.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2349
    // Regression test for the race between `ThreadStore::reload` and
    // `migrate_thread_metadata`. `ThreadStore::new` constructs with an empty
    // in-memory cache and kicks off `reload()` as a fire-and-forget task. If
    // `migrate_thread_metadata` reads `ThreadStore::entries()` before that
    // reload completes, it observes an empty iterator and no-ops, even though
    // the on-disk legacy DB has threads to migrate. In production this
    // manifests as "my old threads disappeared after upgrading": the threads
    // are still in the legacy `threads.db`, but never make it into
    // `sidebar_threads`, so the new sidebar UI can't see them.
    #[gpui::test]
    async fn test_migration_awaits_thread_store_reload(cx: &mut TestAppContext) {
        init_test(cx);

        // Seed the legacy threads DB via the ThreadStore (the only public
        // save path in this crate), then park to make sure the rows are on
        // disk and `ThreadStore`'s in-memory cache is populated.
        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        for i in 0..3 {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = format!("legacy-session-{i}");
                let title = format!("Legacy Thread {i}");
                let updated_at = now + chrono::Duration::seconds(i as i64);
                let paths = project_paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        // Re-initialize `ThreadStore` so its in-memory cache is freshly empty
        // and a new async `reload` task is kicked off. This reproduces the
        // cold-boot state where the migration runs before the store has
        // populated itself from disk. The on-disk legacy DB still has the
        // three threads we saved above.
        cx.update(|cx| ThreadStore::init_global(cx));

        // Crucially: do NOT run_until_parked here. If we parked, the reload
        // would complete, ThreadStore::entries() would return the 3 rows, and
        // the race would be hidden. We want the migration to run with
        // `ThreadStore::entries()` still returning an empty iterator.
        run_store_migrations(cx);

        // A correct migration awaits the reload internally, so by the time
        // `run_store_migrations` returns, all three rows must be present.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(
            list.len(),
            3,
            "Expected migration to pick up all 3 legacy threads even when \
            ThreadStore::reload has not yet completed, but got {} entries",
            list.len()
        );
    }
2414
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        // A freshly opened ("draft") thread with no content must not create
        // a sidebar metadata row — not even when its title changes. Only
        // pushing real content promotes it to a tracked thread.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Draft threads no longer create metadata entries.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 0);
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), so no metadata is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(
                store.entry_ids().count(),
                0,
                "expected title updates on empty thread to not create metadata"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            // The row is keyed by the panel's thread id and carries the
            // live session id from the open thread.
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2468
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        // Metadata for a thread that has content must outlive the UI that
        // created it: dropping the panel (and thus the thread entity) must
        // not delete the metadata row.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let session_id = crate::test_support::active_session_id(&panel, &vcx);
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());

        // Push content so the thread is non-empty and gets a metadata row.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });

        // Dropping the panel releases the ConversationView and its thread.
        drop(panel);
        // Flush entity-release effects before re-checking the store.
        cx.update(|_| {});
        cx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });
    }
2505
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        // Threads opened in a project with no worktree have no folder paths
        // and are archived on creation; threads tied to a worktree stay
        // unarchived and record that worktree's folder paths.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        // Content + title make the thread non-empty so metadata is created.
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2576
2577 #[gpui::test]
2578 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2579 init_test(cx);
2580
2581 let fs = FakeFs::new(cx.executor());
2582 let project = Project::test(fs, None::<&Path>, cx).await;
2583 let connection = Rc::new(StubAgentConnection::new());
2584
2585 // Create a regular (non-subagent) thread through the panel.
2586 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2587 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2588
2589 let regular_thread =
2590 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2591 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2592
2593 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2594 thread.push_user_content_block(None, "content".into(), cx);
2595 thread.set_title("Regular Thread".into(), cx).detach();
2596 });
2597 vcx.run_until_parked();
2598
2599 // Create a standalone subagent AcpThread (not wrapped in a
2600 // ConversationView). The ThreadMetadataStore only observes
2601 // ConversationView events, so this thread's events should
2602 // have no effect on sidebar metadata.
2603 let subagent_session_id = acp::SessionId::new("subagent-session");
2604 let subagent_thread = cx.update(|cx| {
2605 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2606 cx.new(|cx| {
2607 acp_thread::AcpThread::new(
2608 Some(regular_session_id.clone()),
2609 Some("Subagent Thread".into()),
2610 None,
2611 connection.clone(),
2612 project.clone(),
2613 action_log,
2614 subagent_session_id.clone(),
2615 watch::Receiver::constant(acp::PromptCapabilities::new()),
2616 cx,
2617 )
2618 })
2619 });
2620
2621 cx.update(|cx| {
2622 subagent_thread.update(cx, |thread, cx| {
2623 thread
2624 .set_title("Subagent Thread Title".into(), cx)
2625 .detach();
2626 });
2627 });
2628 cx.run_until_parked();
2629
2630 // Only the regular thread should appear in sidebar metadata.
2631 // The subagent thread is excluded because the metadata store
2632 // only observes ConversationView events.
2633 let list = cx.update(|cx| {
2634 let store = ThreadMetadataStore::global(cx);
2635 store.read(cx).entries().cloned().collect::<Vec<_>>()
2636 });
2637
2638 assert_eq!(
2639 list.len(),
2640 1,
2641 "Expected only the regular thread in sidebar metadata, \
2642 but found {} entries (subagent threads are leaking into the sidebar)",
2643 list.len(),
2644 );
2645 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2646 assert_eq!(list[0].display_title(), "Regular Thread");
2647 }
2648
2649 #[test]
2650 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2651 let now = Utc::now();
2652
2653 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2654 let thread_id = meta.thread_id;
2655 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2656
2657 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2658
2659 assert_eq!(deduped.len(), 1);
2660 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2661 }
2662
2663 #[test]
2664 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2665 let now = Utc::now();
2666 let later = now + chrono::Duration::seconds(1);
2667
2668 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2669 let shared_thread_id = old_metadata.thread_id;
2670 let new_metadata = ThreadMetadata {
2671 thread_id: shared_thread_id,
2672 ..make_metadata("session-1", "New Title", later, PathList::default())
2673 };
2674
2675 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2676 DbOperation::Upsert(old_metadata),
2677 DbOperation::Upsert(new_metadata.clone()),
2678 ]);
2679
2680 assert_eq!(deduped.len(), 1);
2681 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2682 }
2683
2684 #[test]
2685 fn test_dedup_db_operations_preserves_distinct_sessions() {
2686 let now = Utc::now();
2687
2688 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2689 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2690 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2691 DbOperation::Upsert(metadata1.clone()),
2692 DbOperation::Upsert(metadata2.clone()),
2693 ]);
2694
2695 assert_eq!(deduped.len(), 2);
2696 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2697 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2698 }
2699
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        // Round-trips a thread through archive -> unarchive, checking that
        // `entries_for_path` hides archived rows while `archived_entries`
        // exposes them, and that unarchiving fully restores visibility.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initial state: visible for its path, nothing archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Archived rows no longer surface in per-path listings...
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            // ...but do appear via archived_entries with the flag set.
            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores the original visibility exactly.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2782
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        // With two threads at one path, archiving one must remove it from
        // `entries_for_path` but keep it in `entries` and `archived_entries`.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let session2_thread_id = metadata2.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(session2_thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Per-path listing hides the archived thread.
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            // The unfiltered listing still includes both rows.
            assert_eq!(store.entries().count(), 2);

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2837
    #[gpui::test]
    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
        // Listing is scoped by remote-connection identity: `None` matches
        // only local threads; `Some(remote)` only threads on that host.
        // Also covers `entries_for_main_worktree_path`, which additionally
        // folds in linked-worktree threads sharing the same main paths.
        init_test(cx);

        let main_paths = PathList::new(&[Path::new("/project-a")]);
        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
        let now = Utc::now();

        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });

        // Three threads at the same folder_paths but different hosts.
        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());

        let mut remote_a_thread = make_metadata(
            "remote-a-session",
            "Remote A Thread",
            now - chrono::Duration::seconds(1),
            main_paths.clone(),
        );
        remote_a_thread.remote_connection = Some(remote_a.clone());

        let mut remote_b_thread = make_metadata(
            "remote-b-session",
            "Remote B Thread",
            now - chrono::Duration::seconds(2),
            main_paths.clone(),
        );
        remote_b_thread.remote_connection = Some(remote_b.clone());

        // Linked-worktree paths: main /project-a plus linked /wt-feature.
        let linked_worktree_paths =
            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();

        let local_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("local-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Local Linked".into()),
            updated_at: now,
            created_at: Some(now),
            interacted_at: None,
            worktree_paths: linked_worktree_paths.clone(),
            remote_connection: None,
        };

        let remote_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("remote-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Remote Linked".into()),
            updated_at: now - chrono::Duration::seconds(1),
            created_at: Some(now - chrono::Duration::seconds(1)),
            interacted_at: None,
            worktree_paths: linked_worktree_paths,
            remote_connection: Some(remote_a.clone()),
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(local_thread, cx);
                store.save(remote_a_thread, cx);
                store.save(remote_b_thread, cx);
                store.save(local_linked_thread, cx);
                store.save(remote_linked_thread, cx);
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Exact-path queries: each host filter sees only its own thread,
            // never the linked-worktree threads (different folder_paths).
            let local_entries: Vec<_> = store
                .entries_for_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(local_entries, vec!["local-session"]);

            let remote_a_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_a_entries, vec!["remote-a-session"]);

            let remote_b_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_b))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_b_entries, vec!["remote-b-session"]);

            // Main-worktree queries additionally include linked-worktree
            // threads whose main paths match, still scoped per host.
            let mut local_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            local_main_entries.sort();
            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);

            let mut remote_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            remote_main_entries.sort();
            assert_eq!(
                remote_main_entries,
                vec!["remote-a-session", "remote-linked"]
            );
        });
    }
2949
2950 #[gpui::test]
2951 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
2952 init_test(cx);
2953
2954 let paths = PathList::new(&[Path::new("/project-a")]);
2955 let now = Utc::now();
2956
2957 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
2958 let m2 = make_metadata(
2959 "session-2",
2960 "Thread Two",
2961 now - chrono::Duration::seconds(1),
2962 paths.clone(),
2963 );
2964 let m3 = make_metadata(
2965 "session-3",
2966 "Thread Three",
2967 now - chrono::Duration::seconds(2),
2968 paths,
2969 );
2970
2971 cx.update(|cx| {
2972 let store = ThreadMetadataStore::global(cx);
2973 store.update(cx, |store, cx| {
2974 store.save_all(vec![m1, m2, m3], cx);
2975 });
2976 });
2977
2978 cx.run_until_parked();
2979
2980 cx.update(|cx| {
2981 let store = ThreadMetadataStore::global(cx);
2982 let store = store.read(cx);
2983
2984 assert_eq!(store.entries().count(), 3);
2985 assert!(
2986 store
2987 .entry_by_session(&acp::SessionId::new("session-1"))
2988 .is_some()
2989 );
2990 assert!(
2991 store
2992 .entry_by_session(&acp::SessionId::new("session-2"))
2993 .is_some()
2994 );
2995 assert!(
2996 store
2997 .entry_by_session(&acp::SessionId::new("session-3"))
2998 .is_some()
2999 );
3000
3001 assert_eq!(store.entry_ids().count(), 3);
3002 });
3003 }
3004
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        // Save the thread, then park so the async database write settles.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Archive it as a separate step and let that write settle too.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Reload from the database so the final assertions prove the flag
        // was persisted, not merely kept in the in-memory state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // Archived threads are excluded from the per-path listing…
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            // …and appear only in the archived listing.
            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
3063
3064 #[gpui::test]
3065 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
3066 init_test(cx);
3067
3068 cx.run_until_parked();
3069
3070 cx.update(|cx| {
3071 let store = ThreadMetadataStore::global(cx);
3072 store.update(cx, |store, cx| {
3073 store.archive(ThreadId::new(), None, cx);
3074 });
3075 });
3076
3077 cx.run_until_parked();
3078
3079 cx.update(|cx| {
3080 let store = ThreadMetadataStore::global(cx);
3081 let store = store.read(cx);
3082
3083 assert!(store.is_empty());
3084 assert_eq!(store.entries().count(), 0);
3085 assert_eq!(store.archived_entries().count(), 0);
3086 });
3087 }
3088
    #[gpui::test]
    async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths);
        let thread_id = metadata.thread_id;

        // Save and archive back-to-back inside a single update, deliberately
        // without a run_until_parked between them: archiving must not race
        // the still-pending save.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata.clone(), cx);
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // The single entry must carry both the saved metadata and the
            // archived flag — neither operation may clobber the other.
            let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
            pretty_assertions::assert_eq!(
                entries,
                vec![ThreadMetadata {
                    archived: true,
                    ..metadata
                }]
            );
        });
    }
3122
    #[gpui::test]
    async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        // Create an archived-worktree record with every field populated.
        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    Some("feature-branch".to_string()),
                    "staged_aaa".to_string(),
                    "unstaged_bbb".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread_id_1 = ThreadId::new();

        // Associate the record with a thread via the join table.
        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id, cx)
            })
            .await
            .unwrap();

        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_1, cx)
            })
            .await
            .unwrap();

        // Every field must round-trip through the database unchanged.
        assert_eq!(worktrees.len(), 1);
        let wt = &worktrees[0];
        assert_eq!(wt.id, id);
        assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
        assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
        assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
        assert_eq!(wt.staged_commit_hash, "staged_aaa");
        assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
        assert_eq!(wt.original_commit_hash, "original_000");
    }
3169
    #[gpui::test]
    async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        // Create a record and link it to a thread.
        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    Some("main".to_string()),
                    "deadbeef".to_string(),
                    "deadbeef".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread_id_1 = ThreadId::new();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id, cx)
            })
            .await
            .unwrap();

        // Deleting the record must also remove the thread association…
        store
            .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
            .await
            .unwrap();

        // …so the thread no longer reports any archived worktrees.
        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_1, cx)
            })
            .await
            .unwrap();
        assert!(worktrees.is_empty());
    }
3212
    #[gpui::test]
    async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        // A single archived worktree record (no branch name).
        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    None,
                    "abc123".to_string(),
                    "abc123".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread_id_1 = ThreadId::new();
        let thread_id_2 = ThreadId::new();

        // Link the same record to two different threads.
        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_2, id, cx)
            })
            .await
            .unwrap();

        let wt1 = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_1, cx)
            })
            .await
            .unwrap();

        let wt2 = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_2, cx)
            })
            .await
            .unwrap();

        // Both threads see the one shared record, not copies of it.
        assert_eq!(wt1.len(), 1);
        assert_eq!(wt2.len(), 1);
        assert_eq!(wt1[0].id, wt2[0].id);
    }
3268
3269 #[gpui::test]
3270 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
3271 init_test(cx);
3272 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3273
3274 let original_paths = PathList::new(&[
3275 Path::new("/projects/worktree-a"),
3276 Path::new("/projects/worktree-b"),
3277 Path::new("/other/unrelated"),
3278 ]);
3279 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3280 let thread_id = meta.thread_id;
3281
3282 store.update(cx, |store, cx| {
3283 store.save(meta, cx);
3284 });
3285
3286 let replacements = vec![
3287 (
3288 PathBuf::from("/projects/worktree-a"),
3289 PathBuf::from("/restored/worktree-a"),
3290 ),
3291 (
3292 PathBuf::from("/projects/worktree-b"),
3293 PathBuf::from("/restored/worktree-b"),
3294 ),
3295 ];
3296
3297 store.update(cx, |store, cx| {
3298 store.complete_worktree_restore(thread_id, &replacements, cx);
3299 });
3300
3301 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3302 let entry = entry.unwrap();
3303 let paths = entry.folder_paths().paths();
3304 assert_eq!(paths.len(), 3);
3305 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3306 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3307 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3308 }
3309
3310 #[gpui::test]
3311 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
3312 init_test(cx);
3313 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3314
3315 let original_paths =
3316 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3317 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3318 let thread_id = meta.thread_id;
3319
3320 store.update(cx, |store, cx| {
3321 store.save(meta, cx);
3322 });
3323
3324 let replacements = vec![
3325 (
3326 PathBuf::from("/projects/worktree-a"),
3327 PathBuf::from("/new/worktree-a"),
3328 ),
3329 (
3330 PathBuf::from("/nonexistent/path"),
3331 PathBuf::from("/should/not/appear"),
3332 ),
3333 ];
3334
3335 store.update(cx, |store, cx| {
3336 store.complete_worktree_restore(thread_id, &replacements, cx);
3337 });
3338
3339 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3340 let entry = entry.unwrap();
3341 let paths = entry.folder_paths().paths();
3342 assert_eq!(paths.len(), 2);
3343 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3344 assert!(paths.contains(&PathBuf::from("/other/path")));
3345 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3346 }
3347
3348 #[gpui::test]
3349 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
3350 init_test(cx);
3351 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3352
3353 let original_paths = PathList::new(&[
3354 Path::new("/projects/worktree-a"),
3355 Path::new("/projects/worktree-b"),
3356 Path::new("/other/unrelated"),
3357 ]);
3358 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3359 let thread_id = meta.thread_id;
3360
3361 store.update(cx, |store, cx| {
3362 store.save(meta, cx);
3363 });
3364
3365 let replacements = vec![
3366 (
3367 PathBuf::from("/projects/worktree-a"),
3368 PathBuf::from("/restored/worktree-a"),
3369 ),
3370 (
3371 PathBuf::from("/projects/worktree-b"),
3372 PathBuf::from("/restored/worktree-b"),
3373 ),
3374 ];
3375
3376 store.update(cx, |store, cx| {
3377 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3378 });
3379
3380 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3381 let entry = entry.unwrap();
3382 let paths = entry.folder_paths().paths();
3383 assert_eq!(paths.len(), 3);
3384 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3385 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3386 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3387 }
3388
3389 #[gpui::test]
3390 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
3391 init_test(cx);
3392 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3393
3394 let original_paths =
3395 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3396 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3397 let thread_id = meta.thread_id;
3398
3399 store.update(cx, |store, cx| {
3400 store.save(meta, cx);
3401 });
3402
3403 let replacements = vec![
3404 (
3405 PathBuf::from("/projects/worktree-a"),
3406 PathBuf::from("/new/worktree-a"),
3407 ),
3408 (
3409 PathBuf::from("/nonexistent/path"),
3410 PathBuf::from("/should/not/appear"),
3411 ),
3412 ];
3413
3414 store.update(cx, |store, cx| {
3415 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3416 });
3417
3418 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3419 let entry = entry.unwrap();
3420 let paths = entry.folder_paths().paths();
3421 assert_eq!(paths.len(), 2);
3422 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3423 assert!(paths.contains(&PathBuf::from("/other/path")));
3424 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3425 }
3426
    #[gpui::test]
    async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        // Two distinct archived worktrees that share the same main repo.
        let id1 = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/projects/worktree-a".to_string(),
                    "/home/user/repo".to_string(),
                    Some("branch-a".to_string()),
                    "staged_a".to_string(),
                    "unstaged_a".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let id2 = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/projects/worktree-b".to_string(),
                    "/home/user/repo".to_string(),
                    Some("branch-b".to_string()),
                    "staged_b".to_string(),
                    "unstaged_b".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread_id_1 = ThreadId::new();

        // Link both records to the same thread.
        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
            })
            .await
            .unwrap();

        // The thread should report both archived worktrees.
        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_1, cx)
            })
            .await
            .unwrap();

        assert_eq!(worktrees.len(), 2);

        let paths: Vec<&Path> = worktrees
            .iter()
            .map(|w| w.worktree_path.as_path())
            .collect();
        assert!(paths.contains(&Path::new("/projects/worktree-a")));
        assert!(paths.contains(&Path::new("/projects/worktree-b")));
    }
3494
3495 // ── Migration tests ────────────────────────────────────────────────
3496
    // Exercises the thread_id primary-key migration against a database built
    // with the pre-migration schema, using raw SQL because the old schema
    // predates the typed store API.
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id, two without.
        // NOTE: `exec` returns a closure, hence the `.unwrap()()` pattern.
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at, thread_id) \
                 VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                 (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                 (session_id, archived_worktree_id) \
                 VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
            .unwrap();

        // Run all current migrations. sqlez skips the already-applied ones and
        // runs the remaining migrations.
        run_thread_metadata_migrations(&connection);

        // All 3 rows should survive with non-NULL thread_ids.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
            .unwrap()
            .unwrap();
        assert_eq!(
            original_tid,
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
            .unwrap()
            .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
            .unwrap()
            .unwrap();
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3653
3654 // ── ThreadWorktreePaths tests ──────────────────────────────────────
3655
3656 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3657 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3658 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3659 .iter()
3660 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3661 .unzip();
3662 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3663 }
3664
    #[test]
    fn test_thread_worktree_paths_full_add_then_remove_cycle() {
        // Full scenario from the issue:
        // 1. Start with linked worktree selectric → zed
        // 2. Add cloud
        // 3. Remove zed

        let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);

        // Step 2: add cloud (a plain folder, so main path == folder path)
        paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));

        // Both pairs present; folder and main lists stay in sync.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/projects/cloud"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
        );

        // Step 3: remove zed — removing by MAIN path must also drop the
        // linked worktree's folder path.
        paths.remove_main_path(Path::new("/projects/zed"));

        assert_eq!(paths.ordered_pairs().count(), 1);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[Path::new("/projects/cloud")])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/cloud")])
        );
    }
3703
3704 #[test]
3705 fn test_thread_worktree_paths_add_is_idempotent() {
3706 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3707
3708 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3709
3710 assert_eq!(paths.ordered_pairs().count(), 1);
3711 }
3712
3713 #[test]
3714 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3715 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3716
3717 paths.remove_main_path(Path::new("/projects/nonexistent"));
3718
3719 assert_eq!(paths.ordered_pairs().count(), 1);
3720 }
3721
3722 #[test]
3723 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3724 let folder = PathList::new(&[
3725 Path::new("/worktrees/selectric/zed"),
3726 Path::new("/projects/cloud"),
3727 ]);
3728 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3729
3730 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3731
3732 let pairs: Vec<_> = paths
3733 .ordered_pairs()
3734 .map(|(m, f)| (m.clone(), f.clone()))
3735 .collect();
3736 assert_eq!(pairs.len(), 2);
3737 assert!(pairs.contains(&(
3738 PathBuf::from("/projects/zed"),
3739 PathBuf::from("/worktrees/selectric/zed")
3740 )));
3741 assert!(pairs.contains(&(
3742 PathBuf::from("/projects/cloud"),
3743 PathBuf::from("/projects/cloud")
3744 )));
3745 }
3746
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees that share the same main repo. NOTE(review):
        // the original comments here contradicted each other about whether
        // PathList deduplicates its input; the assertions below compare
        // PathList against PathList built from the same inputs, so they hold
        // either way — the load-bearing check is that ordered_pairs keeps
        // BOTH (main, folder) associations despite the repeated main path.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // Both pairs survive; the list accessors round-trip the inputs.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3772
3773 #[test]
3774 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3775 let folder = PathList::new(&[
3776 Path::new("/worktrees/selectric/zed"),
3777 Path::new("/projects/cloud"),
3778 ]);
3779 let main = PathList::new(&[Path::new("/projects/zed")]);
3780
3781 let result = WorktreePaths::from_path_lists(main, folder);
3782 assert!(result.is_err());
3783 }
3784
    /// Regression test: archiving a thread created in a git worktree must
    /// preserve the thread's folder paths so that restoring it later does
    /// not prompt the user to re-associate a project.
    #[gpui::test]
    async fn test_archived_thread_retains_paths_after_worktree_removal(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/worktrees/feature",
            serde_json::json!({ "src": { "main.rs": "" } }),
        )
        .await;
        let project = Project::test(fs, [Path::new("/worktrees/feature")], cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Push content so the event handler saves metadata with the
        // project's worktree paths.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Verify paths were saved correctly, and capture them for the final
        // comparison after the worktree is gone.
        let (folder_paths_before, main_paths_before) = cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(
                !entry.folder_paths().is_empty(),
                "thread should have folder paths before archiving"
            );
            (
                entry.folder_paths().clone(),
                entry.main_worktree_paths().clone(),
            )
        });

        // Archive the thread.
        cx.update(|cx| {
            ThreadMetadataStore::global(cx).update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });
        cx.run_until_parked();

        // Remove the worktree from the project, simulating what the
        // archive flow does for linked git worktrees.
        let worktree_id = cx.update(|cx| {
            project
                .read(cx)
                .visible_worktrees(cx)
                .next()
                .unwrap()
                .read(cx)
                .id()
        });
        project.update(cx, |project, cx| {
            project.remove_worktree(worktree_id, cx);
        });
        cx.run_until_parked();

        // Trigger a thread event after archiving + worktree removal.
        // In production this happens when an async title-generation task
        // completes after the thread was archived.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Generated title".into(), cx).detach();
        });
        vcx.run_until_parked();

        // The archived thread must still have its original folder paths.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(entry.archived, "thread should still be archived");
            assert_eq!(
                entry.display_title().as_ref(),
                "Generated title",
                "title should still be updated for archived threads"
            );
            assert_eq!(
                entry.folder_paths(),
                &folder_paths_before,
                "archived thread must retain its folder paths after worktree \
                 removal + subsequent thread event, otherwise restoring it \
                 will prompt the user to re-associate a project"
            );
            assert_eq!(
                entry.main_worktree_paths(),
                &main_paths_before,
                "archived thread must retain its main worktree paths after \
                 worktree removal + subsequent thread event"
            );
        });
    }
3885
    /// Once a project becomes a collab guest session, subsequent thread
    /// events must not persist (or re-persist) metadata to the local store.
    #[gpui::test]
    async fn test_collab_guest_threads_not_saved_to_metadata_store(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, [Path::new("/project-a")], cx).await;

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);
        // Give the thread content and a title so the event handler saves
        // its metadata.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "hello".into(), cx);
            thread.set_title("Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        // Confirm the thread is in the store while the project is local.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            assert!(
                store.read(cx).entry(thread_id).is_some(),
                "thread must be in the store while the project is local"
            );
        });

        // Delete the entry, then flip the project into collab mode.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(thread_id, cx);
            });
        });
        project.update(cx, |project, _cx| {
            project.mark_as_collab_for_testing();
        });

        // A further thread event would ordinarily re-save the metadata…
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "more content".into(), cx);
        });
        vcx.run_until_parked();

        // …but must not while the project is a collab guest session.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            assert!(
                store.read(cx).entry(thread_id).is_none(),
                "threads must not be persisted while the project is a collab guest session"
            );
        });
    }
3939
    // When a worktree is added to a collab project, update_thread_work_dirs
    // fires with the new worktree paths. Without an is_via_collab() guard it
    // overwrites the stored paths of any retained or active local threads with
    // the new (expanded) path set, corrupting metadata that belonged to the
    // guest's own local project.
    #[gpui::test]
    async fn test_collab_guest_retained_thread_paths_not_overwritten_on_worktree_change(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree("/project-a", serde_json::json!({})).await;
        fs.insert_tree("/project-b", serde_json::json!({})).await;
        let project = Project::test(fs, [Path::new("/project-a")], cx).await;

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);

        // Open thread A and give it content so its metadata is saved with /project-a.
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        let thread_a_id = crate::test_support::active_thread_id(&panel, &vcx);
        let thread_a = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_a.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "hello".into(), cx);
            thread.set_title("Thread A".into(), cx).detach();
        });
        vcx.run_until_parked();

        // Sanity check: the metadata starts out with exactly /project-a.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let entry = store.read(cx).entry(thread_a_id).unwrap();
            assert_eq!(
                entry.folder_paths().paths(),
                &[std::path::PathBuf::from("/project-a")],
                "thread A must be saved with /project-a before collab"
            );
        });

        // Open thread B, making thread A a retained thread in the panel.
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        vcx.run_until_parked();

        // Transition the project into collab mode (simulates joining as a guest).
        project.update(cx, |project, _cx| {
            project.mark_as_collab_for_testing();
        });

        // Add a second worktree. For a real collab guest this would be one of
        // the host's worktrees arriving via the collab protocol, but here we
        // use a local path because the test infrastructure cannot easily produce
        // a remote worktree with a fully-scanned root entry.
        //
        // This fires WorktreeAdded → update_thread_work_dirs. Without an
        // is_via_collab() guard that call overwrites the stored paths of
        // retained thread A from {/project-a} to {/project-a, /project-b},
        // polluting its metadata with a path it never belonged to.
        project
            .update(cx, |project, cx| {
                project.find_or_create_worktree(Path::new("/project-b"), true, cx)
            })
            .await
            .unwrap();
        vcx.run_until_parked();

        // Thread A's stored paths must be exactly what they were before.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let entry = store
                .read(cx)
                .entry(thread_a_id)
                .expect("thread A must still exist in the store");
            assert_eq!(
                entry.folder_paths().paths(),
                &[std::path::PathBuf::from("/project-a")],
                "retained thread A's stored path must not be updated while the project is via collab"
            );
        });
    }
4025}