1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent::{ThreadStore, ZED_AGENT_ID};
7use agent_client_protocol as acp;
8use anyhow::Context as _;
9use chrono::{DateTime, Utc};
10use collections::{HashMap, HashSet};
11use db::{
12 kvp::KeyValueStore,
13 sqlez::{
14 bindable::{Bind, Column},
15 domain::Domain,
16 statement::Statement,
17 thread_safe_connection::ThreadSafeConnection,
18 },
19 sqlez_macros::sql,
20};
21use fs::Fs;
22use futures::{FutureExt, future::Shared};
23use gpui::{AppContext as _, Entity, Global, Subscription, Task};
24pub use project::WorktreePaths;
25use project::{AgentId, linked_worktree_short_name};
26use remote::{RemoteConnectionOptions, same_remote_connection_identity};
27use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
28use util::{ResultExt as _, debug_panic};
29use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
30
31use crate::DEFAULT_THREAD_TITLE;
32
/// Stable, globally-unique identifier for a sidebar thread. Wraps a UUID so it
/// can be persisted in the metadata database and used as the key for all of
/// the store's in-memory maps.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);
35
36impl ThreadId {
37 pub fn new() -> Self {
38 Self(uuid::Uuid::new_v4())
39 }
40}
41
impl Bind for ThreadId {
    /// Binds the inner UUID to `statement` starting at `start_index`,
    /// delegating to the UUID's own `Bind` impl; returns the next free
    /// parameter index.
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}
47
48impl Column for ThreadId {
49 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
50 let (uuid, next) = Column::column(statement, start_index)?;
51 Ok((ThreadId(uuid), next))
52 }
53}
54
/// KVP flag marking that the one-time remote-connection backfill has run.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
/// KVP flag marking that the one-time thread-id backfill has run.
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";
57
58/// List all sidebar thread metadata from an arbitrary SQLite connection.
59///
60/// This is used to read thread metadata from another release channel's
61/// database without opening a full `ThreadSafeConnection`.
62pub(crate) fn list_thread_metadata_from_connection(
63 connection: &db::sqlez::connection::Connection,
64) -> anyhow::Result<Vec<ThreadMetadata>> {
65 connection.select::<ThreadMetadata>(ThreadMetadataDb::LIST_QUERY)?()
66}
67
/// Run the `ThreadMetadataDb` migrations on a raw connection.
///
/// This is used in tests to set up the sidebar_threads schema in a
/// temporary database.
#[cfg(test)]
pub(crate) fn run_thread_metadata_migrations(connection: &db::sqlez::connection::Connection) {
    let result = connection.migrate(
        ThreadMetadataDb::NAME,
        ThreadMetadataDb::MIGRATIONS,
        &mut |_, _, _| false,
    );
    result.expect("thread metadata migrations should succeed");
}
82
83pub fn init(cx: &mut App) {
84 ThreadMetadataStore::init_global(cx);
85 let migration_task = migrate_thread_metadata(cx);
86 migrate_thread_remote_connections(cx, migration_task);
87 migrate_thread_ids(cx);
88}
89
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // Session ids already present in the metadata DB; used to make the
        // migration idempotent across app launches.
        let existing_list = db.list()?;
        let is_first_migration = existing_list.is_empty();
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    // Skip threads that were already migrated on a prior run.
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Empty/placeholder titles are stored as `None` so the
                        // UI falls back to the default title.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        interacted_at: None,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        // Migrated threads default to archived; a few are
                        // unarchived below on the very first migration.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without a project stay archived.
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        // A single reload at the end picks up everything saved above.
        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
176
177fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
178 let store = ThreadMetadataStore::global(cx);
179 let db = store.read(cx).db.clone();
180 let kvp = KeyValueStore::global(cx);
181 let workspace_db = WorkspaceDb::global(cx);
182 let fs = <dyn Fs>::global(cx);
183
184 cx.spawn(async move |cx| -> anyhow::Result<()> {
185 migration_task.await?;
186
187 if kvp
188 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
189 .is_some()
190 {
191 return Ok(());
192 }
193
194 let recent_workspaces = workspace_db.recent_project_workspaces(fs.as_ref()).await?;
195
196 let mut local_path_lists = HashSet::<PathList>::default();
197 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
198
199 recent_workspaces
200 .iter()
201 .filter(|(_, location, path_list, _)| {
202 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
203 })
204 .for_each(|(_, _, path_list, _)| {
205 local_path_lists.insert(path_list.clone());
206 });
207
208 for (_, location, path_list, _) in recent_workspaces {
209 match location {
210 SerializedWorkspaceLocation::Remote(remote_connection)
211 if !local_path_lists.contains(&path_list) =>
212 {
213 remote_path_lists
214 .entry(path_list)
215 .or_insert(remote_connection);
216 }
217 _ => {}
218 }
219 }
220
221 let mut reloaded = false;
222 for metadata in db.list()? {
223 if metadata.remote_connection.is_some() {
224 continue;
225 }
226
227 if let Some(remote_connection) = remote_path_lists
228 .get(metadata.folder_paths())
229 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
230 {
231 db.save(ThreadMetadata {
232 remote_connection: Some(remote_connection.clone()),
233 ..metadata
234 })
235 .await?;
236 reloaded = true;
237 }
238 }
239
240 let reloaded_task = reloaded
241 .then_some(store.update(cx, |store, cx| store.reload(cx)))
242 .unwrap_or(Task::ready(()).shared());
243
244 kvp.write_kvp(
245 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
246 "1".to_string(),
247 )
248 .await?;
249 reloaded_task.await;
250
251 Ok(())
252 })
253 .detach_and_log_err(cx);
254}
255
256fn migrate_thread_ids(cx: &mut App) {
257 let store = ThreadMetadataStore::global(cx);
258 let db = store.read(cx).db.clone();
259 let kvp = KeyValueStore::global(cx);
260
261 cx.spawn(async move |cx| -> anyhow::Result<()> {
262 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
263 return Ok(());
264 }
265
266 let mut reloaded = false;
267 for metadata in db.list()? {
268 db.save(metadata).await?;
269 reloaded = true;
270 }
271
272 let reloaded_task = reloaded
273 .then_some(store.update(cx, |store, cx| store.reload(cx)))
274 .unwrap_or(Task::ready(()).shared());
275
276 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
277 .await?;
278 reloaded_task.await;
279
280 Ok(())
281 })
282 .detach_and_log_err(cx);
283}
284
/// GPUI global wrapper holding the singleton [`ThreadMetadataStore`] entity.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
287
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier for this thread across sessions.
    pub thread_id: ThreadId,
    /// ACP session id, if the thread has one; the store rejects entries
    /// without one (see `save_internal`).
    pub session_id: Option<acp::SessionId>,
    /// Which agent owns the thread (e.g. the native Zed agent).
    pub agent_id: AgentId,
    /// User-visible title; `None` means "fall back to the default title".
    pub title: Option<SharedString>,
    /// Last modification time; used for per-project recency ordering.
    pub updated_at: DateTime<Utc>,
    /// Creation time, when known.
    pub created_at: Option<DateTime<Utc>>,
    /// When a user last interacted to send a message (including queueing).
    /// Doesn't include the time when a queued message is fired.
    pub interacted_at: Option<DateTime<Utc>>,
    /// Folder / main-worktree path pairs the thread was opened with.
    pub worktree_paths: WorktreePaths,
    /// Remote connection this thread belongs to; `None` for local threads.
    pub remote_connection: Option<RemoteConnectionOptions>,
    /// Archived threads are hidden from the sidebar's active list.
    pub archived: bool,
}
305
306impl ThreadMetadata {
307 pub fn display_title(&self) -> SharedString {
308 self.title
309 .clone()
310 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
311 }
312
313 pub fn folder_paths(&self) -> &PathList {
314 self.worktree_paths.folder_path_list()
315 }
316 pub fn main_worktree_paths(&self) -> &PathList {
317 self.worktree_paths.main_worktree_path_list()
318 }
319}
320
/// Derives worktree display info from a thread's stored path list.
///
/// For each path in the thread's `folder_paths`, produces a
/// [`ThreadItemWorktreeInfo`] with a short display name, full path, and whether
/// the worktree is the main checkout or a linked git worktree. When
/// multiple main paths exist and a linked worktree's short name alone
/// wouldn't identify which main project it belongs to, the main project
/// name is prefixed for disambiguation (e.g. `project:feature`).
pub fn worktree_info_from_thread_paths<S: std::hash::BuildHasher>(
    worktree_paths: &WorktreePaths,
    branch_names: &std::collections::HashMap<PathBuf, SharedString, S>,
) -> Vec<ThreadItemWorktreeInfo> {
    let mut infos: Vec<ThreadItemWorktreeInfo> = Vec::new();
    // (short name, main-project name) for each linked worktree, pushed in the
    // same order as the Linked entries of `infos` — the zip below relies on
    // this alignment.
    let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new();
    // Distinct main worktree paths seen; more than one means the thread spans
    // multiple projects.
    let mut unique_main_count = HashSet::default();

    for (main_path, folder_path) in worktree_paths.ordered_pairs() {
        unique_main_count.insert(main_path.clone());
        // A folder that differs from its main path is a linked git worktree.
        let is_linked = main_path != folder_path;

        if is_linked {
            let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default();
            let project_name = main_path
                .file_name()
                .map(|n| SharedString::from(n.to_string_lossy().to_string()))
                .unwrap_or_default();
            linked_short_names.push((short_name.clone(), project_name));
            infos.push(ThreadItemWorktreeInfo {
                worktree_name: Some(short_name),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Linked,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        } else {
            // Main worktrees without a final path component (e.g. `/`) are
            // skipped — note they still count toward `unique_main_count`.
            let Some(name) = folder_path.file_name() else {
                continue;
            };
            infos.push(ThreadItemWorktreeInfo {
                worktree_name: Some(SharedString::from(name.to_string_lossy().to_string())),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Main,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        }
    }

    // When the group has multiple main worktree paths and the thread's
    // folder paths don't all share the same short name, prefix each
    // linked worktree chip with its main project name so the user knows
    // which project it belongs to.
    let all_same_name = infos.len() > 1
        && infos
            .iter()
            .all(|i| i.worktree_name == infos[0].worktree_name);

    if unique_main_count.len() > 1 && !all_same_name {
        // `linked_short_names` is aligned with the Linked entries of `infos`
        // (see above), so zipping the filtered iterator pairs each chip with
        // its own project name.
        for (info, (_short_name, project_name)) in infos
            .iter_mut()
            .filter(|i| i.kind == WorktreeKind::Linked)
            .zip(linked_short_names.iter())
        {
            if let Some(name) = &info.worktree_name {
                info.worktree_name = Some(SharedString::from(format!("{}:{}", project_name, name)));
            }
        }
    }

    infos
}
392
393impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
394 fn from(meta: &ThreadMetadata) -> Self {
395 let session_id = meta
396 .session_id
397 .clone()
398 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
399 Self {
400 session_id,
401 work_dirs: Some(meta.folder_paths().clone()),
402 title: meta.title.clone(),
403 updated_at: Some(meta.updated_at),
404 created_at: meta.created_at,
405 meta: None,
406 }
407 }
408}
409
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
// Derives added: a public record type of plain data should be debuggable,
// cloneable, and comparable (all fields are stdlib value types).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
451
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    // Persistent backing database for everything cached below.
    db: ThreadMetadataDb,
    // Primary cache: all known threads, keyed by id.
    threads: HashMap<ThreadId, ThreadMetadata>,
    // Secondary index: thread ids grouped by folder path list.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    // Secondary index: thread ids grouped by main worktree path list.
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    // Secondary index: ACP session id -> thread id.
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    // In-flight DB reload, shared so callers can await the same refresh.
    reload_task: Option<Shared<Task<()>>>,
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    // Writes are queued here; presumably drained by `_db_operations_task`
    // (constructed in `new`, not visible here) — TODO confirm.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    // Running archive jobs; dropping the Sender cancels the job
    // (see `unarchive`).
    in_flight_archives: HashMap<ThreadId, (Task<()>, smol::channel::Sender<()>)>,
    _db_operations_task: Task<()>,
}
467
/// A pending write to the metadata database, queued on
/// `pending_thread_ops_tx`.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update a thread's metadata row.
    Upsert(ThreadMetadata),
    /// Remove a thread's metadata row.
    Delete(ThreadId),
}
473
474impl DbOperation {
475 fn id(&self) -> ThreadId {
476 match self {
477 DbOperation::Upsert(thread) => thread.thread_id,
478 DbOperation::Delete(thread_id) => *thread_id,
479 }
480 }
481}
482
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
// Registered as a GPUI global so tests can install an override before the
// store is initialized.
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}
491
#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// Returns the test DB name: the global override when present, otherwise
    /// a name derived from the current thread's (i.e. the test's) name.
    pub fn global(cx: &App) -> String {
        if let Some(global) = cx.try_global::<Self>() {
            return global.0.clone();
        }
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        format!("THREAD_METADATA_DB_{}", test_name)
    }
}
504
505impl ThreadMetadataStore {
506 #[cfg(not(any(test, feature = "test-support")))]
507 pub fn init_global(cx: &mut App) {
508 if cx.has_global::<Self>() {
509 return;
510 }
511
512 let db = ThreadMetadataDb::global(cx);
513 let thread_store = cx.new(|cx| Self::new(db, cx));
514 cx.set_global(GlobalThreadMetadataStore(thread_store));
515 }
516
517 #[cfg(any(test, feature = "test-support"))]
518 pub fn init_global(cx: &mut App) {
519 let db_name = TestMetadataDbName::global(cx);
520 let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
521 let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
522 cx.set_global(GlobalThreadMetadataStore(thread_store));
523 }
524
525 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
526 cx.try_global::<GlobalThreadMetadataStore>()
527 .map(|store| store.0.clone())
528 }
529
530 pub fn global(cx: &App) -> Entity<Self> {
531 cx.global::<GlobalThreadMetadataStore>().0.clone()
532 }
533
    /// Whether the store currently has no cached threads.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }
537
    /// Returns the ids of all cached threads, in arbitrary order.
    pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
        self.threads.keys().copied()
    }
542
    /// Returns the cached metadata for `thread_id`, if the thread exists.
    pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
        self.threads.get(&thread_id)
    }
547
548 /// Returns the metadata for a thread identified by its ACP session ID.
549 pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
550 let thread_id = self.threads_by_session.get(session_id)?;
551 self.threads.get(thread_id)
552 }
553
    /// Returns all cached threads (archived and unarchived), in arbitrary order.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
558
    /// Returns only the threads that have been archived.
    pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.entries().filter(|t| t.archived)
    }
563
564 /// Returns all threads for the given path list and remote connection,
565 /// excluding archived threads.
566 ///
567 /// When `remote_connection` is `Some`, only threads whose persisted
568 /// `remote_connection` matches by normalized identity are returned.
569 /// When `None`, only local (non-remote) threads are returned.
570 pub fn entries_for_path<'a>(
571 &'a self,
572 path_list: &PathList,
573 remote_connection: Option<&'a RemoteConnectionOptions>,
574 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
575 self.threads_by_paths
576 .get(path_list)
577 .into_iter()
578 .flatten()
579 .filter_map(|s| self.threads.get(s))
580 .filter(|s| !s.archived)
581 .filter(move |s| {
582 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
583 })
584 }
585
586 /// Returns threads whose `main_worktree_paths` matches the given path list
587 /// and remote connection, excluding archived threads. This finds threads
588 /// that were opened in a linked worktree but are associated with the given
589 /// main worktree.
590 ///
591 /// When `remote_connection` is `Some`, only threads whose persisted
592 /// `remote_connection` matches by normalized identity are returned.
593 /// When `None`, only local (non-remote) threads are returned.
594 pub fn entries_for_main_worktree_path<'a>(
595 &'a self,
596 path_list: &PathList,
597 remote_connection: Option<&'a RemoteConnectionOptions>,
598 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
599 self.threads_by_main_paths
600 .get(path_list)
601 .into_iter()
602 .flatten()
603 .filter_map(|s| self.threads.get(s))
604 .filter(|s| !s.archived)
605 .filter(move |s| {
606 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
607 })
608 }
609
    /// Replaces the in-memory caches with a fresh snapshot from the database.
    ///
    /// Listing happens on a background thread; the returned shared task
    /// resolves once the caches have been rebuilt (or the list failed, in
    /// which case the error is logged and the caches are left as-is).
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous in-flight reload; its snapshot would be stale.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild every index from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        this.cache_thread_metadata(row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
641
642 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
643 for metadata in metadata {
644 self.save_internal(metadata);
645 }
646 cx.notify();
647 }
648
    /// Caches and persists a single metadata entry, then notifies observers.
    pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
653
    /// Caches `metadata` and queues a DB upsert, without notifying observers.
    ///
    /// If the thread was already cached under different paths, it is first
    /// removed from the stale path indexes so `cache_thread_metadata` can
    /// re-insert it under the new ones. Entries without a session id are
    /// rejected (debug-panic, silently dropped in release).
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        if metadata.session_id.is_none() {
            debug_panic!("cannot store thread metadata without a session_id");
            return;
        };

        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            // De-index from the old folder paths if they changed.
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            // De-index from the old main worktree paths if they changed.
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        self.cache_thread_metadata(metadata.clone());
        // Persist asynchronously via the DB-operations queue.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
683
684 fn cache_thread_metadata(&mut self, metadata: ThreadMetadata) {
685 let Some(session_id) = metadata.session_id.as_ref() else {
686 debug_panic!("cannot store thread metadata without a session_id");
687 return;
688 };
689
690 self.threads_by_session
691 .insert(session_id.clone(), metadata.thread_id);
692
693 self.threads.insert(metadata.thread_id, metadata.clone());
694
695 self.threads_by_paths
696 .entry(metadata.folder_paths().clone())
697 .or_default()
698 .insert(metadata.thread_id);
699
700 if !metadata.main_worktree_paths().is_empty() {
701 self.threads_by_main_paths
702 .entry(metadata.main_worktree_paths().clone())
703 .or_default()
704 .insert(metadata.thread_id);
705 }
706 }
707
708 pub fn update_working_directories(
709 &mut self,
710 thread_id: ThreadId,
711 work_dirs: PathList,
712 cx: &mut Context<Self>,
713 ) {
714 if let Some(thread) = self.threads.get(&thread_id) {
715 debug_assert!(
716 !thread.archived,
717 "update_working_directories called on archived thread"
718 );
719 self.save_internal(ThreadMetadata {
720 worktree_paths: WorktreePaths::from_path_lists(
721 thread.main_worktree_paths().clone(),
722 work_dirs.clone(),
723 )
724 .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
725 ..thread.clone()
726 });
727 cx.notify();
728 }
729 }
730
731 pub fn update_worktree_paths(
732 &mut self,
733 thread_ids: &[ThreadId],
734 worktree_paths: WorktreePaths,
735 cx: &mut Context<Self>,
736 ) {
737 let mut changed = false;
738 for &thread_id in thread_ids {
739 let Some(thread) = self.threads.get(&thread_id) else {
740 continue;
741 };
742 if thread.worktree_paths == worktree_paths {
743 continue;
744 }
745 // Don't overwrite paths for archived threads — the
746 // project may no longer include the worktree that was
747 // removed during the archive flow.
748 if thread.archived {
749 continue;
750 }
751 self.save_internal(ThreadMetadata {
752 worktree_paths: worktree_paths.clone(),
753 ..thread.clone()
754 });
755 changed = true;
756 }
757 if changed {
758 cx.notify();
759 }
760 }
761
762 pub fn update_interacted_at(
763 &mut self,
764 thread_id: &ThreadId,
765 time: DateTime<Utc>,
766 cx: &mut Context<Self>,
767 ) {
768 if let Some(thread) = self.threads.get(thread_id) {
769 self.save_internal(ThreadMetadata {
770 interacted_at: Some(time),
771 ..thread.clone()
772 });
773 cx.notify();
774 };
775 }
776
777 pub fn archive(
778 &mut self,
779 thread_id: ThreadId,
780 archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
781 cx: &mut Context<Self>,
782 ) {
783 self.update_archived(thread_id, true, cx);
784
785 if let Some(job) = archive_job {
786 self.in_flight_archives.insert(thread_id, job);
787 }
788 }
789
    /// Marks a thread as unarchived and cancels any in-flight archive job.
    pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        self.update_archived(thread_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(&thread_id);
    }
795
    /// Drops the bookkeeping for an archive job that completed on its own.
    pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
        self.in_flight_archives.remove(&thread_id);
    }
799
800 /// Returns `true` if any unarchived thread other than `current_session_id`
801 /// references `path` in its folder paths. Used to determine whether a
802 /// worktree can safely be removed from disk.
803 pub fn path_is_referenced_by_other_unarchived_threads(
804 &self,
805 thread_id: ThreadId,
806 path: &Path,
807 remote_connection: Option<&RemoteConnectionOptions>,
808 ) -> bool {
809 self.entries().any(|thread| {
810 thread.thread_id != thread_id
811 && !thread.archived
812 && same_remote_connection_identity(
813 thread.remote_connection.as_ref(),
814 remote_connection,
815 )
816 && thread
817 .folder_paths()
818 .paths()
819 .iter()
820 .any(|other_path| other_path.as_path() == path)
821 })
822 }
823
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                // Note: only the first occurrence of `old_path` is replaced
                // per pair (contrast with `complete_worktree_restore`, which
                // replaces all occurrences).
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Re-pair with the existing main worktree paths, falling back
                // to plain folders if the pairing is no longer valid.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
853
854 pub fn complete_worktree_restore(
855 &mut self,
856 thread_id: ThreadId,
857 path_replacements: &[(PathBuf, PathBuf)],
858 cx: &mut Context<Self>,
859 ) {
860 if let Some(thread) = self.threads.get(&thread_id).cloned() {
861 let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
862 for (old_path, new_path) in path_replacements {
863 for path in &mut paths {
864 if path == old_path {
865 *path = new_path.clone();
866 }
867 }
868 }
869 let new_folder_paths = PathList::new(&paths);
870 self.save_internal(ThreadMetadata {
871 worktree_paths: WorktreePaths::from_path_lists(
872 thread.main_worktree_paths().clone(),
873 new_folder_paths.clone(),
874 )
875 .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
876 ..thread
877 });
878 cx.notify();
879 }
880 }
881
882 /// Apply a mutation to the worktree paths of all threads whose current
883 /// `folder_paths` matches `current_folder_paths`, then re-index.
884 /// When `remote_connection` is provided, only threads with a matching
885 /// remote connection are affected.
886 pub fn change_worktree_paths(
887 &mut self,
888 current_folder_paths: &PathList,
889 remote_connection: Option<&RemoteConnectionOptions>,
890 mutate: impl Fn(&mut WorktreePaths),
891 cx: &mut Context<Self>,
892 ) {
893 let thread_ids: Vec<_> = self
894 .threads_by_paths
895 .get(current_folder_paths)
896 .into_iter()
897 .flatten()
898 .filter(|id| {
899 self.threads.get(id).is_some_and(|t| {
900 !t.archived
901 && same_remote_connection_identity(
902 t.remote_connection.as_ref(),
903 remote_connection,
904 )
905 })
906 })
907 .copied()
908 .collect();
909
910 self.mutate_thread_paths(&thread_ids, mutate, cx);
911 }
912
    /// Applies `mutate` to each listed thread's `worktree_paths`, keeping the
    /// path indexes consistent and queueing a DB upsert per thread. Notifies
    /// observers once at the end (unless the id list is empty).
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // De-index under the old paths before mutating...
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                // ...then re-index under the new ones.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                // Persist the mutated metadata asynchronously.
                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }
954
    /// Persists a record of a git worktree being archived (deleted from disk)
    /// and returns the new row's id. The commit hashes capture the worktree's
    /// staged/unstaged/original state so it can later be restored — see
    /// [`ArchivedGitWorktree`]. Runs on a background thread.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }
978
    /// Associates a thread with an archived-worktree row so the worktree can
    /// be restored when the thread is unarchived. Runs on a background thread.
    pub fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
                .await
        })
    }
991
    /// Fetches all archived-worktree records linked to `thread_id`.
    /// Runs on a background thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
    }
1000
    /// Deletes an archived-worktree record by row id. Runs on a background thread.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }
1005
1006 pub fn unlink_thread_from_all_archived_worktrees(
1007 &self,
1008 thread_id: ThreadId,
1009 cx: &App,
1010 ) -> Task<anyhow::Result<()>> {
1011 let db = self.db.clone();
1012 cx.background_spawn(async move {
1013 db.unlink_thread_from_all_archived_worktrees(thread_id)
1014 .await
1015 })
1016 }
1017
1018 pub fn is_archived_worktree_referenced(
1019 &self,
1020 archived_worktree_id: i64,
1021 cx: &App,
1022 ) -> Task<anyhow::Result<bool>> {
1023 let db = self.db.clone();
1024 cx.background_spawn(async move {
1025 db.is_archived_worktree_referenced(archived_worktree_id)
1026 .await
1027 })
1028 }
1029
1030 pub fn get_all_archived_branch_names(
1031 &self,
1032 cx: &App,
1033 ) -> Task<anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>>> {
1034 let db = self.db.clone();
1035 cx.background_spawn(async move { db.get_all_archived_branch_names() })
1036 }
1037
1038 fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
1039 if let Some(thread) = self.threads.get(&thread_id) {
1040 self.save_internal(ThreadMetadata {
1041 archived,
1042 ..thread.clone()
1043 });
1044 cx.notify();
1045 }
1046 }
1047
1048 pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
1049 if let Some(thread) = self.threads.get(&thread_id) {
1050 if let Some(sid) = &thread.session_id {
1051 self.threads_by_session.remove(sid);
1052 }
1053 if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
1054 thread_ids.remove(&thread_id);
1055 }
1056 if !thread.main_worktree_paths().is_empty() {
1057 if let Some(thread_ids) = self
1058 .threads_by_main_paths
1059 .get_mut(thread.main_worktree_paths())
1060 {
1061 thread_ids.remove(&thread_id);
1062 }
1063 }
1064 }
1065 self.threads.remove(&thread_id);
1066 self.pending_thread_ops_tx
1067 .try_send(DbOperation::Delete(thread_id))
1068 .log_err();
1069 cx.notify();
1070 }
1071
    /// Build the store: wire up per-conversation subscriptions, start the
    /// background task that batches database writes, and kick off the
    /// initial load from the database.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Subscribe to every `ConversationView` as it is created so thread
        // metadata stays in sync, and drop the subscription again when the
        // view entity is released.
        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        // Drain queued operations in batches: block on one item, opportunistically
        // grab everything else already queued, dedup so only the latest operation
        // per thread survives, then apply the batch to the database.
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate the in-memory caches from the database immediately.
        let _ = this.reload(cx);
        this
    }
1140
1141 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
1142 let mut ops = HashMap::default();
1143 for operation in operations.into_iter().rev() {
1144 if ops.contains_key(&operation.id()) {
1145 continue;
1146 }
1147 ops.insert(operation.id(), operation);
1148 }
1149 ops.into_values().collect()
1150 }
1151
1152 fn handle_conversation_event(
1153 &mut self,
1154 conversation_view: Entity<crate::ConversationView>,
1155 _event: &crate::conversation_view::RootThreadUpdated,
1156 cx: &mut Context<Self>,
1157 ) {
1158 let view = conversation_view.read(cx);
1159 let thread_id = view.thread_id;
1160 let Some(thread) = view.root_thread(cx) else {
1161 return;
1162 };
1163
1164 let thread_ref = thread.read(cx);
1165 if thread_ref.is_draft_thread() || thread_ref.project().read(cx).is_via_collab() {
1166 return;
1167 }
1168
1169 let existing_thread = self.entry(thread_id);
1170 let session_id = Some(thread_ref.session_id().clone());
1171 let title = thread_ref.title();
1172
1173 let updated_at = Utc::now();
1174
1175 let created_at = existing_thread
1176 .and_then(|t| t.created_at)
1177 .unwrap_or_else(|| updated_at);
1178
1179 let interacted_at = existing_thread
1180 .map(|t| t.interacted_at)
1181 .unwrap_or(Some(updated_at));
1182
1183 let agent_id = thread_ref.connection().agent_id();
1184
1185 // Preserve project-dependent fields for archived threads.
1186 // The worktree may already have been removed from the
1187 // project as part of the archive flow, so re-evaluating
1188 // these from the current project state would yield
1189 // empty/incorrect results.
1190 let (worktree_paths, remote_connection) =
1191 if let Some(existing) = existing_thread.filter(|t| t.archived) {
1192 (
1193 existing.worktree_paths.clone(),
1194 existing.remote_connection.clone(),
1195 )
1196 } else {
1197 let project = thread_ref.project().read(cx);
1198 let worktree_paths = project.worktree_paths(cx);
1199 let remote_connection = project.remote_connection_options(cx);
1200
1201 (worktree_paths, remote_connection)
1202 };
1203
1204 // Threads without a folder path (e.g. started in an empty
1205 // window) are archived by default so they don't get lost,
1206 // because they won't show up in the sidebar. Users can reload
1207 // them from the archive.
1208 let archived = existing_thread
1209 .map(|t| t.archived)
1210 .unwrap_or(worktree_paths.is_empty());
1211
1212 let metadata = ThreadMetadata {
1213 thread_id,
1214 session_id,
1215 agent_id,
1216 title,
1217 created_at: Some(created_at),
1218 interacted_at,
1219 updated_at,
1220 worktree_paths,
1221 remote_connection,
1222 archived,
1223 };
1224
1225 self.save(metadata, cx);
1226 }
1227}
1228
// Allows the store to be installed and looked up as a GPUI global.
impl Global for ThreadMetadataStore {}
1230
/// Newtype over the SQLite connection that owns the sidebar-thread metadata
/// schema (see the `Domain` impl below for the migrations).
struct ThreadMetadataDb(ThreadSafeConnection);
1232
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Append-only migration list: never edit an existing entry in place,
    // only add new statements at the end.
    const MIGRATIONS: &[&str] = &[
        // Original schema, keyed by ACP session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // Archiving flag plus the "main" worktree path columns.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Archived git worktrees, initially linked to threads by session id.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // Re-key both tables by a stable, randomly generated thread_id
        // instead of the session id: backfill missing ids, copy link rows
        // over, then rebuild sidebar_threads with thread_id as primary key.
        sql!(
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
        // Garbage-collect rows that never acquired a session id, along with
        // any archived worktrees only they referenced.
        sql!(
            DELETE FROM thread_archived_worktrees
            WHERE thread_id IN (
                SELECT thread_id FROM sidebar_threads WHERE session_id IS NULL
            );

            DELETE FROM sidebar_threads WHERE session_id IS NULL;

            DELETE FROM archived_git_worktrees
            WHERE id NOT IN (
                SELECT archived_worktree_id FROM thread_archived_worktrees
            );
        ),
        // Track when the user last interacted with a thread.
        sql!(
            ALTER TABLE sidebar_threads ADD COLUMN interacted_at TEXT;
        ),
    ];
}
1327
// Expose a lazily-initialized global connection for `ThreadMetadataDb`.
db::static_connection!(ThreadMetadataDb, []);
1329
1330impl ThreadMetadataDb {
1331 #[allow(dead_code)]
1332 pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
1333 self.select::<ThreadId>(
1334 "SELECT thread_id FROM sidebar_threads \
1335 WHERE session_id IS NOT NULL \
1336 ORDER BY updated_at DESC",
1337 )?()
1338 }
1339
    /// Column list consumed by the `Column` impl for `ThreadMetadata`; the
    /// select order here must stay in lockstep with the order that impl
    /// reads columns in.
    const LIST_QUERY: &str = "SELECT thread_id, session_id, agent_id, title, updated_at, \
        created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, \
        main_worktree_paths_order, remote_connection \
        FROM sidebar_threads \
        WHERE session_id IS NOT NULL \
        ORDER BY updated_at DESC";
1346
1347 /// List all sidebar thread metadata, ordered by updated_at descending.
1348 ///
1349 /// Only returns threads that have a `session_id`.
1350 pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
1351 self.select::<ThreadMetadata>(Self::LIST_QUERY)?()
1352 }
1353
1354 /// Upsert metadata for a thread.
1355 pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
1356 anyhow::ensure!(
1357 row.session_id.is_some(),
1358 "refusing to persist thread metadata without a session_id"
1359 );
1360
1361 let session_id = row.session_id.as_ref().map(|s| s.0.clone());
1362 let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
1363 None
1364 } else {
1365 Some(row.agent_id.to_string())
1366 };
1367 let title = row
1368 .title
1369 .as_ref()
1370 .map(|t| t.to_string())
1371 .unwrap_or_default();
1372 let updated_at = row.updated_at.to_rfc3339();
1373 let created_at = row.created_at.map(|dt| dt.to_rfc3339());
1374 let interacted_at = row.interacted_at.map(|dt| dt.to_rfc3339());
1375 let serialized = row.folder_paths().serialize();
1376 let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
1377 (None, None)
1378 } else {
1379 (Some(serialized.paths), Some(serialized.order))
1380 };
1381 let main_serialized = row.main_worktree_paths().serialize();
1382 let (main_worktree_paths, main_worktree_paths_order) =
1383 if row.main_worktree_paths().is_empty() {
1384 (None, None)
1385 } else {
1386 (Some(main_serialized.paths), Some(main_serialized.order))
1387 };
1388 let remote_connection = row
1389 .remote_connection
1390 .as_ref()
1391 .map(serde_json::to_string)
1392 .transpose()
1393 .context("serialize thread metadata remote connection")?;
1394 let thread_id = row.thread_id;
1395 let archived = row.archived;
1396
1397 self.write(move |conn| {
1398 let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
1399 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) \
1400 ON CONFLICT(thread_id) DO UPDATE SET \
1401 session_id = excluded.session_id, \
1402 agent_id = excluded.agent_id, \
1403 title = excluded.title, \
1404 updated_at = excluded.updated_at, \
1405 created_at = excluded.created_at, \
1406 interacted_at = excluded.interacted_at, \
1407 folder_paths = excluded.folder_paths, \
1408 folder_paths_order = excluded.folder_paths_order, \
1409 archived = excluded.archived, \
1410 main_worktree_paths = excluded.main_worktree_paths, \
1411 main_worktree_paths_order = excluded.main_worktree_paths_order, \
1412 remote_connection = excluded.remote_connection";
1413 let mut stmt = Statement::prepare(conn, sql)?;
1414 let mut i = stmt.bind(&thread_id, 1)?;
1415 i = stmt.bind(&session_id, i)?;
1416 i = stmt.bind(&agent_id, i)?;
1417 i = stmt.bind(&title, i)?;
1418 i = stmt.bind(&updated_at, i)?;
1419 i = stmt.bind(&created_at, i)?;
1420 i = stmt.bind(&interacted_at, i)?;
1421 i = stmt.bind(&folder_paths, i)?;
1422 i = stmt.bind(&folder_paths_order, i)?;
1423 i = stmt.bind(&archived, i)?;
1424 i = stmt.bind(&main_worktree_paths, i)?;
1425 i = stmt.bind(&main_worktree_paths_order, i)?;
1426 stmt.bind(&remote_connection, i)?;
1427 stmt.exec()
1428 })
1429 .await
1430 }
1431
1432 /// Delete metadata for a single thread.
1433 pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
1434 self.write(move |conn| {
1435 let mut stmt =
1436 Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
1437 stmt.bind(&thread_id, 1)?;
1438 stmt.exec()
1439 })
1440 .await
1441 }
1442
1443 pub async fn create_archived_worktree(
1444 &self,
1445 worktree_path: String,
1446 main_repo_path: String,
1447 branch_name: Option<String>,
1448 staged_commit_hash: String,
1449 unstaged_commit_hash: String,
1450 original_commit_hash: String,
1451 ) -> anyhow::Result<i64> {
1452 self.write(move |conn| {
1453 let mut stmt = Statement::prepare(
1454 conn,
1455 "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
1456 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
1457 RETURNING id",
1458 )?;
1459 let mut i = stmt.bind(&worktree_path, 1)?;
1460 i = stmt.bind(&main_repo_path, i)?;
1461 i = stmt.bind(&branch_name, i)?;
1462 i = stmt.bind(&staged_commit_hash, i)?;
1463 i = stmt.bind(&unstaged_commit_hash, i)?;
1464 stmt.bind(&original_commit_hash, i)?;
1465 stmt.maybe_row::<i64>()?.context("expected RETURNING id")
1466 })
1467 .await
1468 }
1469
1470 pub async fn link_thread_to_archived_worktree(
1471 &self,
1472 thread_id: ThreadId,
1473 archived_worktree_id: i64,
1474 ) -> anyhow::Result<()> {
1475 self.write(move |conn| {
1476 let mut stmt = Statement::prepare(
1477 conn,
1478 "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
1479 VALUES (?1, ?2)",
1480 )?;
1481 let i = stmt.bind(&thread_id, 1)?;
1482 stmt.bind(&archived_worktree_id, i)?;
1483 stmt.exec()
1484 })
1485 .await
1486 }
1487
1488 pub async fn get_archived_worktrees_for_thread(
1489 &self,
1490 thread_id: ThreadId,
1491 ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
1492 self.select_bound::<ThreadId, ArchivedGitWorktree>(
1493 "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
1494 FROM archived_git_worktrees a \
1495 JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
1496 WHERE t.thread_id = ?1",
1497 )?(thread_id)
1498 }
1499
1500 pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
1501 self.write(move |conn| {
1502 let mut stmt = Statement::prepare(
1503 conn,
1504 "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
1505 )?;
1506 stmt.bind(&id, 1)?;
1507 stmt.exec()?;
1508
1509 let mut stmt =
1510 Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
1511 stmt.bind(&id, 1)?;
1512 stmt.exec()
1513 })
1514 .await
1515 }
1516
1517 pub async fn unlink_thread_from_all_archived_worktrees(
1518 &self,
1519 thread_id: ThreadId,
1520 ) -> anyhow::Result<()> {
1521 self.write(move |conn| {
1522 let mut stmt = Statement::prepare(
1523 conn,
1524 "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
1525 )?;
1526 stmt.bind(&thread_id, 1)?;
1527 stmt.exec()
1528 })
1529 .await
1530 }
1531
1532 pub async fn is_archived_worktree_referenced(
1533 &self,
1534 archived_worktree_id: i64,
1535 ) -> anyhow::Result<bool> {
1536 self.select_row_bound::<i64, i64>(
1537 "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
1538 )?(archived_worktree_id)
1539 .map(|count| count.unwrap_or(0) > 0)
1540 }
1541
1542 pub fn get_all_archived_branch_names(
1543 &self,
1544 ) -> anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>> {
1545 let rows = self.select::<(ThreadId, String, String)>(
1546 "SELECT t.thread_id, a.worktree_path, a.branch_name \
1547 FROM thread_archived_worktrees t \
1548 JOIN archived_git_worktrees a ON a.id = t.archived_worktree_id \
1549 WHERE a.branch_name IS NOT NULL \
1550 ORDER BY a.id ASC",
1551 )?()?;
1552
1553 let mut result: HashMap<ThreadId, HashMap<PathBuf, String>> = HashMap::default();
1554 for (thread_id, worktree_path, branch_name) in rows {
1555 result
1556 .entry(thread_id)
1557 .or_default()
1558 .insert(PathBuf::from(worktree_path), branch_name);
1559 }
1560 Ok(result)
1561 }
1562}
1563
1564impl Column for ThreadMetadata {
1565 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1566 let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
1567 let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
1568 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1569 let (title, next): (String, i32) = Column::column(statement, next)?;
1570 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1571 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1572 let (interacted_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1573 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1574 let (folder_paths_order_str, next): (Option<String>, i32) =
1575 Column::column(statement, next)?;
1576 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1577 let (main_worktree_paths_str, next): (Option<String>, i32) =
1578 Column::column(statement, next)?;
1579 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1580 Column::column(statement, next)?;
1581 let (remote_connection_json, next): (Option<String>, i32) =
1582 Column::column(statement, next)?;
1583
1584 let agent_id = agent_id
1585 .map(|id| AgentId::new(id))
1586 .unwrap_or(ZED_AGENT_ID.clone());
1587
1588 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1589 let created_at = created_at_str
1590 .as_deref()
1591 .map(DateTime::parse_from_rfc3339)
1592 .transpose()?
1593 .map(|dt| dt.with_timezone(&Utc));
1594
1595 let interacted_at = interacted_at_str
1596 .as_deref()
1597 .map(DateTime::parse_from_rfc3339)
1598 .transpose()?
1599 .map(|dt| dt.with_timezone(&Utc));
1600
1601 let folder_paths = folder_paths_str
1602 .map(|paths| {
1603 PathList::deserialize(&util::path_list::SerializedPathList {
1604 paths,
1605 order: folder_paths_order_str.unwrap_or_default(),
1606 })
1607 })
1608 .unwrap_or_default();
1609
1610 let main_worktree_paths = main_worktree_paths_str
1611 .map(|paths| {
1612 PathList::deserialize(&util::path_list::SerializedPathList {
1613 paths,
1614 order: main_worktree_paths_order_str.unwrap_or_default(),
1615 })
1616 })
1617 .unwrap_or_default();
1618
1619 let remote_connection = remote_connection_json
1620 .as_deref()
1621 .map(serde_json::from_str::<RemoteConnectionOptions>)
1622 .transpose()
1623 .context("deserialize thread metadata remote connection")?;
1624
1625 let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
1626 .unwrap_or_else(|_| WorktreePaths::default());
1627
1628 let thread_id = ThreadId(thread_id_uuid);
1629
1630 Ok((
1631 ThreadMetadata {
1632 thread_id,
1633 session_id: id.map(acp::SessionId::new),
1634 agent_id,
1635 title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
1636 None
1637 } else {
1638 Some(title.into())
1639 },
1640 updated_at,
1641 created_at,
1642 interacted_at,
1643 worktree_paths,
1644 remote_connection,
1645 archived,
1646 },
1647 next,
1648 ))
1649 }
1650}
1651
impl Column for ArchivedGitWorktree {
    /// Deserialize an `ArchivedGitWorktree` from a row; the read order below
    /// must match the SELECT column order used by
    /// `ThreadMetadataDb::get_archived_worktrees_for_thread`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (i64, i32) = Column::column(statement, start_index)?;
        let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;

        Ok((
            ArchivedGitWorktree {
                id,
                // Paths are stored as TEXT and converted back here.
                worktree_path: PathBuf::from(worktree_path_str),
                main_repo_path: PathBuf::from(main_repo_path_str),
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            },
            next,
        ))
    }
}
1676
1677#[cfg(test)]
1678mod tests {
1679 use super::*;
1680 use acp_thread::StubAgentConnection;
1681 use action_log::ActionLog;
1682 use agent::DbThread;
1683 use agent_client_protocol as acp;
1684
1685 use gpui::{TestAppContext, VisualTestContext};
1686 use project::FakeFs;
1687 use project::Project;
1688 use remote::WslConnectionOptions;
1689 use std::path::Path;
1690 use std::rc::Rc;
1691 use workspace::MultiWorkspace;
1692
    /// Build a minimal `DbThread` fixture with the given title and
    /// timestamp; every other field is empty or default.
    fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
        DbThread {
            title: title.to_string().into(),
            messages: Vec::new(),
            updated_at,
            detailed_summary: None,
            initial_project_snapshot: None,
            cumulative_token_usage: Default::default(),
            request_token_usage: Default::default(),
            model: None,
            profile: None,
            imported: false,
            subagent_context: None,
            speed: None,
            thinking_enabled: false,
            thinking_effort: None,
            draft_prompt: None,
            ui_scroll_position: None,
        }
    }
1713
1714 fn make_metadata(
1715 session_id: &str,
1716 title: &str,
1717 updated_at: DateTime<Utc>,
1718 folder_paths: PathList,
1719 ) -> ThreadMetadata {
1720 ThreadMetadata {
1721 thread_id: ThreadId::new(),
1722 archived: false,
1723 session_id: Some(acp::SessionId::new(session_id)),
1724 agent_id: agent::ZED_AGENT_ID.clone(),
1725 title: if title.is_empty() {
1726 None
1727 } else {
1728 Some(title.to_string().into())
1729 },
1730 updated_at,
1731 created_at: Some(updated_at),
1732 interacted_at: None,
1733 worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
1734 remote_connection: None,
1735 }
1736 }
1737
    /// Install the globals (settings, theme, editor, stores, ...) that the
    /// panel and store tests rely on. The settings store is installed first,
    /// before the other subsystems are initialized.
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init("0.0.0".parse().unwrap(), cx);
            prompt_store::init(cx);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
            language_model::LanguageModelRegistry::test(cx);
        });
        // Let any initialization tasks spawned above settle.
        cx.run_until_parked();
    }
1754
    /// Create a test `MultiWorkspace` window for `project` and mount a fresh
    /// `AgentPanel` in its workspace, returning the panel together with the
    /// window's visual test context.
    fn setup_panel_with_project(
        project: Entity<Project>,
        cx: &mut TestAppContext,
    ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace_entity = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();
        let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
        let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
            cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
        });
        (panel, vcx)
    }
1770
1771 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1772 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1773 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1774 .unwrap();
1775 }
1776
    /// Clear the backfill marker and run both thread-metadata migrations,
    /// waiting for them to complete.
    fn run_store_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            // The remote-connection migration is handed the metadata
            // migration's task, presumably to sequence after it — confirm
            // against the migration functions if this ordering matters.
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        cx.run_until_parked();
    }
1785
    /// Seed the database directly, then verify that initializing the global
    /// store populates its in-memory caches (by session and by path) from
    /// what is on disk.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Use a per-test database name so parallel tests don't collide.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        // Let the store's initial reload from the database finish.
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // Each session should be indexed under its own folder path.
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1854
    /// Exercise the save / move-between-paths / delete lifecycle through the
    /// store and verify the per-path caches track each change.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );
        let session1_thread_id = initial_metadata.thread_id;

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );
        let session2_thread_id = second_metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially each session is indexed under its own folder path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under second_paths: it should move between the
        // per-path buckets, not appear in both.
        let moved_metadata = ThreadMetadata {
            thread_id: session1_thread_id,
            session_id: Some(acp::SessionId::new("session-1")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("First Thread".into()),
            updated_at: updated_time,
            created_at: Some(updated_time),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // session-1 has left its original path bucket...
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(first_path_entries.is_empty());

            // ...and now shares second_paths with session-2.
            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Deleting session-2 leaves only session-1 behind.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(session2_thread_id, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 1);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1982
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-seed metadata for "a-session-0"; the migration must leave this
        // row untouched even though the native store also has that session.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Native-store threads: the first collides with the pre-seeded
        // session; the other three have no metadata yet and must be migrated.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        // Persist each thread through the native ThreadStore so the
        // migration has rows to discover.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // One pre-existing row plus three migrated rows, all with the Zed agent id.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-seeded entry keeps its original title and archived state
        // rather than being overwritten by the native-store thread.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // Freshly migrated threads start out archived.
        let migrated_entries: Vec<_> = list
            .iter()
            .filter(|metadata| {
                !metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .collect();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
2102
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        // Metadata already exists for the only session the native store knows.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a native thread for the same session with a newer title and
        // timestamp; the migration must still treat it as already migrated.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // No new rows were created: just the original metadata entry.
        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
2163
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        // Save metadata with no remote connection; the backfill should fill
        // it in from the matching workspace row created below.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Write directly to the workspace DB: insert a WSL remote connection
        // row and point a workspace (with the thread's paths) at it.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Reset the backfill marker so the migration runs again, then run it.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        // The thread metadata should now carry the workspace's WSL connection.
        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2239
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        // Persist every thread through the native ThreadStore so the
        // migration has rows to discover.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        // Migrate; per-project archiving should cap unarchived threads at five.
        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2328
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        // Open a fresh (empty) thread in the panel.
        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Draft threads no longer create metadata entries.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 0);
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), so no metadata is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(
                store.entry_ids().count(),
                0,
                "expected title updates on empty thread to not create metadata"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Exactly one metadata entry now exists, keyed to the live session.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2382
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let session_id = crate::test_support::active_session_id(&panel, &vcx);
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());

        // Give the thread content so a metadata entry is created for it.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });

        // Dropping the panel releases the ConversationView and its thread.
        drop(panel);
        cx.update(|_| {});
        cx.run_until_parked();

        // The metadata entry must survive the thread being released.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });
    }
2419
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        // Push content + title so metadata is actually created.
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        // The project-less thread is archived by default; the one with a
        // worktree keeps its paths and stays unarchived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2490
2491 #[gpui::test]
2492 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2493 init_test(cx);
2494
2495 let fs = FakeFs::new(cx.executor());
2496 let project = Project::test(fs, None::<&Path>, cx).await;
2497 let connection = Rc::new(StubAgentConnection::new());
2498
2499 // Create a regular (non-subagent) thread through the panel.
2500 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2501 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2502
2503 let regular_thread =
2504 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2505 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2506
2507 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2508 thread.push_user_content_block(None, "content".into(), cx);
2509 thread.set_title("Regular Thread".into(), cx).detach();
2510 });
2511 vcx.run_until_parked();
2512
2513 // Create a standalone subagent AcpThread (not wrapped in a
2514 // ConversationView). The ThreadMetadataStore only observes
2515 // ConversationView events, so this thread's events should
2516 // have no effect on sidebar metadata.
2517 let subagent_session_id = acp::SessionId::new("subagent-session");
2518 let subagent_thread = cx.update(|cx| {
2519 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2520 cx.new(|cx| {
2521 acp_thread::AcpThread::new(
2522 Some(regular_session_id.clone()),
2523 Some("Subagent Thread".into()),
2524 None,
2525 connection.clone(),
2526 project.clone(),
2527 action_log,
2528 subagent_session_id.clone(),
2529 watch::Receiver::constant(acp::PromptCapabilities::new()),
2530 cx,
2531 )
2532 })
2533 });
2534
2535 cx.update(|cx| {
2536 subagent_thread.update(cx, |thread, cx| {
2537 thread
2538 .set_title("Subagent Thread Title".into(), cx)
2539 .detach();
2540 });
2541 });
2542 cx.run_until_parked();
2543
2544 // Only the regular thread should appear in sidebar metadata.
2545 // The subagent thread is excluded because the metadata store
2546 // only observes ConversationView events.
2547 let list = cx.update(|cx| {
2548 let store = ThreadMetadataStore::global(cx);
2549 store.read(cx).entries().cloned().collect::<Vec<_>>()
2550 });
2551
2552 assert_eq!(
2553 list.len(),
2554 1,
2555 "Expected only the regular thread in sidebar metadata, \
2556 but found {} entries (subagent threads are leaking into the sidebar)",
2557 list.len(),
2558 );
2559 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2560 assert_eq!(list[0].display_title(), "Regular Thread");
2561 }
2562
2563 #[test]
2564 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2565 let now = Utc::now();
2566
2567 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2568 let thread_id = meta.thread_id;
2569 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2570
2571 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2572
2573 assert_eq!(deduped.len(), 1);
2574 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2575 }
2576
2577 #[test]
2578 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2579 let now = Utc::now();
2580 let later = now + chrono::Duration::seconds(1);
2581
2582 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2583 let shared_thread_id = old_metadata.thread_id;
2584 let new_metadata = ThreadMetadata {
2585 thread_id: shared_thread_id,
2586 ..make_metadata("session-1", "New Title", later, PathList::default())
2587 };
2588
2589 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2590 DbOperation::Upsert(old_metadata),
2591 DbOperation::Upsert(new_metadata.clone()),
2592 ]);
2593
2594 assert_eq!(deduped.len(), 1);
2595 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2596 }
2597
2598 #[test]
2599 fn test_dedup_db_operations_preserves_distinct_sessions() {
2600 let now = Utc::now();
2601
2602 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2603 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2604 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2605 DbOperation::Upsert(metadata1.clone()),
2606 DbOperation::Upsert(metadata2.clone()),
2607 ]);
2608
2609 assert_eq!(deduped.len(), 2);
2610 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2611 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2612 }
2613
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: listed for its path, nothing archived yet.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        // Archived: gone from the path listing, present in archived_entries.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchived: back in the path listing, archived set empty again.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2696
2697 #[gpui::test]
2698 async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
2699 init_test(cx);
2700
2701 let paths = PathList::new(&[Path::new("/project-a")]);
2702 let now = Utc::now();
2703
2704 let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
2705 let metadata2 = make_metadata(
2706 "session-2",
2707 "Archived Thread",
2708 now - chrono::Duration::seconds(1),
2709 paths.clone(),
2710 );
2711 let session2_thread_id = metadata2.thread_id;
2712
2713 cx.update(|cx| {
2714 let store = ThreadMetadataStore::global(cx);
2715 store.update(cx, |store, cx| {
2716 store.save(metadata1, cx);
2717 store.save(metadata2, cx);
2718 });
2719 });
2720
2721 cx.run_until_parked();
2722
2723 cx.update(|cx| {
2724 let store = ThreadMetadataStore::global(cx);
2725 store.update(cx, |store, cx| {
2726 store.archive(session2_thread_id, None, cx);
2727 });
2728 });
2729
2730 cx.run_until_parked();
2731
2732 cx.update(|cx| {
2733 let store = ThreadMetadataStore::global(cx);
2734 let store = store.read(cx);
2735
2736 let path_entries: Vec<_> = store
2737 .entries_for_path(&paths, None)
2738 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
2739 .collect();
2740 assert_eq!(path_entries, vec!["session-1"]);
2741
2742 assert_eq!(store.entries().count(), 2);
2743
2744 let archived: Vec<_> = store
2745 .archived_entries()
2746 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
2747 .collect();
2748 assert_eq!(archived, vec!["session-2"]);
2749 });
2750 }
2751
    #[gpui::test]
    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
        init_test(cx);

        let main_paths = PathList::new(&[Path::new("/project-a")]);
        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
        let now = Utc::now();

        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });

        // Three threads at the same folder_paths but different hosts.
        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());

        let mut remote_a_thread = make_metadata(
            "remote-a-session",
            "Remote A Thread",
            now - chrono::Duration::seconds(1),
            main_paths.clone(),
        );
        remote_a_thread.remote_connection = Some(remote_a.clone());

        let mut remote_b_thread = make_metadata(
            "remote-b-session",
            "Remote B Thread",
            now - chrono::Duration::seconds(2),
            main_paths.clone(),
        );
        remote_b_thread.remote_connection = Some(remote_b.clone());

        // Linked-worktree threads: same main worktree path, different
        // worktree folder, one local and one on remote A.
        let linked_worktree_paths =
            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();

        let local_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("local-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Local Linked".into()),
            updated_at: now,
            created_at: Some(now),
            interacted_at: None,
            worktree_paths: linked_worktree_paths.clone(),
            remote_connection: None,
        };

        let remote_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("remote-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Remote Linked".into()),
            updated_at: now - chrono::Duration::seconds(1),
            created_at: Some(now - chrono::Duration::seconds(1)),
            interacted_at: None,
            worktree_paths: linked_worktree_paths,
            remote_connection: Some(remote_a.clone()),
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(local_thread, cx);
                store.save(remote_a_thread, cx);
                store.save(remote_b_thread, cx);
                store.save(local_linked_thread, cx);
                store.save(remote_linked_thread, cx);
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // entries_for_path with no remote filter yields only the local thread.
            let local_entries: Vec<_> = store
                .entries_for_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(local_entries, vec!["local-session"]);

            // Filtering by a remote yields only that host's thread.
            let remote_a_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_a_entries, vec!["remote-a-session"]);

            let remote_b_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_b))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_b_entries, vec!["remote-b-session"]);

            // Main-worktree lookup also includes linked-worktree threads,
            // still split by host.
            let mut local_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            local_main_entries.sort();
            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);

            let mut remote_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            remote_main_entries.sort();
            assert_eq!(
                remote_main_entries,
                vec!["remote-a-session", "remote-linked"]
            );
        });
    }
2863
2864 #[gpui::test]
2865 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
2866 init_test(cx);
2867
2868 let paths = PathList::new(&[Path::new("/project-a")]);
2869 let now = Utc::now();
2870
2871 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
2872 let m2 = make_metadata(
2873 "session-2",
2874 "Thread Two",
2875 now - chrono::Duration::seconds(1),
2876 paths.clone(),
2877 );
2878 let m3 = make_metadata(
2879 "session-3",
2880 "Thread Three",
2881 now - chrono::Duration::seconds(2),
2882 paths,
2883 );
2884
2885 cx.update(|cx| {
2886 let store = ThreadMetadataStore::global(cx);
2887 store.update(cx, |store, cx| {
2888 store.save_all(vec![m1, m2, m3], cx);
2889 });
2890 });
2891
2892 cx.run_until_parked();
2893
2894 cx.update(|cx| {
2895 let store = ThreadMetadataStore::global(cx);
2896 let store = store.read(cx);
2897
2898 assert_eq!(store.entries().count(), 3);
2899 assert!(
2900 store
2901 .entry_by_session(&acp::SessionId::new("session-1"))
2902 .is_some()
2903 );
2904 assert!(
2905 store
2906 .entry_by_session(&acp::SessionId::new("session-2"))
2907 .is_some()
2908 );
2909 assert!(
2910 store
2911 .entry_by_session(&acp::SessionId::new("session-3"))
2912 .is_some()
2913 );
2914
2915 assert_eq!(store.entry_ids().count(), 3);
2916 });
2917 }
2918
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Reload the store from the database, discarding in-memory state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        // The archived flag must have round-tripped through the DB.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2977
2978 #[gpui::test]
2979 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2980 init_test(cx);
2981
2982 cx.run_until_parked();
2983
2984 cx.update(|cx| {
2985 let store = ThreadMetadataStore::global(cx);
2986 store.update(cx, |store, cx| {
2987 store.archive(ThreadId::new(), None, cx);
2988 });
2989 });
2990
2991 cx.run_until_parked();
2992
2993 cx.update(|cx| {
2994 let store = ThreadMetadataStore::global(cx);
2995 let store = store.read(cx);
2996
2997 assert!(store.is_empty());
2998 assert_eq!(store.entries().count(), 0);
2999 assert_eq!(store.archived_entries().count(), 0);
3000 });
3001 }
3002
3003 #[gpui::test]
3004 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
3005 init_test(cx);
3006
3007 let paths = PathList::new(&[Path::new("/project-a")]);
3008 let now = Utc::now();
3009 let metadata = make_metadata("session-1", "Thread 1", now, paths);
3010 let thread_id = metadata.thread_id;
3011
3012 cx.update(|cx| {
3013 let store = ThreadMetadataStore::global(cx);
3014 store.update(cx, |store, cx| {
3015 store.save(metadata.clone(), cx);
3016 store.archive(thread_id, None, cx);
3017 });
3018 });
3019
3020 cx.run_until_parked();
3021
3022 cx.update(|cx| {
3023 let store = ThreadMetadataStore::global(cx);
3024 let store = store.read(cx);
3025
3026 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
3027 pretty_assertions::assert_eq!(
3028 entries,
3029 vec![ThreadMetadata {
3030 archived: true,
3031 ..metadata
3032 }]
3033 );
3034 });
3035 }
3036
3037 #[gpui::test]
3038 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
3039 init_test(cx);
3040 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3041
3042 let id = store
3043 .read_with(cx, |store, cx| {
3044 store.create_archived_worktree(
3045 "/tmp/worktree".to_string(),
3046 "/home/user/repo".to_string(),
3047 Some("feature-branch".to_string()),
3048 "staged_aaa".to_string(),
3049 "unstaged_bbb".to_string(),
3050 "original_000".to_string(),
3051 cx,
3052 )
3053 })
3054 .await
3055 .unwrap();
3056
3057 let thread_id_1 = ThreadId::new();
3058
3059 store
3060 .read_with(cx, |store, cx| {
3061 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3062 })
3063 .await
3064 .unwrap();
3065
3066 let worktrees = store
3067 .read_with(cx, |store, cx| {
3068 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3069 })
3070 .await
3071 .unwrap();
3072
3073 assert_eq!(worktrees.len(), 1);
3074 let wt = &worktrees[0];
3075 assert_eq!(wt.id, id);
3076 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
3077 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
3078 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
3079 assert_eq!(wt.staged_commit_hash, "staged_aaa");
3080 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
3081 assert_eq!(wt.original_commit_hash, "original_000");
3082 }
3083
3084 #[gpui::test]
3085 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
3086 init_test(cx);
3087 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3088
3089 let id = store
3090 .read_with(cx, |store, cx| {
3091 store.create_archived_worktree(
3092 "/tmp/worktree".to_string(),
3093 "/home/user/repo".to_string(),
3094 Some("main".to_string()),
3095 "deadbeef".to_string(),
3096 "deadbeef".to_string(),
3097 "original_000".to_string(),
3098 cx,
3099 )
3100 })
3101 .await
3102 .unwrap();
3103
3104 let thread_id_1 = ThreadId::new();
3105
3106 store
3107 .read_with(cx, |store, cx| {
3108 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3109 })
3110 .await
3111 .unwrap();
3112
3113 store
3114 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
3115 .await
3116 .unwrap();
3117
3118 let worktrees = store
3119 .read_with(cx, |store, cx| {
3120 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3121 })
3122 .await
3123 .unwrap();
3124 assert!(worktrees.is_empty());
3125 }
3126
3127 #[gpui::test]
3128 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
3129 init_test(cx);
3130 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3131
3132 let id = store
3133 .read_with(cx, |store, cx| {
3134 store.create_archived_worktree(
3135 "/tmp/worktree".to_string(),
3136 "/home/user/repo".to_string(),
3137 None,
3138 "abc123".to_string(),
3139 "abc123".to_string(),
3140 "original_000".to_string(),
3141 cx,
3142 )
3143 })
3144 .await
3145 .unwrap();
3146
3147 let thread_id_1 = ThreadId::new();
3148 let thread_id_2 = ThreadId::new();
3149
3150 store
3151 .read_with(cx, |store, cx| {
3152 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3153 })
3154 .await
3155 .unwrap();
3156
3157 store
3158 .read_with(cx, |store, cx| {
3159 store.link_thread_to_archived_worktree(thread_id_2, id, cx)
3160 })
3161 .await
3162 .unwrap();
3163
3164 let wt1 = store
3165 .read_with(cx, |store, cx| {
3166 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3167 })
3168 .await
3169 .unwrap();
3170
3171 let wt2 = store
3172 .read_with(cx, |store, cx| {
3173 store.get_archived_worktrees_for_thread(thread_id_2, cx)
3174 })
3175 .await
3176 .unwrap();
3177
3178 assert_eq!(wt1.len(), 1);
3179 assert_eq!(wt2.len(), 1);
3180 assert_eq!(wt1[0].id, wt2[0].id);
3181 }
3182
3183 #[gpui::test]
3184 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
3185 init_test(cx);
3186 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3187
3188 let original_paths = PathList::new(&[
3189 Path::new("/projects/worktree-a"),
3190 Path::new("/projects/worktree-b"),
3191 Path::new("/other/unrelated"),
3192 ]);
3193 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3194 let thread_id = meta.thread_id;
3195
3196 store.update(cx, |store, cx| {
3197 store.save(meta, cx);
3198 });
3199
3200 let replacements = vec![
3201 (
3202 PathBuf::from("/projects/worktree-a"),
3203 PathBuf::from("/restored/worktree-a"),
3204 ),
3205 (
3206 PathBuf::from("/projects/worktree-b"),
3207 PathBuf::from("/restored/worktree-b"),
3208 ),
3209 ];
3210
3211 store.update(cx, |store, cx| {
3212 store.complete_worktree_restore(thread_id, &replacements, cx);
3213 });
3214
3215 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3216 let entry = entry.unwrap();
3217 let paths = entry.folder_paths().paths();
3218 assert_eq!(paths.len(), 3);
3219 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3220 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3221 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3222 }
3223
3224 #[gpui::test]
3225 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
3226 init_test(cx);
3227 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3228
3229 let original_paths =
3230 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3231 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3232 let thread_id = meta.thread_id;
3233
3234 store.update(cx, |store, cx| {
3235 store.save(meta, cx);
3236 });
3237
3238 let replacements = vec![
3239 (
3240 PathBuf::from("/projects/worktree-a"),
3241 PathBuf::from("/new/worktree-a"),
3242 ),
3243 (
3244 PathBuf::from("/nonexistent/path"),
3245 PathBuf::from("/should/not/appear"),
3246 ),
3247 ];
3248
3249 store.update(cx, |store, cx| {
3250 store.complete_worktree_restore(thread_id, &replacements, cx);
3251 });
3252
3253 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3254 let entry = entry.unwrap();
3255 let paths = entry.folder_paths().paths();
3256 assert_eq!(paths.len(), 2);
3257 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3258 assert!(paths.contains(&PathBuf::from("/other/path")));
3259 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3260 }
3261
3262 #[gpui::test]
3263 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
3264 init_test(cx);
3265 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3266
3267 let original_paths = PathList::new(&[
3268 Path::new("/projects/worktree-a"),
3269 Path::new("/projects/worktree-b"),
3270 Path::new("/other/unrelated"),
3271 ]);
3272 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3273 let thread_id = meta.thread_id;
3274
3275 store.update(cx, |store, cx| {
3276 store.save(meta, cx);
3277 });
3278
3279 let replacements = vec![
3280 (
3281 PathBuf::from("/projects/worktree-a"),
3282 PathBuf::from("/restored/worktree-a"),
3283 ),
3284 (
3285 PathBuf::from("/projects/worktree-b"),
3286 PathBuf::from("/restored/worktree-b"),
3287 ),
3288 ];
3289
3290 store.update(cx, |store, cx| {
3291 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3292 });
3293
3294 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3295 let entry = entry.unwrap();
3296 let paths = entry.folder_paths().paths();
3297 assert_eq!(paths.len(), 3);
3298 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3299 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3300 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3301 }
3302
3303 #[gpui::test]
3304 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
3305 init_test(cx);
3306 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3307
3308 let original_paths =
3309 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3310 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3311 let thread_id = meta.thread_id;
3312
3313 store.update(cx, |store, cx| {
3314 store.save(meta, cx);
3315 });
3316
3317 let replacements = vec![
3318 (
3319 PathBuf::from("/projects/worktree-a"),
3320 PathBuf::from("/new/worktree-a"),
3321 ),
3322 (
3323 PathBuf::from("/nonexistent/path"),
3324 PathBuf::from("/should/not/appear"),
3325 ),
3326 ];
3327
3328 store.update(cx, |store, cx| {
3329 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3330 });
3331
3332 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3333 let entry = entry.unwrap();
3334 let paths = entry.folder_paths().paths();
3335 assert_eq!(paths.len(), 2);
3336 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3337 assert!(paths.contains(&PathBuf::from("/other/path")));
3338 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3339 }
3340
3341 #[gpui::test]
3342 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
3343 init_test(cx);
3344 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3345
3346 let id1 = store
3347 .read_with(cx, |store, cx| {
3348 store.create_archived_worktree(
3349 "/projects/worktree-a".to_string(),
3350 "/home/user/repo".to_string(),
3351 Some("branch-a".to_string()),
3352 "staged_a".to_string(),
3353 "unstaged_a".to_string(),
3354 "original_000".to_string(),
3355 cx,
3356 )
3357 })
3358 .await
3359 .unwrap();
3360
3361 let id2 = store
3362 .read_with(cx, |store, cx| {
3363 store.create_archived_worktree(
3364 "/projects/worktree-b".to_string(),
3365 "/home/user/repo".to_string(),
3366 Some("branch-b".to_string()),
3367 "staged_b".to_string(),
3368 "unstaged_b".to_string(),
3369 "original_000".to_string(),
3370 cx,
3371 )
3372 })
3373 .await
3374 .unwrap();
3375
3376 let thread_id_1 = ThreadId::new();
3377
3378 store
3379 .read_with(cx, |store, cx| {
3380 store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
3381 })
3382 .await
3383 .unwrap();
3384
3385 store
3386 .read_with(cx, |store, cx| {
3387 store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
3388 })
3389 .await
3390 .unwrap();
3391
3392 let worktrees = store
3393 .read_with(cx, |store, cx| {
3394 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3395 })
3396 .await
3397 .unwrap();
3398
3399 assert_eq!(worktrees.len(), 2);
3400
3401 let paths: Vec<&Path> = worktrees
3402 .iter()
3403 .map(|w| w.worktree_path.as_path())
3404 .collect();
3405 assert!(paths.contains(&Path::new("/projects/worktree-a")));
3406 assert!(paths.contains(&Path::new("/projects/worktree-b")));
3407 }
3408
3409 // ── Migration tests ────────────────────────────────────────────────
3410
    /// Verifies the thread_id primary-key migration: rows whose `thread_id`
    /// was NULL under the old schema are backfilled with fresh, unique
    /// 16-byte ids, pre-existing ids are preserved, and dependent join rows
    /// in `thread_archived_worktrees` are re-keyed to the backfilled ids.
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id, two without.
        // (`exec` returns a prepared-statement closure; the trailing `()` runs it.)
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at, thread_id) \
                 VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                 (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                 (session_id, archived_worktree_id) \
                 VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
            .unwrap();

        // Run all current migrations. sqlez skips the already-applied ones and
        // runs the remaining migrations.
        run_thread_metadata_migrations(&connection);

        // All 3 rows should survive with non-NULL thread_ids.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
            .unwrap()
            .unwrap();
        assert_eq!(
            original_tid,
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
            .unwrap()
            .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
            .unwrap()
            .unwrap();
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3567
    // ── WorktreePaths tests ────────────────────────────────────────────
3569
3570 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3571 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3572 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3573 .iter()
3574 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3575 .unzip();
3576 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3577 }
3578
3579 #[test]
3580 fn test_thread_worktree_paths_full_add_then_remove_cycle() {
3581 // Full scenario from the issue:
3582 // 1. Start with linked worktree selectric → zed
3583 // 2. Add cloud
3584 // 3. Remove zed
3585
3586 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3587
3588 // Step 2: add cloud
3589 paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));
3590
3591 assert_eq!(paths.ordered_pairs().count(), 2);
3592 assert_eq!(
3593 paths.folder_path_list(),
3594 &PathList::new(&[
3595 Path::new("/worktrees/selectric/zed"),
3596 Path::new("/projects/cloud"),
3597 ])
3598 );
3599 assert_eq!(
3600 paths.main_worktree_path_list(),
3601 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
3602 );
3603
3604 // Step 3: remove zed
3605 paths.remove_main_path(Path::new("/projects/zed"));
3606
3607 assert_eq!(paths.ordered_pairs().count(), 1);
3608 assert_eq!(
3609 paths.folder_path_list(),
3610 &PathList::new(&[Path::new("/projects/cloud")])
3611 );
3612 assert_eq!(
3613 paths.main_worktree_path_list(),
3614 &PathList::new(&[Path::new("/projects/cloud")])
3615 );
3616 }
3617
3618 #[test]
3619 fn test_thread_worktree_paths_add_is_idempotent() {
3620 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3621
3622 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3623
3624 assert_eq!(paths.ordered_pairs().count(), 1);
3625 }
3626
3627 #[test]
3628 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3629 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3630
3631 paths.remove_main_path(Path::new("/projects/nonexistent"));
3632
3633 assert_eq!(paths.ordered_pairs().count(), 1);
3634 }
3635
3636 #[test]
3637 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3638 let folder = PathList::new(&[
3639 Path::new("/worktrees/selectric/zed"),
3640 Path::new("/projects/cloud"),
3641 ]);
3642 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3643
3644 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3645
3646 let pairs: Vec<_> = paths
3647 .ordered_pairs()
3648 .map(|(m, f)| (m.clone(), f.clone()))
3649 .collect();
3650 assert_eq!(pairs.len(), 2);
3651 assert!(pairs.contains(&(
3652 PathBuf::from("/projects/zed"),
3653 PathBuf::from("/worktrees/selectric/zed")
3654 )));
3655 assert!(pairs.contains(&(
3656 PathBuf::from("/projects/cloud"),
3657 PathBuf::from("/projects/cloud")
3658 )));
3659 }
3660
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees sharing the same main repo path: `ordered_pairs`
        // must keep both (main, folder) entries even though the main path
        // repeats.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // The expected `main_worktree_path_list` below is built from the same
        // duplicated input, so the assertion holds whether or not `PathList`
        // deduplicates internally.
        // NOTE(review): the two comments originally here contradicted each
        // other on whether PathList dedupes ("stores unique sorted paths" vs.
        // "keeps all entries from its input") — confirm against PathList's docs.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3686
3687 #[test]
3688 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3689 let folder = PathList::new(&[
3690 Path::new("/worktrees/selectric/zed"),
3691 Path::new("/projects/cloud"),
3692 ]);
3693 let main = PathList::new(&[Path::new("/projects/zed")]);
3694
3695 let result = WorktreePaths::from_path_lists(main, folder);
3696 assert!(result.is_err());
3697 }
3698
    /// Regression test: archiving a thread created in a git worktree must
    /// preserve the thread's folder paths so that restoring it later does
    /// not prompt the user to re-associate a project.
    #[gpui::test]
    async fn test_archived_thread_retains_paths_after_worktree_removal(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/worktrees/feature",
            serde_json::json!({ "src": { "main.rs": "" } }),
        )
        .await;
        let project = Project::test(fs, [Path::new("/worktrees/feature")], cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Push content so the event handler saves metadata with the
        // project's worktree paths.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Verify paths were saved correctly, and snapshot both path lists so
        // they can be compared against the store's state at the end.
        let (folder_paths_before, main_paths_before) = cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(
                !entry.folder_paths().is_empty(),
                "thread should have folder paths before archiving"
            );
            (
                entry.folder_paths().clone(),
                entry.main_worktree_paths().clone(),
            )
        });

        // Archive the thread.
        cx.update(|cx| {
            ThreadMetadataStore::global(cx).update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });
        cx.run_until_parked();

        // Remove the worktree from the project, simulating what the
        // archive flow does for linked git worktrees.
        let worktree_id = cx.update(|cx| {
            project
                .read(cx)
                .visible_worktrees(cx)
                .next()
                .unwrap()
                .read(cx)
                .id()
        });
        project.update(cx, |project, cx| {
            project.remove_worktree(worktree_id, cx);
        });
        cx.run_until_parked();

        // Trigger a thread event after archiving + worktree removal.
        // In production this happens when an async title-generation task
        // completes after the thread was archived.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Generated title".into(), cx).detach();
        });
        vcx.run_until_parked();

        // The archived thread must still have its original folder paths.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(entry.archived, "thread should still be archived");
            assert_eq!(
                entry.display_title().as_ref(),
                "Generated title",
                "title should still be updated for archived threads"
            );
            assert_eq!(
                entry.folder_paths(),
                &folder_paths_before,
                "archived thread must retain its folder paths after worktree \
                 removal + subsequent thread event, otherwise restoring it \
                 will prompt the user to re-associate a project"
            );
            assert_eq!(
                entry.main_worktree_paths(),
                &main_paths_before,
                "archived thread must retain its main worktree paths after \
                 worktree removal + subsequent thread event"
            );
        });
    }
3799
    /// Threads must not be persisted to the metadata store while the project
    /// is a collab guest session.
    #[gpui::test]
    async fn test_collab_guest_threads_not_saved_to_metadata_store(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, [Path::new("/project-a")], cx).await;

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);
        // Give the thread content and a title so its metadata gets saved.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "hello".into(), cx);
            thread.set_title("Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        // Confirm the thread is in the store while the project is local.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            assert!(
                store.read(cx).entry(thread_id).is_some(),
                "thread must be in the store while the project is local"
            );
        });

        // Delete the entry, then flip the project into collab-guest mode.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(thread_id, cx);
            });
        });
        project.update(cx, |project, _cx| {
            project.mark_as_collab_for_testing();
        });

        // Further thread activity must not re-save the metadata now that the
        // project is via collab.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "more content".into(), cx);
        });
        vcx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            assert!(
                store.read(cx).entry(thread_id).is_none(),
                "threads must not be persisted while the project is a collab guest session"
            );
        });
    }
3853
    // When a worktree is added to a collab project, update_thread_work_dirs
    // fires with the new worktree paths. Without an is_via_collab() guard it
    // overwrites the stored paths of any retained or active local threads with
    // the new (expanded) path set, corrupting metadata that belonged to the
    // guest's own local project.
    #[gpui::test]
    async fn test_collab_guest_retained_thread_paths_not_overwritten_on_worktree_change(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree("/project-a", serde_json::json!({})).await;
        fs.insert_tree("/project-b", serde_json::json!({})).await;
        let project = Project::test(fs, [Path::new("/project-a")], cx).await;

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);

        // Open thread A and give it content so its metadata is saved with /project-a.
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        let thread_a_id = crate::test_support::active_thread_id(&panel, &vcx);
        let thread_a = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_a.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "hello".into(), cx);
            thread.set_title("Thread A".into(), cx).detach();
        });
        vcx.run_until_parked();

        // Sanity-check the precondition: thread A's metadata holds only /project-a.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let entry = store.read(cx).entry(thread_a_id).unwrap();
            assert_eq!(
                entry.folder_paths().paths(),
                &[std::path::PathBuf::from("/project-a")],
                "thread A must be saved with /project-a before collab"
            );
        });

        // Open thread B, making thread A a retained thread in the panel.
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        vcx.run_until_parked();

        // Transition the project into collab mode (simulates joining as a guest).
        project.update(cx, |project, _cx| {
            project.mark_as_collab_for_testing();
        });

        // Add a second worktree. For a real collab guest this would be one of
        // the host's worktrees arriving via the collab protocol, but here we
        // use a local path because the test infrastructure cannot easily produce
        // a remote worktree with a fully-scanned root entry.
        //
        // This fires WorktreeAdded → update_thread_work_dirs. Without an
        // is_via_collab() guard that call overwrites the stored paths of
        // retained thread A from {/project-a} to {/project-a, /project-b},
        // polluting its metadata with a path it never belonged to.
        project
            .update(cx, |project, cx| {
                project.find_or_create_worktree(Path::new("/project-b"), true, cx)
            })
            .await
            .unwrap();
        vcx.run_until_parked();

        // Thread A's stored paths must be exactly what they were before collab.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let entry = store
                .read(cx)
                .entry(thread_a_id)
                .expect("thread A must still exist in the store");
            assert_eq!(
                entry.folder_paths().paths(),
                &[std::path::PathBuf::from("/project-a")],
                "retained thread A's stored path must not be updated while the project is via collab"
            );
        });
    }
3939}