1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent::{ThreadStore, ZED_AGENT_ID};
7use agent_client_protocol as acp;
8use anyhow::Context as _;
9use chrono::{DateTime, Utc};
10use collections::{HashMap, HashSet};
11use db::{
12 kvp::KeyValueStore,
13 sqlez::{
14 bindable::{Bind, Column},
15 domain::Domain,
16 statement::Statement,
17 thread_safe_connection::ThreadSafeConnection,
18 },
19 sqlez_macros::sql,
20};
21use fs::Fs;
22use futures::{FutureExt, future::Shared};
23use gpui::{AppContext as _, Entity, Global, Subscription, Task};
24pub use project::WorktreePaths;
25use project::{AgentId, linked_worktree_short_name};
26use remote::{RemoteConnectionOptions, same_remote_connection_identity};
27use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
28use util::ResultExt as _;
29use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
30
31use crate::DEFAULT_THREAD_TITLE;
32
/// Stable, globally-unique identifier for a thread, persisted in the
/// metadata database. Distinct from the ACP session id, which is
/// agent-assigned and optional.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);

impl ThreadId {
    /// Creates a fresh id backed by a random (v4) UUID.
    pub fn new() -> Self {
        Self(uuid::Uuid::new_v4())
    }
}

/// SQL parameter binding: delegates to the inner UUID's `Bind` impl.
impl Bind for ThreadId {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}

/// SQL column decoding: reads a UUID column and wraps it in a `ThreadId`.
impl Column for ThreadId {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (uuid, next) = Column::column(statement, start_index)?;
        Ok((ThreadId(uuid), next))
    }
}
54
// KVP keys recording whether the corresponding one-shot migration has already run.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";

/// Initializes the global `ThreadMetadataStore` and kicks off the one-shot
/// data migrations. The remote-connection backfill is chained after the
/// initial metadata migration task so it sees the migrated rows.
pub fn init(cx: &mut App) {
    ThreadMetadataStore::init_global(cx);
    let migration_task = migrate_thread_metadata(cx);
    migrate_thread_remote_connections(cx, migration_task);
    migrate_thread_ids(cx);
}
64
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// Migrated threads are marked archived; on the very first run (empty DB)
/// the 5 most recently updated threads per project are left unarchived so
/// the sidebar isn't empty after migration.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        let existing_list = db.list()?;
        let is_first_migration = existing_list.is_empty();
        // Session ids already present in the DB — used to skip threads that
        // an earlier (partial) run already migrated.
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Store `None` for empty/default titles so
                        // `display_title` falls back to the default label.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads with no project paths stay archived.
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
150
151fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
152 let store = ThreadMetadataStore::global(cx);
153 let db = store.read(cx).db.clone();
154 let kvp = KeyValueStore::global(cx);
155 let workspace_db = WorkspaceDb::global(cx);
156 let fs = <dyn Fs>::global(cx);
157
158 cx.spawn(async move |cx| -> anyhow::Result<()> {
159 migration_task.await?;
160
161 if kvp
162 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
163 .is_some()
164 {
165 return Ok(());
166 }
167
168 let recent_workspaces = workspace_db.recent_workspaces_on_disk(fs.as_ref()).await?;
169
170 let mut local_path_lists = HashSet::<PathList>::default();
171 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
172
173 recent_workspaces
174 .iter()
175 .filter(|(_, location, path_list, _)| {
176 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
177 })
178 .for_each(|(_, _, path_list, _)| {
179 local_path_lists.insert(path_list.clone());
180 });
181
182 for (_, location, path_list, _) in recent_workspaces {
183 match location {
184 SerializedWorkspaceLocation::Remote(remote_connection)
185 if !local_path_lists.contains(&path_list) =>
186 {
187 remote_path_lists
188 .entry(path_list)
189 .or_insert(remote_connection);
190 }
191 _ => {}
192 }
193 }
194
195 let mut reloaded = false;
196 for metadata in db.list()? {
197 if metadata.remote_connection.is_some() {
198 continue;
199 }
200
201 if let Some(remote_connection) = remote_path_lists
202 .get(metadata.folder_paths())
203 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
204 {
205 db.save(ThreadMetadata {
206 remote_connection: Some(remote_connection.clone()),
207 ..metadata
208 })
209 .await?;
210 reloaded = true;
211 }
212 }
213
214 let reloaded_task = reloaded
215 .then_some(store.update(cx, |store, cx| store.reload(cx)))
216 .unwrap_or(Task::ready(()).shared());
217
218 kvp.write_kvp(
219 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
220 "1".to_string(),
221 )
222 .await?;
223 reloaded_task.await;
224
225 Ok(())
226 })
227 .detach_and_log_err(cx);
228}
229
230fn migrate_thread_ids(cx: &mut App) {
231 let store = ThreadMetadataStore::global(cx);
232 let db = store.read(cx).db.clone();
233 let kvp = KeyValueStore::global(cx);
234
235 cx.spawn(async move |cx| -> anyhow::Result<()> {
236 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
237 return Ok(());
238 }
239
240 let mut reloaded = false;
241 for metadata in db.list()? {
242 db.save(metadata).await?;
243 reloaded = true;
244 }
245
246 let reloaded_task = reloaded
247 .then_some(store.update(cx, |store, cx| store.reload(cx)))
248 .unwrap_or(Task::ready(()).shared());
249
250 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
251 .await?;
252 reloaded_task.await;
253
254 Ok(())
255 })
256 .detach_and_log_err(cx);
257}
258
/// GPUI global wrapper holding the singleton [`ThreadMetadataStore`] entity.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
261
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier for this thread in the metadata DB.
    pub thread_id: ThreadId,
    /// ACP session id, if the thread has one.
    pub session_id: Option<acp::SessionId>,
    /// Which agent owns the thread.
    pub agent_id: AgentId,
    /// User-visible title; `None` means untitled, and `display_title`
    /// falls back to the default title.
    pub title: Option<SharedString>,
    /// When the thread was last updated.
    pub updated_at: DateTime<Utc>,
    /// When the thread was created, if known.
    pub created_at: Option<DateTime<Utc>>,
    /// The worktree/folder paths the thread is associated with.
    pub worktree_paths: WorktreePaths,
    /// Remote connection the thread belongs to; `None` for local threads.
    pub remote_connection: Option<RemoteConnectionOptions>,
    /// Whether the thread has been archived out of the sidebar.
    pub archived: bool,
}
276
277impl ThreadMetadata {
278 pub fn display_title(&self) -> SharedString {
279 self.title
280 .clone()
281 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
282 }
283
284 pub fn folder_paths(&self) -> &PathList {
285 self.worktree_paths.folder_path_list()
286 }
287 pub fn main_worktree_paths(&self) -> &PathList {
288 self.worktree_paths.main_worktree_path_list()
289 }
290}
291
/// Derives worktree display info from a thread's stored path list.
///
/// For each path in the thread's `folder_paths`, produces a
/// [`ThreadItemWorktreeInfo`] with a short display name, full path, and whether
/// the worktree is the main checkout or a linked git worktree. When
/// multiple main paths exist and a linked worktree's short name alone
/// wouldn't identify which main project it belongs to, the main project
/// name is prefixed for disambiguation (e.g. `project:feature`).
pub fn worktree_info_from_thread_paths<S: std::hash::BuildHasher>(
    worktree_paths: &WorktreePaths,
    branch_names: &std::collections::HashMap<PathBuf, SharedString, S>,
) -> Vec<ThreadItemWorktreeInfo> {
    let mut infos: Vec<ThreadItemWorktreeInfo> = Vec::new();
    // `(short_name, project_name)` for each *linked* worktree, pushed in the
    // same order as the `Linked` entries in `infos`. The zip in the
    // prefixing pass below relies on this ordering invariant.
    let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new();
    // Set of distinct main-worktree paths; only its `len()` is used.
    let mut unique_main_count = HashSet::default();

    for (main_path, folder_path) in worktree_paths.ordered_pairs() {
        unique_main_count.insert(main_path.clone());
        // A pair whose folder differs from its main path is a linked git worktree.
        let is_linked = main_path != folder_path;

        if is_linked {
            let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default();
            // Project name = final component of the main repo path.
            let project_name = main_path
                .file_name()
                .map(|n| SharedString::from(n.to_string_lossy().to_string()))
                .unwrap_or_default();
            linked_short_names.push((short_name.clone(), project_name));
            infos.push(ThreadItemWorktreeInfo {
                name: short_name,
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Linked,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        } else {
            // Main checkout: skip paths with no final component (e.g. `/`).
            let Some(name) = folder_path.file_name() else {
                continue;
            };
            infos.push(ThreadItemWorktreeInfo {
                name: SharedString::from(name.to_string_lossy().to_string()),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Main,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        }
    }

    // When the group has multiple main worktree paths and the thread's
    // folder paths don't all share the same short name, prefix each
    // linked worktree chip with its main project name so the user knows
    // which project it belongs to.
    let all_same_name = infos.len() > 1 && infos.iter().all(|i| i.name == infos[0].name);

    if unique_main_count.len() > 1 && !all_same_name {
        // Pairing depends on `linked_short_names` matching the order of
        // `Linked` entries within `infos` (see invariant above).
        for (info, (_short_name, project_name)) in infos
            .iter_mut()
            .filter(|i| i.kind == WorktreeKind::Linked)
            .zip(linked_short_names.iter())
        {
            info.name = SharedString::from(format!("{}:{}", project_name, info.name));
        }
    }

    infos
}
358
359impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
360 fn from(meta: &ThreadMetadata) -> Self {
361 let session_id = meta
362 .session_id
363 .clone()
364 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
365 Self {
366 session_id,
367 work_dirs: Some(meta.folder_paths().clone()),
368 title: meta.title.clone(),
369 updated_at: Some(meta.updated_at),
370 created_at: meta.created_at,
371 meta: None,
372 }
373 }
374}
375
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived. Holds everything needed to recreate the
/// worktree and restore its working-copy state on unarchive.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
417
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    /// Backing metadata database.
    db: ThreadMetadataDb,
    /// All known threads, keyed by id. The maps below are secondary indexes
    /// into this map, kept in sync by `save_internal`, `delete`, and `reload`.
    threads: HashMap<ThreadId, ThreadMetadata>,
    /// Index: folder path list -> thread ids.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Index: main-worktree path list -> thread ids. Only populated for
    /// threads whose main-worktree path list is non-empty.
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Index: ACP session id -> thread id.
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    /// Handle to the in-flight reload from the DB, if any.
    reload_task: Option<Shared<Task<()>>>,
    /// Per-ConversationView subscriptions, removed when the view is released.
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    /// Queue of pending DB writes, drained by `_db_operations_task`.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Background archive jobs per thread; dropping the sender cancels the job.
    in_flight_archives: HashMap<ThreadId, (Task<()>, smol::channel::Sender<()>)>,
    /// Background task that batches and applies queued DB operations.
    _db_operations_task: Task<()>,
}
433
/// A pending write queued for the background DB task.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update a thread's metadata row.
    Upsert(ThreadMetadata),
    /// Remove a thread's metadata row.
    Delete(ThreadId),
}

impl DbOperation {
    /// The thread this operation applies to.
    fn id(&self) -> ThreadId {
        match self {
            DbOperation::Upsert(thread) => thread.thread_id,
            DbOperation::Delete(thread_id) => *thread_id,
        }
    }
}
448
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}

#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// Returns the configured override, or a name derived from the current
    /// thread's name when no override is set (falling back to
    /// "unknown_test" for unnamed threads).
    pub fn global(cx: &App) -> String {
        cx.try_global::<Self>()
            .map(|g| g.0.clone())
            .unwrap_or_else(|| {
                let thread = std::thread::current();
                let test_name = thread.name().unwrap_or("unknown_test");
                format!("THREAD_METADATA_DB_{}", test_name)
            })
    }
}
470
471impl ThreadMetadataStore {
472 #[cfg(not(any(test, feature = "test-support")))]
473 pub fn init_global(cx: &mut App) {
474 if cx.has_global::<Self>() {
475 return;
476 }
477
478 let db = ThreadMetadataDb::global(cx);
479 let thread_store = cx.new(|cx| Self::new(db, cx));
480 cx.set_global(GlobalThreadMetadataStore(thread_store));
481 }
482
    /// Test variant: opens a per-test DB, named via [`TestMetadataDbName`]
    /// when that global is set and otherwise derived from the thread name.
    /// Unconditionally replaces any existing global so each test starts fresh.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let db_name = TestMetadataDbName::global(cx);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
490
    /// Returns the global store if it has been initialized.
    pub fn try_global(cx: &App) -> Option<Entity<Self>> {
        cx.try_global::<GlobalThreadMetadataStore>()
            .map(|store| store.0.clone())
    }

    /// Returns the global store; expects `init_global` to have run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }

    /// Whether the store holds no threads at all.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }

    /// Returns all thread IDs.
    pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
        self.threads.keys().copied()
    }

    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
        self.threads.get(&thread_id)
    }

    /// Returns the metadata for a thread identified by its ACP session ID.
    pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        let thread_id = self.threads_by_session.get(session_id)?;
        self.threads.get(thread_id)
    }

    /// Returns all threads.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }

    /// Returns all archived threads.
    pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.entries().filter(|t| t.archived)
    }
529
530 /// Returns all threads for the given path list and remote connection,
531 /// excluding archived threads.
532 ///
533 /// When `remote_connection` is `Some`, only threads whose persisted
534 /// `remote_connection` matches by normalized identity are returned.
535 /// When `None`, only local (non-remote) threads are returned.
536 pub fn entries_for_path<'a>(
537 &'a self,
538 path_list: &PathList,
539 remote_connection: Option<&'a RemoteConnectionOptions>,
540 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
541 self.threads_by_paths
542 .get(path_list)
543 .into_iter()
544 .flatten()
545 .filter_map(|s| self.threads.get(s))
546 .filter(|s| !s.archived)
547 .filter(move |s| {
548 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
549 })
550 }
551
552 /// Returns threads whose `main_worktree_paths` matches the given path list
553 /// and remote connection, excluding archived threads. This finds threads
554 /// that were opened in a linked worktree but are associated with the given
555 /// main worktree.
556 ///
557 /// When `remote_connection` is `Some`, only threads whose persisted
558 /// `remote_connection` matches by normalized identity are returned.
559 /// When `None`, only local (non-remote) threads are returned.
560 pub fn entries_for_main_worktree_path<'a>(
561 &'a self,
562 path_list: &PathList,
563 remote_connection: Option<&'a RemoteConnectionOptions>,
564 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
565 self.threads_by_main_paths
566 .get(path_list)
567 .into_iter()
568 .flatten()
569 .filter_map(|s| self.threads.get(s))
570 .filter(|s| !s.archived)
571 .filter(move |s| {
572 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
573 })
574 }
575
    /// Replaces all in-memory thread state from the database.
    ///
    /// The DB read runs on a background task; the in-memory maps are then
    /// rebuilt in a single update. Returns a `Shared` handle so multiple
    /// callers can await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop the handle to any previous reload; the newest reload wins.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the primary map and every secondary index from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        if let Some(sid) = &row.session_id {
                            this.threads_by_session.insert(sid.clone(), row.thread_id);
                        }
                        this.threads_by_paths
                            .entry(row.folder_paths().clone())
                            .or_default()
                            .insert(row.thread_id);
                        // The main-paths index only tracks non-empty main path lists.
                        if !row.main_worktree_paths().is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths().clone())
                                .or_default()
                                .insert(row.thread_id);
                        }
                        this.threads.insert(row.thread_id, row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
620
621 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
622 for metadata in metadata {
623 self.save_internal(metadata);
624 }
625 cx.notify();
626 }
627
628 pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
629 self.save_internal(metadata);
630 cx.notify();
631 }
632
    /// Inserts or updates a thread in memory, fixes up every secondary index,
    /// and queues an async DB upsert. Does not notify; callers do that.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the thread already exists and its paths changed, remove it from
        // the old index buckets before re-inserting under the new keys below.
        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        if let Some(sid) = &metadata.session_id {
            self.threads_by_session
                .insert(sid.clone(), metadata.thread_id);
        }

        self.threads.insert(metadata.thread_id, metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths().clone())
            .or_default()
            .insert(metadata.thread_id);

        // The main-paths index only tracks non-empty main path lists.
        if !metadata.main_worktree_paths().is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths().clone())
                .or_default()
                .insert(metadata.thread_id);
        }

        // Persist asynchronously via the batching DB task.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
675
    /// Updates a thread's folder paths to the given working directories,
    /// keeping its existing main-worktree paths. Falls back to treating
    /// `work_dirs` as plain folder paths when the two lists can't be
    /// combined. No-op if the thread is unknown.
    pub fn update_working_directories(
        &mut self,
        thread_id: ThreadId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id) {
            debug_assert!(
                !thread.archived,
                "update_working_directories called on archived thread"
            );
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }
698
    /// Overwrites `worktree_paths` for each of the given threads, skipping
    /// threads that are missing, already identical, or archived. Notifies
    /// once at the end if anything changed.
    pub fn update_worktree_paths(
        &mut self,
        thread_ids: &[ThreadId],
        worktree_paths: WorktreePaths,
        cx: &mut Context<Self>,
    ) {
        let mut changed = false;
        for &thread_id in thread_ids {
            let Some(thread) = self.threads.get(&thread_id) else {
                continue;
            };
            if thread.worktree_paths == worktree_paths {
                continue;
            }
            // Don't overwrite paths for archived threads — the
            // project may no longer include the worktree that was
            // removed during the archive flow.
            if thread.archived {
                continue;
            }
            self.save_internal(ThreadMetadata {
                worktree_paths: worktree_paths.clone(),
                ..thread.clone()
            });
            changed = true;
        }
        if changed {
            cx.notify();
        }
    }
729
    /// Marks a thread archived and, when provided, registers its background
    /// archive job so it can be cancelled later via `unarchive`.
    pub fn archive(
        &mut self,
        thread_id: ThreadId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(thread_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(thread_id, job);
        }
    }

    /// Marks a thread unarchived and cancels any in-flight archive job.
    pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        self.update_archived(thread_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(&thread_id);
    }

    /// Drops the bookkeeping entry for an archive job that ran to completion.
    pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
        self.in_flight_archives.remove(&thread_id);
    }
752
    /// Returns `true` if any unarchived thread other than `thread_id` (on the
    /// same remote connection, by normalized identity) references `path` in
    /// its folder paths. Used to determine whether a worktree can safely be
    /// removed from disk.
    pub fn path_is_referenced_by_other_unarchived_threads(
        &self,
        thread_id: ThreadId,
        path: &Path,
        remote_connection: Option<&RemoteConnectionOptions>,
    ) -> bool {
        self.entries().any(|thread| {
            thread.thread_id != thread_id
                && !thread.archived
                && same_remote_connection_identity(
                    thread.remote_connection.as_ref(),
                    remote_connection,
                )
                && thread
                    .folder_paths()
                    .paths()
                    .iter()
                    .any(|other_path| other_path.as_path() == path)
        })
    }
776
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    ///
    /// Replaces only the *first* occurrence of each old path; see
    /// [`Self::complete_worktree_restore`], which replaces all occurrences.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                // First occurrence only.
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }

    /// Applies `path_replacements` to a thread's stored folder paths once a
    /// worktree restore has finished.
    ///
    /// NOTE(review): this is nearly identical to
    /// `update_restored_worktree_paths`, differing only in replacing *all*
    /// occurrences of each old path rather than just the first — confirm
    /// whether both behaviors are intentional.
    pub fn complete_worktree_restore(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                // Every occurrence.
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
834
    /// Apply a mutation to the worktree paths of all threads whose current
    /// `folder_paths` matches `current_folder_paths`, then re-index.
    /// When `remote_connection` is provided, only threads with a matching
    /// remote connection are affected.
    pub fn change_worktree_paths(
        &mut self,
        current_folder_paths: &PathList,
        remote_connection: Option<&RemoteConnectionOptions>,
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        // Collect ids first; archived threads and threads on a different
        // remote connection are left untouched.
        let thread_ids: Vec<_> = self
            .threads_by_paths
            .get(current_folder_paths)
            .into_iter()
            .flatten()
            .filter(|id| {
                self.threads.get(id).is_some_and(|t| {
                    !t.archived
                        && same_remote_connection_identity(
                            t.remote_connection.as_ref(),
                            remote_connection,
                        )
                })
            })
            .copied()
            .collect();

        self.mutate_thread_paths(&thread_ids, mutate, cx);
    }
865
    /// Applies `mutate` to each thread's `worktree_paths` in place while
    /// keeping the path-based secondary indexes consistent: each thread is
    /// removed from its old index buckets before mutation and re-inserted
    /// under its new keys afterwards. Queues a DB upsert per thread and
    /// notifies once at the end.
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // De-index under the old keys before mutating.
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                // Re-index under the (possibly changed) keys.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                // Persist asynchronously via the batching DB task.
                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }
907
    /// Records a newly archived git worktree in the DB on a background task.
    /// Returns the new row's auto-incremented id (see [`ArchivedGitWorktree`]).
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Associates a thread with an archived-worktree record.
    pub fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
                .await
        })
    }

    /// Fetches the archived-worktree records linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
    }

    /// Deletes an archived-worktree record by its row id.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Removes all links between the given thread and archived worktrees.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(thread_id)
                .await
        })
    }

    /// Whether any thread still links to the given archived-worktree record.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }

    /// Fetches, per thread, the branch name recorded for each archived
    /// worktree path.
    pub fn get_all_archived_branch_names(
        &self,
        cx: &App,
    ) -> Task<anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_all_archived_branch_names() })
    }
990
    /// Sets a thread's archived flag and persists the change. No-op if the
    /// thread is unknown.
    fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            self.save_internal(ThreadMetadata {
                archived,
                ..thread.clone()
            });
            cx.notify();
        }
    }

    /// Removes a thread from memory and every secondary index, and queues a
    /// DB delete. The delete is queued even if the thread wasn't in memory,
    /// so a stale DB row still gets cleaned up.
    pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            if let Some(sid) = &thread.session_id {
                self.threads_by_session.remove(sid);
            }
            if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                thread_ids.remove(&thread_id);
            }
            if !thread.main_worktree_paths().is_empty() {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&thread_id);
                }
            }
        }
        self.threads.remove(&thread_id);
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(thread_id))
            .log_err();
        cx.notify();
    }
1024
    /// Build the store, wiring up:
    /// - an observer that subscribes to every newly-created
    ///   `ConversationView` so thread metadata stays in sync with
    ///   conversation updates (see `handle_conversation_event`), and
    /// - a background task that drains queued database operations in
    ///   batches, deduplicating them before writing.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            // Drop our subscription when the view is released so the
            // subscription map doesn't accumulate dead entries.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                // Block on the first pending operation, then greedily drain
                // whatever else is already queued so a burst of updates
                // becomes one deduplicated batch of writes.
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate the in-memory caches from the database immediately.
        let _ = this.reload(cx);
        this
    }
1093
1094 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
1095 let mut ops = HashMap::default();
1096 for operation in operations.into_iter().rev() {
1097 if ops.contains_key(&operation.id()) {
1098 continue;
1099 }
1100 ops.insert(operation.id(), operation);
1101 }
1102 ops.into_values().collect()
1103 }
1104
1105 fn handle_conversation_event(
1106 &mut self,
1107 conversation_view: Entity<crate::ConversationView>,
1108 _event: &crate::conversation_view::RootThreadUpdated,
1109 cx: &mut Context<Self>,
1110 ) {
1111 let view = conversation_view.read(cx);
1112 let thread_id = view.thread_id;
1113 let Some(thread) = view.root_acp_thread(cx) else {
1114 return;
1115 };
1116
1117 let thread_ref = thread.read(cx);
1118 if thread_ref.is_draft_thread() {
1119 return;
1120 }
1121
1122 let existing_thread = self.entry(thread_id);
1123 let session_id = Some(thread_ref.session_id().clone());
1124 let title = thread_ref.title();
1125
1126 let updated_at = Utc::now();
1127
1128 let created_at = existing_thread
1129 .and_then(|t| t.created_at)
1130 .unwrap_or_else(|| updated_at);
1131
1132 let agent_id = thread_ref.connection().agent_id();
1133
1134 // Preserve project-dependent fields for archived threads.
1135 // The worktree may already have been removed from the
1136 // project as part of the archive flow, so re-evaluating
1137 // these from the current project state would yield
1138 // empty/incorrect results.
1139 let (worktree_paths, remote_connection) =
1140 if let Some(existing) = existing_thread.filter(|t| t.archived) {
1141 (
1142 existing.worktree_paths.clone(),
1143 existing.remote_connection.clone(),
1144 )
1145 } else {
1146 let project = thread_ref.project().read(cx);
1147 let worktree_paths = project.worktree_paths(cx);
1148 let remote_connection = project.remote_connection_options(cx);
1149
1150 (worktree_paths, remote_connection)
1151 };
1152
1153 // Threads without a folder path (e.g. started in an empty
1154 // window) are archived by default so they don't get lost,
1155 // because they won't show up in the sidebar. Users can reload
1156 // them from the archive.
1157 let archived = existing_thread
1158 .map(|t| t.archived)
1159 .unwrap_or(worktree_paths.is_empty());
1160
1161 let metadata = ThreadMetadata {
1162 thread_id,
1163 session_id,
1164 agent_id,
1165 title,
1166 created_at: Some(created_at),
1167 updated_at,
1168 worktree_paths,
1169 remote_connection,
1170 archived,
1171 };
1172
1173 self.save(metadata, cx);
1174 }
1175}
1176
/// Marker impl so the store can be installed and retrieved as a GPUI global
/// (see `ThreadMetadataStore::global` / `init_global` usage below).
impl Global for ThreadMetadataStore {}
1178
/// Newtype over the SQLite connection that stores sidebar thread metadata
/// and archived git worktree records.
struct ThreadMetadataDb(ThreadSafeConnection);
1180
/// Schema and migrations for the thread metadata database.
///
/// NOTE(review): treat `MIGRATIONS` as append-only — sqlez replays entries in
/// order against databases created by older builds, so editing a shipped
/// entry would change what existing users' databases replay. Confirm against
/// the sqlez `Domain` docs before modifying anything here.
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    const MIGRATIONS: &[&str] = &[
        // v1: original table, keyed by ACP session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // v2-v4: archived flag and main-worktree path columns.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // v5: archived git worktrees plus the (session-keyed) link table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        // v6-v7: remote connection metadata and the new thread_id key.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // v8: re-key both tables on thread_id. Existing rows get a random
        // 16-byte id, the link table is rebuilt against thread_id, and
        // sidebar_threads is recreated with thread_id as the primary key
        // (SQLite can't alter a primary key in place).
        sql!(
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
    ];
}
1259
// Process-wide connection for `ThreadMetadataDb`; the empty list means it
// depends on no other database domains.
db::static_connection!(ThreadMetadataDb, []);
1261
impl ThreadMetadataDb {
    /// List every thread id, most recently updated first.
    #[allow(dead_code)]
    pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
        self.select::<ThreadId>(
            "SELECT thread_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    /// Column order here must match `ThreadMetadata`'s `Column` impl.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }

    /// Upsert metadata for a thread.
    ///
    /// Encoding choices (mirrored by `ThreadMetadata`'s `Column` impl):
    /// the built-in Zed agent is stored as NULL `agent_id`, a missing title
    /// becomes the empty string, timestamps are RFC 3339 strings, empty path
    /// lists are stored as NULL, and the remote connection is JSON.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let session_id = row.session_id.as_ref().map(|s| s.0.clone());
        // NULL marks the built-in Zed agent.
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row
            .title
            .as_ref()
            .map(|t| t.to_string())
            .unwrap_or_default();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let thread_id = row.thread_id;
        let archived = row.archived;

        self.write(move |conn| {
            // Upsert keyed on thread_id; every other column is overwritten.
            let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                       VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) \
                       ON CONFLICT(thread_id) DO UPDATE SET \
                       session_id = excluded.session_id, \
                       agent_id = excluded.agent_id, \
                       title = excluded.title, \
                       updated_at = excluded.updated_at, \
                       created_at = excluded.created_at, \
                       folder_paths = excluded.folder_paths, \
                       folder_paths_order = excluded.folder_paths_order, \
                       archived = excluded.archived, \
                       main_worktree_paths = excluded.main_worktree_paths, \
                       main_worktree_paths_order = excluded.main_worktree_paths_order, \
                       remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the column list above.
            let mut i = stmt.bind(&thread_id, 1)?;
            i = stmt.bind(&session_id, i)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert a new archived git worktree record and return its row id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // RETURNING id yields exactly one row for a successful insert.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Associate a thread with an archived worktree.
    pub async fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&thread_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetch all archived worktrees linked to the given thread.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<ThreadId, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.thread_id = ?1",
        )?(thread_id)
    }

    /// Delete an archived worktree record, removing its thread links first so
    /// no dangling references remain.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Remove every link between the thread and archived worktrees; the
    /// archived worktree records themselves are kept.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
            )?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// True when at least one thread still links to the archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }

    /// Collect, per thread, each archived worktree's branch name keyed by
    /// worktree path. Rows without a branch name are skipped; ordering by id
    /// means a later worktree for the same path wins in the map.
    pub fn get_all_archived_branch_names(
        &self,
    ) -> anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>> {
        let rows = self.select::<(ThreadId, String, String)>(
            "SELECT t.thread_id, a.worktree_path, a.branch_name \
             FROM thread_archived_worktrees t \
             JOIN archived_git_worktrees a ON a.id = t.archived_worktree_id \
             WHERE a.branch_name IS NOT NULL \
             ORDER BY a.id ASC",
        )?()?;

        let mut result: HashMap<ThreadId, HashMap<PathBuf, String>> = HashMap::default();
        for (thread_id, worktree_path, branch_name) in rows {
            result
                .entry(thread_id)
                .or_default()
                .insert(PathBuf::from(worktree_path), branch_name);
        }
        Ok(result)
    }
}
1481
1482impl Column for ThreadMetadata {
1483 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1484 let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
1485 let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
1486 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1487 let (title, next): (String, i32) = Column::column(statement, next)?;
1488 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1489 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1490 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1491 let (folder_paths_order_str, next): (Option<String>, i32) =
1492 Column::column(statement, next)?;
1493 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1494 let (main_worktree_paths_str, next): (Option<String>, i32) =
1495 Column::column(statement, next)?;
1496 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1497 Column::column(statement, next)?;
1498 let (remote_connection_json, next): (Option<String>, i32) =
1499 Column::column(statement, next)?;
1500
1501 let agent_id = agent_id
1502 .map(|id| AgentId::new(id))
1503 .unwrap_or(ZED_AGENT_ID.clone());
1504
1505 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1506 let created_at = created_at_str
1507 .as_deref()
1508 .map(DateTime::parse_from_rfc3339)
1509 .transpose()?
1510 .map(|dt| dt.with_timezone(&Utc));
1511
1512 let folder_paths = folder_paths_str
1513 .map(|paths| {
1514 PathList::deserialize(&util::path_list::SerializedPathList {
1515 paths,
1516 order: folder_paths_order_str.unwrap_or_default(),
1517 })
1518 })
1519 .unwrap_or_default();
1520
1521 let main_worktree_paths = main_worktree_paths_str
1522 .map(|paths| {
1523 PathList::deserialize(&util::path_list::SerializedPathList {
1524 paths,
1525 order: main_worktree_paths_order_str.unwrap_or_default(),
1526 })
1527 })
1528 .unwrap_or_default();
1529
1530 let remote_connection = remote_connection_json
1531 .as_deref()
1532 .map(serde_json::from_str::<RemoteConnectionOptions>)
1533 .transpose()
1534 .context("deserialize thread metadata remote connection")?;
1535
1536 let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
1537 .unwrap_or_else(|_| WorktreePaths::default());
1538
1539 let thread_id = ThreadId(thread_id_uuid);
1540
1541 Ok((
1542 ThreadMetadata {
1543 thread_id,
1544 session_id: id.map(acp::SessionId::new),
1545 agent_id,
1546 title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
1547 None
1548 } else {
1549 Some(title.into())
1550 },
1551 updated_at,
1552 created_at,
1553 worktree_paths,
1554 remote_connection,
1555 archived,
1556 },
1557 next,
1558 ))
1559 }
1560}
1561
1562impl Column for ArchivedGitWorktree {
1563 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1564 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1565 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1566 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1567 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1568 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1569 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1570 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1571
1572 Ok((
1573 ArchivedGitWorktree {
1574 id,
1575 worktree_path: PathBuf::from(worktree_path_str),
1576 main_repo_path: PathBuf::from(main_repo_path_str),
1577 branch_name,
1578 staged_commit_hash,
1579 unstaged_commit_hash,
1580 original_commit_hash,
1581 },
1582 next,
1583 ))
1584 }
1585}
1586
1587#[cfg(test)]
1588mod tests {
1589 use super::*;
1590 use acp_thread::StubAgentConnection;
1591 use action_log::ActionLog;
1592 use agent::DbThread;
1593 use agent_client_protocol as acp;
1594
1595 use gpui::{TestAppContext, VisualTestContext};
1596 use project::FakeFs;
1597 use project::Project;
1598 use remote::WslConnectionOptions;
1599 use std::path::Path;
1600 use std::rc::Rc;
1601 use workspace::MultiWorkspace;
1602
    /// Build a minimal native-store `DbThread` carrying only a title and
    /// timestamp; every other field is empty/default.
    fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
        DbThread {
            title: title.to_string().into(),
            messages: Vec::new(),
            updated_at,
            detailed_summary: None,
            initial_project_snapshot: None,
            cumulative_token_usage: Default::default(),
            request_token_usage: Default::default(),
            model: None,
            profile: None,
            imported: false,
            subagent_context: None,
            speed: None,
            thinking_enabled: false,
            thinking_effort: None,
            draft_prompt: None,
            ui_scroll_position: None,
        }
    }
1623
1624 fn make_metadata(
1625 session_id: &str,
1626 title: &str,
1627 updated_at: DateTime<Utc>,
1628 folder_paths: PathList,
1629 ) -> ThreadMetadata {
1630 ThreadMetadata {
1631 thread_id: ThreadId::new(),
1632 archived: false,
1633 session_id: Some(acp::SessionId::new(session_id)),
1634 agent_id: agent::ZED_AGENT_ID.clone(),
1635 title: if title.is_empty() {
1636 None
1637 } else {
1638 Some(title.to_string().into())
1639 },
1640 updated_at,
1641 created_at: Some(updated_at),
1642 worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
1643 remote_connection: None,
1644 }
1645 }
1646
    /// Standard test setup: installs settings, theme, editor, release
    /// channel, prompt store, a fake filesystem, and the global thread
    /// stores, then waits for startup work to settle.
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            // Settings must exist before the other subsystems initialize.
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init("0.0.0".parse().unwrap(), cx);
            prompt_store::init(cx);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
            language_model::LanguageModelRegistry::test(cx);
        });
        cx.run_until_parked();
    }
1663
    /// Open a test workspace window for `project` and create an `AgentPanel`
    /// in it, returning the panel together with the window's visual test
    /// context.
    fn setup_panel_with_project(
        project: Entity<Project>,
        cx: &mut TestAppContext,
    ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace_entity = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();
        let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
        let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
            cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
        });
        (panel, vcx)
    }
1679
1680 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1681 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1682 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1683 .unwrap();
1684 }
1685
    /// Re-run the thread metadata and remote-connection migrations as on a
    /// fresh startup; the one-shot KVP marker is cleared first so the
    /// backfill isn't skipped.
    fn run_thread_metadata_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        cx.run_until_parked();
    }
1694
    /// Rows saved directly to the database before the store exists must be
    /// loaded into the store's in-memory caches (lookup by session id and by
    /// folder paths) when the store initializes.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Use a per-test database name so concurrently running tests don't
        // share state.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        // Seed two threads before the global store is created.
        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        // Wait for the store's initial reload to finish.
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // Each thread is indexed under exactly its own folder paths.
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1763
    /// Saving, moving (re-saving with different paths), and deleting threads
    /// must keep the store's path-keyed caches consistent.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );
        let session1_thread_id = initial_metadata.thread_id;

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );
        let session2_thread_id = second_metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially each thread is indexed under its own folder paths.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under project-b's paths: it must move between
        // the path indexes, not be duplicated.
        let moved_metadata = ThreadMetadata {
            thread_id: session1_thread_id,
            session_id: Some(acp::SessionId::new("session-1")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("First Thread".into()),
            updated_at: updated_time,
            created_at: Some(updated_time),
            worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // session-1 should no longer appear under its old paths…
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(first_path_entries.is_empty());

            // …and both threads now live under project-b's paths.
            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Deleting session-2 must also remove it from the path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(session2_thread_id, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 1);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1890
    /// The native-store migration must import only threads that have no
    /// metadata yet: an existing entry keeps its title and archived state,
    /// while newly imported entries come in archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for a-session-0; the migration must not
        // overwrite it with the native store's version.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Native-store threads: one overlapping session, two new
        // project-scoped ones, and one with no project at all.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // One pre-existing + three imported = four entries, all Zed-agent.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The overlapping session keeps its pre-existing metadata.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // Every imported entry arrives archived.
        let migrated_entries: Vec<_> = list
            .iter()
            .filter(|metadata| {
                !metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .collect();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
2009
    // The migration must be idempotent: when a metadata row already exists
    // for a native thread's session id, re-running the migration should
    // neither duplicate the entry nor clobber it with the native thread's
    // newer title.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        // Pre-existing metadata row keyed by the same session id the native
        // thread saved below will use.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a native thread under the same session id with a later
        // timestamp and a different title.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Still exactly one entry, keyed by the original session id.
        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
2069
    // The remote-connection backfill migration should find the workspace DB
    // row whose paths match a thread's folder paths and copy that workspace's
    // remote connection (here a WSL connection) onto the thread metadata.
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        // Metadata saved without any remote connection; the migration is
        // expected to fill it in.
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Seed the workspace DB directly: insert a WSL remote-connection row,
        // then point a workspace with matching paths at that connection.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Clear the one-shot backfill marker so the migration runs again.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        // The seeded WSL connection must now be attached to the thread.
        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2145
    // The migration keeps only the five most recently updated threads per
    // project unarchived; anything older is migrated in archived state.
    // A project under the limit (project B) stays fully unarchived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                // Increasing timestamps, so higher indices are more recent.
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // All 10 threads are migrated; archiving only affects their flag.
        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2234
    // Draft (empty) threads must never create sidebar metadata: neither
    // opening the thread nor retitling it produces an entry. Metadata only
    // appears once the thread has real content.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Draft threads no longer create metadata entries.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 0);
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), so no metadata is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(
                store.entry_ids().count(),
                0,
                "expected title updates on empty thread to not create metadata"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2288
    // Metadata for a thread that has content must survive the thread entity
    // being released (e.g. when its panel is closed): the store keeps its
    // entry even though the live thread is gone.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let session_id = crate::test_support::active_session_id(&panel, &vcx);
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());

        // Give the thread content so a metadata entry is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });

        // Dropping the panel releases the ConversationView and its thread.
        drop(panel);
        cx.update(|_| {});
        cx.run_until_parked();

        // The metadata entry must still be present after release.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });
    }
2325
    // Threads created in a project with no worktree get empty folder paths
    // and are archived by default, while threads with a worktree record that
    // worktree's paths and remain unarchived.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        // Content + title make the thread non-empty so metadata is created.
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2396
2397 #[gpui::test]
2398 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2399 init_test(cx);
2400
2401 let fs = FakeFs::new(cx.executor());
2402 let project = Project::test(fs, None::<&Path>, cx).await;
2403 let connection = Rc::new(StubAgentConnection::new());
2404
2405 // Create a regular (non-subagent) thread through the panel.
2406 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2407 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2408
2409 let regular_thread =
2410 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2411 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2412
2413 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2414 thread.push_user_content_block(None, "content".into(), cx);
2415 thread.set_title("Regular Thread".into(), cx).detach();
2416 });
2417 vcx.run_until_parked();
2418
2419 // Create a standalone subagent AcpThread (not wrapped in a
2420 // ConversationView). The ThreadMetadataStore only observes
2421 // ConversationView events, so this thread's events should
2422 // have no effect on sidebar metadata.
2423 let subagent_session_id = acp::SessionId::new("subagent-session");
2424 let subagent_thread = cx.update(|cx| {
2425 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2426 cx.new(|cx| {
2427 acp_thread::AcpThread::new(
2428 Some(regular_session_id.clone()),
2429 Some("Subagent Thread".into()),
2430 None,
2431 connection.clone(),
2432 project.clone(),
2433 action_log,
2434 subagent_session_id.clone(),
2435 watch::Receiver::constant(acp::PromptCapabilities::new()),
2436 cx,
2437 )
2438 })
2439 });
2440
2441 cx.update(|cx| {
2442 subagent_thread.update(cx, |thread, cx| {
2443 thread
2444 .set_title("Subagent Thread Title".into(), cx)
2445 .detach();
2446 });
2447 });
2448 cx.run_until_parked();
2449
2450 // Only the regular thread should appear in sidebar metadata.
2451 // The subagent thread is excluded because the metadata store
2452 // only observes ConversationView events.
2453 let list = cx.update(|cx| {
2454 let store = ThreadMetadataStore::global(cx);
2455 store.read(cx).entries().cloned().collect::<Vec<_>>()
2456 });
2457
2458 assert_eq!(
2459 list.len(),
2460 1,
2461 "Expected only the regular thread in sidebar metadata, \
2462 but found {} entries (subagent threads are leaking into the sidebar)",
2463 list.len(),
2464 );
2465 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2466 assert_eq!(list[0].display_title(), "Regular Thread");
2467 }
2468
2469 #[test]
2470 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2471 let now = Utc::now();
2472
2473 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2474 let thread_id = meta.thread_id;
2475 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2476
2477 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2478
2479 assert_eq!(deduped.len(), 1);
2480 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2481 }
2482
2483 #[test]
2484 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2485 let now = Utc::now();
2486 let later = now + chrono::Duration::seconds(1);
2487
2488 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2489 let shared_thread_id = old_metadata.thread_id;
2490 let new_metadata = ThreadMetadata {
2491 thread_id: shared_thread_id,
2492 ..make_metadata("session-1", "New Title", later, PathList::default())
2493 };
2494
2495 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2496 DbOperation::Upsert(old_metadata),
2497 DbOperation::Upsert(new_metadata.clone()),
2498 ]);
2499
2500 assert_eq!(deduped.len(), 1);
2501 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2502 }
2503
2504 #[test]
2505 fn test_dedup_db_operations_preserves_distinct_sessions() {
2506 let now = Utc::now();
2507
2508 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2509 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2510 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2511 DbOperation::Upsert(metadata1.clone()),
2512 DbOperation::Upsert(metadata2.clone()),
2513 ]);
2514
2515 assert_eq!(deduped.len(), 2);
2516 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2517 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2518 }
2519
    // Round-trips archive/unarchive on a single thread: entries_for_path()
    // must hide archived threads, archived_entries() must surface them, and
    // unarchiving restores the original visibility.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially the thread is visible for its path and nothing is archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Hidden from the per-path view...
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            // ...but reported by archived_entries().
            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores the initial state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2602
    // entries_for_path() must exclude archived threads, while entries() still
    // counts them and archived_entries() returns exactly the archived set.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let session2_thread_id = metadata2.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        // Archive only the second thread.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(session2_thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Per-path view shows only the unarchived thread.
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            // entries() includes archived rows.
            assert_eq!(store.entries().count(), 2);

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2657
    // Threads sharing the same folder paths are filtered by remote host:
    // `None` yields only local threads; a specific RemoteConnectionOptions
    // yields only threads for that host. The *_main_worktree_path variants
    // additionally include linked-worktree threads whose main worktree
    // matches the queried path.
    #[gpui::test]
    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
        init_test(cx);

        let main_paths = PathList::new(&[Path::new("/project-a")]);
        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
        let now = Utc::now();

        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });

        // Three threads at the same folder_paths but different hosts.
        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());

        let mut remote_a_thread = make_metadata(
            "remote-a-session",
            "Remote A Thread",
            now - chrono::Duration::seconds(1),
            main_paths.clone(),
        );
        remote_a_thread.remote_connection = Some(remote_a.clone());

        let mut remote_b_thread = make_metadata(
            "remote-b-session",
            "Remote B Thread",
            now - chrono::Duration::seconds(2),
            main_paths.clone(),
        );
        remote_b_thread.remote_connection = Some(remote_b.clone());

        // Linked-worktree threads: main worktree is /project-a, linked
        // worktree is /wt-feature.
        let linked_worktree_paths =
            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();

        let local_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("local-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Local Linked".into()),
            updated_at: now,
            created_at: Some(now),
            worktree_paths: linked_worktree_paths.clone(),
            remote_connection: None,
        };

        let remote_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("remote-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Remote Linked".into()),
            updated_at: now - chrono::Duration::seconds(1),
            created_at: Some(now - chrono::Duration::seconds(1)),
            worktree_paths: linked_worktree_paths,
            remote_connection: Some(remote_a.clone()),
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(local_thread, cx);
                store.save(remote_a_thread, cx);
                store.save(remote_b_thread, cx);
                store.save(local_linked_thread, cx);
                store.save(remote_linked_thread, cx);
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Exact-path queries: one match per host.
            let local_entries: Vec<_> = store
                .entries_for_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(local_entries, vec!["local-session"]);

            let remote_a_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_a_entries, vec!["remote-a-session"]);

            let remote_b_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_b))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_b_entries, vec!["remote-b-session"]);

            // Main-worktree queries also pick up the linked-worktree threads.
            let mut local_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            local_main_entries.sort();
            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);

            let mut remote_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            remote_main_entries.sort();
            assert_eq!(
                remote_main_entries,
                vec!["remote-a-session", "remote-linked"]
            );
        });
    }
2767
2768 #[gpui::test]
2769 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
2770 init_test(cx);
2771
2772 let paths = PathList::new(&[Path::new("/project-a")]);
2773 let now = Utc::now();
2774
2775 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
2776 let m2 = make_metadata(
2777 "session-2",
2778 "Thread Two",
2779 now - chrono::Duration::seconds(1),
2780 paths.clone(),
2781 );
2782 let m3 = make_metadata(
2783 "session-3",
2784 "Thread Three",
2785 now - chrono::Duration::seconds(2),
2786 paths,
2787 );
2788
2789 cx.update(|cx| {
2790 let store = ThreadMetadataStore::global(cx);
2791 store.update(cx, |store, cx| {
2792 store.save_all(vec![m1, m2, m3], cx);
2793 });
2794 });
2795
2796 cx.run_until_parked();
2797
2798 cx.update(|cx| {
2799 let store = ThreadMetadataStore::global(cx);
2800 let store = store.read(cx);
2801
2802 assert_eq!(store.entries().count(), 3);
2803 assert!(
2804 store
2805 .entry_by_session(&acp::SessionId::new("session-1"))
2806 .is_some()
2807 );
2808 assert!(
2809 store
2810 .entry_by_session(&acp::SessionId::new("session-2"))
2811 .is_some()
2812 );
2813 assert!(
2814 store
2815 .entry_by_session(&acp::SessionId::new("session-3"))
2816 .is_some()
2817 );
2818
2819 assert_eq!(store.entry_ids().count(), 3);
2820 });
2821 }
2822
    // The archived flag must be written through to the database: after
    // archiving and then reloading the store from disk, the thread is still
    // archived and still hidden from the per-path view.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Drop in-memory state and re-read everything from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // Archived threads stay hidden from the per-path view...
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            // ...and visible via archived_entries().
            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2881
2882 #[gpui::test]
2883 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2884 init_test(cx);
2885
2886 cx.run_until_parked();
2887
2888 cx.update(|cx| {
2889 let store = ThreadMetadataStore::global(cx);
2890 store.update(cx, |store, cx| {
2891 store.archive(ThreadId::new(), None, cx);
2892 });
2893 });
2894
2895 cx.run_until_parked();
2896
2897 cx.update(|cx| {
2898 let store = ThreadMetadataStore::global(cx);
2899 let store = store.read(cx);
2900
2901 assert!(store.is_empty());
2902 assert_eq!(store.entries().count(), 0);
2903 assert_eq!(store.archived_entries().count(), 0);
2904 });
2905 }
2906
    // save() immediately followed by archive() with no run_until_parked in
    // between exercises coalescing of pending DB operations: the final
    // persisted state must be the saved metadata with archived = true.
    #[gpui::test]
    async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths);
        let thread_id = metadata.thread_id;

        // Both operations are queued in the same update, before any flush.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata.clone(), cx);
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
            pretty_assertions::assert_eq!(
                entries,
                vec![ThreadMetadata {
                    archived: true,
                    ..metadata
                }]
            );
        });
    }
2940
2941 #[gpui::test]
2942 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2943 init_test(cx);
2944 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2945
2946 let id = store
2947 .read_with(cx, |store, cx| {
2948 store.create_archived_worktree(
2949 "/tmp/worktree".to_string(),
2950 "/home/user/repo".to_string(),
2951 Some("feature-branch".to_string()),
2952 "staged_aaa".to_string(),
2953 "unstaged_bbb".to_string(),
2954 "original_000".to_string(),
2955 cx,
2956 )
2957 })
2958 .await
2959 .unwrap();
2960
2961 let thread_id_1 = ThreadId::new();
2962
2963 store
2964 .read_with(cx, |store, cx| {
2965 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
2966 })
2967 .await
2968 .unwrap();
2969
2970 let worktrees = store
2971 .read_with(cx, |store, cx| {
2972 store.get_archived_worktrees_for_thread(thread_id_1, cx)
2973 })
2974 .await
2975 .unwrap();
2976
2977 assert_eq!(worktrees.len(), 1);
2978 let wt = &worktrees[0];
2979 assert_eq!(wt.id, id);
2980 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2981 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2982 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2983 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2984 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2985 assert_eq!(wt.original_commit_hash, "original_000");
2986 }
2987
2988 #[gpui::test]
2989 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2990 init_test(cx);
2991 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2992
2993 let id = store
2994 .read_with(cx, |store, cx| {
2995 store.create_archived_worktree(
2996 "/tmp/worktree".to_string(),
2997 "/home/user/repo".to_string(),
2998 Some("main".to_string()),
2999 "deadbeef".to_string(),
3000 "deadbeef".to_string(),
3001 "original_000".to_string(),
3002 cx,
3003 )
3004 })
3005 .await
3006 .unwrap();
3007
3008 let thread_id_1 = ThreadId::new();
3009
3010 store
3011 .read_with(cx, |store, cx| {
3012 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3013 })
3014 .await
3015 .unwrap();
3016
3017 store
3018 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
3019 .await
3020 .unwrap();
3021
3022 let worktrees = store
3023 .read_with(cx, |store, cx| {
3024 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3025 })
3026 .await
3027 .unwrap();
3028 assert!(worktrees.is_empty());
3029 }
3030
3031 #[gpui::test]
3032 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
3033 init_test(cx);
3034 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3035
3036 let id = store
3037 .read_with(cx, |store, cx| {
3038 store.create_archived_worktree(
3039 "/tmp/worktree".to_string(),
3040 "/home/user/repo".to_string(),
3041 None,
3042 "abc123".to_string(),
3043 "abc123".to_string(),
3044 "original_000".to_string(),
3045 cx,
3046 )
3047 })
3048 .await
3049 .unwrap();
3050
3051 let thread_id_1 = ThreadId::new();
3052 let thread_id_2 = ThreadId::new();
3053
3054 store
3055 .read_with(cx, |store, cx| {
3056 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3057 })
3058 .await
3059 .unwrap();
3060
3061 store
3062 .read_with(cx, |store, cx| {
3063 store.link_thread_to_archived_worktree(thread_id_2, id, cx)
3064 })
3065 .await
3066 .unwrap();
3067
3068 let wt1 = store
3069 .read_with(cx, |store, cx| {
3070 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3071 })
3072 .await
3073 .unwrap();
3074
3075 let wt2 = store
3076 .read_with(cx, |store, cx| {
3077 store.get_archived_worktrees_for_thread(thread_id_2, cx)
3078 })
3079 .await
3080 .unwrap();
3081
3082 assert_eq!(wt1.len(), 1);
3083 assert_eq!(wt2.len(), 1);
3084 assert_eq!(wt1[0].id, wt2[0].id);
3085 }
3086
3087 #[gpui::test]
3088 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
3089 init_test(cx);
3090 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3091
3092 let original_paths = PathList::new(&[
3093 Path::new("/projects/worktree-a"),
3094 Path::new("/projects/worktree-b"),
3095 Path::new("/other/unrelated"),
3096 ]);
3097 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3098 let thread_id = meta.thread_id;
3099
3100 store.update(cx, |store, cx| {
3101 store.save(meta, cx);
3102 });
3103
3104 let replacements = vec![
3105 (
3106 PathBuf::from("/projects/worktree-a"),
3107 PathBuf::from("/restored/worktree-a"),
3108 ),
3109 (
3110 PathBuf::from("/projects/worktree-b"),
3111 PathBuf::from("/restored/worktree-b"),
3112 ),
3113 ];
3114
3115 store.update(cx, |store, cx| {
3116 store.complete_worktree_restore(thread_id, &replacements, cx);
3117 });
3118
3119 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3120 let entry = entry.unwrap();
3121 let paths = entry.folder_paths().paths();
3122 assert_eq!(paths.len(), 3);
3123 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3124 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3125 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3126 }
3127
3128 #[gpui::test]
3129 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
3130 init_test(cx);
3131 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3132
3133 let original_paths =
3134 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3135 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3136 let thread_id = meta.thread_id;
3137
3138 store.update(cx, |store, cx| {
3139 store.save(meta, cx);
3140 });
3141
3142 let replacements = vec![
3143 (
3144 PathBuf::from("/projects/worktree-a"),
3145 PathBuf::from("/new/worktree-a"),
3146 ),
3147 (
3148 PathBuf::from("/nonexistent/path"),
3149 PathBuf::from("/should/not/appear"),
3150 ),
3151 ];
3152
3153 store.update(cx, |store, cx| {
3154 store.complete_worktree_restore(thread_id, &replacements, cx);
3155 });
3156
3157 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3158 let entry = entry.unwrap();
3159 let paths = entry.folder_paths().paths();
3160 assert_eq!(paths.len(), 2);
3161 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3162 assert!(paths.contains(&PathBuf::from("/other/path")));
3163 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3164 }
3165
3166 #[gpui::test]
3167 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
3168 init_test(cx);
3169 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3170
3171 let original_paths = PathList::new(&[
3172 Path::new("/projects/worktree-a"),
3173 Path::new("/projects/worktree-b"),
3174 Path::new("/other/unrelated"),
3175 ]);
3176 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3177 let thread_id = meta.thread_id;
3178
3179 store.update(cx, |store, cx| {
3180 store.save(meta, cx);
3181 });
3182
3183 let replacements = vec![
3184 (
3185 PathBuf::from("/projects/worktree-a"),
3186 PathBuf::from("/restored/worktree-a"),
3187 ),
3188 (
3189 PathBuf::from("/projects/worktree-b"),
3190 PathBuf::from("/restored/worktree-b"),
3191 ),
3192 ];
3193
3194 store.update(cx, |store, cx| {
3195 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3196 });
3197
3198 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3199 let entry = entry.unwrap();
3200 let paths = entry.folder_paths().paths();
3201 assert_eq!(paths.len(), 3);
3202 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3203 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3204 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3205 }
3206
3207 #[gpui::test]
3208 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
3209 init_test(cx);
3210 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3211
3212 let original_paths =
3213 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3214 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3215 let thread_id = meta.thread_id;
3216
3217 store.update(cx, |store, cx| {
3218 store.save(meta, cx);
3219 });
3220
3221 let replacements = vec![
3222 (
3223 PathBuf::from("/projects/worktree-a"),
3224 PathBuf::from("/new/worktree-a"),
3225 ),
3226 (
3227 PathBuf::from("/nonexistent/path"),
3228 PathBuf::from("/should/not/appear"),
3229 ),
3230 ];
3231
3232 store.update(cx, |store, cx| {
3233 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3234 });
3235
3236 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3237 let entry = entry.unwrap();
3238 let paths = entry.folder_paths().paths();
3239 assert_eq!(paths.len(), 2);
3240 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3241 assert!(paths.contains(&PathBuf::from("/other/path")));
3242 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3243 }
3244
3245 #[gpui::test]
3246 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
3247 init_test(cx);
3248 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3249
3250 let id1 = store
3251 .read_with(cx, |store, cx| {
3252 store.create_archived_worktree(
3253 "/projects/worktree-a".to_string(),
3254 "/home/user/repo".to_string(),
3255 Some("branch-a".to_string()),
3256 "staged_a".to_string(),
3257 "unstaged_a".to_string(),
3258 "original_000".to_string(),
3259 cx,
3260 )
3261 })
3262 .await
3263 .unwrap();
3264
3265 let id2 = store
3266 .read_with(cx, |store, cx| {
3267 store.create_archived_worktree(
3268 "/projects/worktree-b".to_string(),
3269 "/home/user/repo".to_string(),
3270 Some("branch-b".to_string()),
3271 "staged_b".to_string(),
3272 "unstaged_b".to_string(),
3273 "original_000".to_string(),
3274 cx,
3275 )
3276 })
3277 .await
3278 .unwrap();
3279
3280 let thread_id_1 = ThreadId::new();
3281
3282 store
3283 .read_with(cx, |store, cx| {
3284 store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
3285 })
3286 .await
3287 .unwrap();
3288
3289 store
3290 .read_with(cx, |store, cx| {
3291 store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
3292 })
3293 .await
3294 .unwrap();
3295
3296 let worktrees = store
3297 .read_with(cx, |store, cx| {
3298 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3299 })
3300 .await
3301 .unwrap();
3302
3303 assert_eq!(worktrees.len(), 2);
3304
3305 let paths: Vec<&Path> = worktrees
3306 .iter()
3307 .map(|w| w.worktree_path.as_path())
3308 .collect();
3309 assert!(paths.contains(&Path::new("/projects/worktree-a")));
3310 assert!(paths.contains(&Path::new("/projects/worktree-b")));
3311 }
3312
3313 // ── Migration tests ────────────────────────────────────────────────
3314
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id, two without.
        // Note: `exec` returns a prepared-statement closure; the trailing `()`
        // actually executes it, hence the `.unwrap()()` pattern below.
        // The blob literal is a fixed 16-byte UUID asserted on further down.
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                (session_id, title, updated_at, thread_id) \
                VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                (session_id, title, updated_at) \
                VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                (session_id, title, updated_at) \
                VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                (session_id, archived_worktree_id) \
                VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
            .unwrap();

        // Run all migrations (0-7). sqlez skips 0-6 and runs only migration 7.
        connection
            .migrate(
                ThreadMetadataDb::NAME,
                ThreadMetadataDb::MIGRATIONS,
                &mut |_, _, _| false,
            )
            .expect("new migration should succeed");

        // All 3 rows should survive with non-NULL thread_ids.
        // The double unwrap after the call: the query yields
        // Result<Option<T>> — first Result, then the row Option.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
            .unwrap()
            .unwrap();
        assert_eq!(
            original_tid,
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
            .unwrap()
            .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
            .unwrap()
            .unwrap();
        // 16 bytes matches a binary UUID, the same width as the pre-existing id.
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
            .unwrap()
            .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3476
3477 // ── ThreadWorktreePaths tests ──────────────────────────────────────
3478
3479 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3480 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3481 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3482 .iter()
3483 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3484 .unzip();
3485 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3486 }
3487
3488 #[test]
3489 fn test_thread_worktree_paths_full_add_then_remove_cycle() {
3490 // Full scenario from the issue:
3491 // 1. Start with linked worktree selectric → zed
3492 // 2. Add cloud
3493 // 3. Remove zed
3494
3495 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3496
3497 // Step 2: add cloud
3498 paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));
3499
3500 assert_eq!(paths.ordered_pairs().count(), 2);
3501 assert_eq!(
3502 paths.folder_path_list(),
3503 &PathList::new(&[
3504 Path::new("/worktrees/selectric/zed"),
3505 Path::new("/projects/cloud"),
3506 ])
3507 );
3508 assert_eq!(
3509 paths.main_worktree_path_list(),
3510 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
3511 );
3512
3513 // Step 3: remove zed
3514 paths.remove_main_path(Path::new("/projects/zed"));
3515
3516 assert_eq!(paths.ordered_pairs().count(), 1);
3517 assert_eq!(
3518 paths.folder_path_list(),
3519 &PathList::new(&[Path::new("/projects/cloud")])
3520 );
3521 assert_eq!(
3522 paths.main_worktree_path_list(),
3523 &PathList::new(&[Path::new("/projects/cloud")])
3524 );
3525 }
3526
3527 #[test]
3528 fn test_thread_worktree_paths_add_is_idempotent() {
3529 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3530
3531 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3532
3533 assert_eq!(paths.ordered_pairs().count(), 1);
3534 }
3535
3536 #[test]
3537 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3538 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3539
3540 paths.remove_main_path(Path::new("/projects/nonexistent"));
3541
3542 assert_eq!(paths.ordered_pairs().count(), 1);
3543 }
3544
3545 #[test]
3546 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3547 let folder = PathList::new(&[
3548 Path::new("/worktrees/selectric/zed"),
3549 Path::new("/projects/cloud"),
3550 ]);
3551 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3552
3553 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3554
3555 let pairs: Vec<_> = paths
3556 .ordered_pairs()
3557 .map(|(m, f)| (m.clone(), f.clone()))
3558 .collect();
3559 assert_eq!(pairs.len(), 2);
3560 assert!(pairs.contains(&(
3561 PathBuf::from("/projects/zed"),
3562 PathBuf::from("/worktrees/selectric/zed")
3563 )));
3564 assert!(pairs.contains(&(
3565 PathBuf::from("/projects/cloud"),
3566 PathBuf::from("/projects/cloud")
3567 )));
3568 }
3569
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees sharing the same main repo: each keeps its
        // own (main, folder) pair, so ordered_pairs reports both entries
        // even though the main path is repeated.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // The expected main list below is built from the same duplicated
        // input, so this assertion holds whether or not PathList
        // deduplicates internally. NOTE(review): the original comments
        // contradicted each other on PathList's dedup behavior — confirm it
        // against PathList if the exact list contents ever matter here.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3595
3596 #[test]
3597 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3598 let folder = PathList::new(&[
3599 Path::new("/worktrees/selectric/zed"),
3600 Path::new("/projects/cloud"),
3601 ]);
3602 let main = PathList::new(&[Path::new("/projects/zed")]);
3603
3604 let result = WorktreePaths::from_path_lists(main, folder);
3605 assert!(result.is_err());
3606 }
3607
    /// Regression test: archiving a thread created in a git worktree must
    /// preserve the thread's folder paths so that restoring it later does
    /// not prompt the user to re-associate a project.
    #[gpui::test]
    async fn test_archived_thread_retains_paths_after_worktree_removal(cx: &mut TestAppContext) {
        init_test(cx);

        // A project rooted at a single fake worktree directory; the stub
        // connection avoids any real agent traffic.
        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/worktrees/feature",
            serde_json::json!({ "src": { "main.rs": "" } }),
        )
        .await;
        let project = Project::test(fs, [Path::new("/worktrees/feature")], cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Push content so the event handler saves metadata with the
        // project's worktree paths.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Verify paths were saved correctly, and snapshot them so we can
        // compare after the archive + removal sequence below.
        let (folder_paths_before, main_paths_before) = cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(
                !entry.folder_paths().is_empty(),
                "thread should have folder paths before archiving"
            );
            (
                entry.folder_paths().clone(),
                entry.main_worktree_paths().clone(),
            )
        });

        // Archive the thread.
        cx.update(|cx| {
            ThreadMetadataStore::global(cx).update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });
        cx.run_until_parked();

        // Remove the worktree from the project, simulating what the
        // archive flow does for linked git worktrees.
        let worktree_id = cx.update(|cx| {
            project
                .read(cx)
                .visible_worktrees(cx)
                .next()
                .unwrap()
                .read(cx)
                .id()
        });
        project.update(cx, |project, cx| {
            project.remove_worktree(worktree_id, cx);
        });
        cx.run_until_parked();

        // Trigger a thread event after archiving + worktree removal.
        // In production this happens when an async title-generation task
        // completes after the thread was archived.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Generated title".into(), cx).detach();
        });
        vcx.run_until_parked();

        // The archived thread must still have its original folder paths.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(entry.archived, "thread should still be archived");
            assert_eq!(
                entry.display_title().as_ref(),
                "Generated title",
                "title should still be updated for archived threads"
            );
            assert_eq!(
                entry.folder_paths(),
                &folder_paths_before,
                "archived thread must retain its folder paths after worktree \
                removal + subsequent thread event, otherwise restoring it \
                will prompt the user to re-associate a project"
            );
            assert_eq!(
                entry.main_worktree_paths(),
                &main_paths_before,
                "archived thread must retain its main worktree paths after \
                worktree removal + subsequent thread event"
            );
        });
    }
3708}