1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent::{ThreadStore, ZED_AGENT_ID};
7use agent_client_protocol as acp;
8use anyhow::Context as _;
9use chrono::{DateTime, Utc};
10use collections::{HashMap, HashSet};
11use db::{
12 kvp::KeyValueStore,
13 sqlez::{
14 bindable::{Bind, Column},
15 domain::Domain,
16 statement::Statement,
17 thread_safe_connection::ThreadSafeConnection,
18 },
19 sqlez_macros::sql,
20};
21use fs::Fs;
22use futures::{FutureExt, future::Shared};
23use gpui::{AppContext as _, Entity, Global, Subscription, Task};
24pub use project::WorktreePaths;
25use project::{AgentId, linked_worktree_short_name};
26use remote::{RemoteConnectionOptions, same_remote_connection_identity};
27use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
28use util::{ResultExt as _, debug_panic};
29use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
30
31use crate::DEFAULT_THREAD_TITLE;
32
/// Stable, unique identifier for a sidebar thread, independent of the ACP
/// session id (which may be absent on rows migrated from older stores).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);
35
impl ThreadId {
    /// Creates a fresh, random thread id (UUID v4).
    pub fn new() -> Self {
        Self(uuid::Uuid::new_v4())
    }
}
41
/// Binds a `ThreadId` to a SQL statement parameter by delegating to the
/// inner UUID's `Bind` impl.
impl Bind for ThreadId {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}
47
48impl Column for ThreadId {
49 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
50 let (uuid, next) = Column::column(statement, start_index)?;
51 Ok((ThreadId(uuid), next))
52 }
53}
54
// KVP keys marking the one-shot backfill migrations below as complete; see
// `migrate_thread_remote_connections` and `migrate_thread_ids`.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";
57
/// List all sidebar thread metadata from an arbitrary SQLite connection.
///
/// This is used to read thread metadata from another release channel's
/// database without opening a full `ThreadSafeConnection`.
pub(crate) fn list_thread_metadata_from_connection(
    connection: &db::sqlez::connection::Connection,
) -> anyhow::Result<Vec<ThreadMetadata>> {
    // `select` returns a prepared closure; the trailing `()` executes it.
    connection.select::<ThreadMetadata>(ThreadMetadataDb::LIST_QUERY)?()
}
67
/// Run the `ThreadMetadataDb` migrations on a raw connection.
///
/// This is used in tests to set up the sidebar_threads schema in a
/// temporary database.
#[cfg(test)]
pub(crate) fn run_thread_metadata_migrations(connection: &db::sqlez::connection::Connection) {
    connection
        .migrate(
            ThreadMetadataDb::NAME,
            ThreadMetadataDb::MIGRATIONS,
            // NOTE(review): presumably a "should skip this migration?"
            // callback — always `false` so every pending migration runs.
            // Confirm against `migrate`'s contract.
            &mut |_, _, _| false,
        )
        .expect("thread metadata migrations should succeed");
}
82
/// Initializes the global thread metadata store and kicks off the one-shot
/// data migrations.
///
/// Order matters: the store must exist before migrations run, and the
/// remote-connection backfill awaits the initial metadata migration task.
pub fn init(cx: &mut App) {
    ThreadMetadataStore::init_global(cx);
    let migration_task = migrate_thread_metadata(cx);
    migrate_thread_remote_connections(cx, migration_task);
    migrate_thread_ids(cx);
}
89
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        let existing_list = db.list()?;
        // An empty metadata DB means this is the very first run; used below
        // to decide which migrated threads stay unarchived.
        let is_first_migration = existing_list.is_empty();
        // Session ids already present in the metadata DB — skip re-migrating those.
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        // Build fresh metadata for every thread-store entry not yet migrated.
        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Don't persist empty/default titles; display already
                        // falls back to the default title.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        // Migrated threads start archived; a subset is
                        // unarchived below on the first migration only.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without a project are left archived.
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
175
176fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
177 let store = ThreadMetadataStore::global(cx);
178 let db = store.read(cx).db.clone();
179 let kvp = KeyValueStore::global(cx);
180 let workspace_db = WorkspaceDb::global(cx);
181 let fs = <dyn Fs>::global(cx);
182
183 cx.spawn(async move |cx| -> anyhow::Result<()> {
184 migration_task.await?;
185
186 if kvp
187 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
188 .is_some()
189 {
190 return Ok(());
191 }
192
193 let recent_workspaces = workspace_db.recent_workspaces_on_disk(fs.as_ref()).await?;
194
195 let mut local_path_lists = HashSet::<PathList>::default();
196 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
197
198 recent_workspaces
199 .iter()
200 .filter(|(_, location, path_list, _)| {
201 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
202 })
203 .for_each(|(_, _, path_list, _)| {
204 local_path_lists.insert(path_list.clone());
205 });
206
207 for (_, location, path_list, _) in recent_workspaces {
208 match location {
209 SerializedWorkspaceLocation::Remote(remote_connection)
210 if !local_path_lists.contains(&path_list) =>
211 {
212 remote_path_lists
213 .entry(path_list)
214 .or_insert(remote_connection);
215 }
216 _ => {}
217 }
218 }
219
220 let mut reloaded = false;
221 for metadata in db.list()? {
222 if metadata.remote_connection.is_some() {
223 continue;
224 }
225
226 if let Some(remote_connection) = remote_path_lists
227 .get(metadata.folder_paths())
228 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
229 {
230 db.save(ThreadMetadata {
231 remote_connection: Some(remote_connection.clone()),
232 ..metadata
233 })
234 .await?;
235 reloaded = true;
236 }
237 }
238
239 let reloaded_task = reloaded
240 .then_some(store.update(cx, |store, cx| store.reload(cx)))
241 .unwrap_or(Task::ready(()).shared());
242
243 kvp.write_kvp(
244 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
245 "1".to_string(),
246 )
247 .await?;
248 reloaded_task.await;
249
250 Ok(())
251 })
252 .detach_and_log_err(cx);
253}
254
255fn migrate_thread_ids(cx: &mut App) {
256 let store = ThreadMetadataStore::global(cx);
257 let db = store.read(cx).db.clone();
258 let kvp = KeyValueStore::global(cx);
259
260 cx.spawn(async move |cx| -> anyhow::Result<()> {
261 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
262 return Ok(());
263 }
264
265 let mut reloaded = false;
266 for metadata in db.list()? {
267 db.save(metadata).await?;
268 reloaded = true;
269 }
270
271 let reloaded_task = reloaded
272 .then_some(store.update(cx, |store, cx| store.reload(cx)))
273 .unwrap_or(Task::ready(()).shared());
274
275 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
276 .await?;
277 reloaded_task.await;
278
279 Ok(())
280 })
281 .detach_and_log_err(cx);
282}
283
/// GPUI global wrapper holding the singleton [`ThreadMetadataStore`] entity.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
286
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier for this thread in the metadata store.
    pub thread_id: ThreadId,
    /// ACP session id, if any. The store rejects saves without one
    /// (see `save_internal`).
    pub session_id: Option<acp::SessionId>,
    /// Which agent owns the thread (e.g. Zed's native agent).
    pub agent_id: AgentId,
    /// User-visible title; `None` falls back to the default thread title.
    pub title: Option<SharedString>,
    /// Last-modified time; also drives "most recent" ordering in migrations.
    pub updated_at: DateTime<Utc>,
    /// Creation time, when known.
    pub created_at: Option<DateTime<Utc>>,
    /// Folder and main-worktree paths the thread was opened with.
    pub worktree_paths: WorktreePaths,
    /// Remote connection the thread belongs to; `None` means local.
    pub remote_connection: Option<RemoteConnectionOptions>,
    /// Archived threads are excluded from per-project listings.
    pub archived: bool,
}
301
302impl ThreadMetadata {
303 pub fn display_title(&self) -> SharedString {
304 self.title
305 .clone()
306 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
307 }
308
309 pub fn folder_paths(&self) -> &PathList {
310 self.worktree_paths.folder_path_list()
311 }
312 pub fn main_worktree_paths(&self) -> &PathList {
313 self.worktree_paths.main_worktree_path_list()
314 }
315}
316
/// Derives worktree display info from a thread's stored path list.
///
/// For each path in the thread's `folder_paths`, produces a
/// [`ThreadItemWorktreeInfo`] with a short display name, full path, and whether
/// the worktree is the main checkout or a linked git worktree. When
/// multiple main paths exist and a linked worktree's short name alone
/// wouldn't identify which main project it belongs to, the main project
/// name is prefixed for disambiguation (e.g. `project:feature`).
pub fn worktree_info_from_thread_paths<S: std::hash::BuildHasher>(
    worktree_paths: &WorktreePaths,
    branch_names: &std::collections::HashMap<PathBuf, SharedString, S>,
) -> Vec<ThreadItemWorktreeInfo> {
    let mut infos: Vec<ThreadItemWorktreeInfo> = Vec::new();
    // One (short_name, main project name) pair per *linked* info, in push
    // order — kept parallel to the linked entries of `infos` for the zip below.
    let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new();
    // Distinct main worktree paths seen (despite the name, this is a set).
    let mut unique_main_count = HashSet::default();

    for (main_path, folder_path) in worktree_paths.ordered_pairs() {
        unique_main_count.insert(main_path.clone());
        // A folder that differs from its main path is a linked git worktree.
        let is_linked = main_path != folder_path;

        if is_linked {
            let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default();
            // Main project name = last path component of the main checkout.
            let project_name = main_path
                .file_name()
                .map(|n| SharedString::from(n.to_string_lossy().to_string()))
                .unwrap_or_default();
            linked_short_names.push((short_name.clone(), project_name));
            infos.push(ThreadItemWorktreeInfo {
                name: short_name,
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Linked,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        } else {
            // A main path with no final component (e.g. filesystem root) is skipped.
            let Some(name) = folder_path.file_name() else {
                continue;
            };
            infos.push(ThreadItemWorktreeInfo {
                name: SharedString::from(name.to_string_lossy().to_string()),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Main,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        }
    }

    // When the group has multiple main worktree paths and the thread's
    // folder paths don't all share the same short name, prefix each
    // linked worktree chip with its main project name so the user knows
    // which project it belongs to.
    // NOTE(review): when every chip *does* share one name across multiple
    // mains, prefixing is skipped — verify that's intended, since identical
    // names are exactly when disambiguation would seem most useful.
    let all_same_name = infos.len() > 1 && infos.iter().all(|i| i.name == infos[0].name);

    if unique_main_count.len() > 1 && !all_same_name {
        for (info, (_short_name, project_name)) in infos
            .iter_mut()
            .filter(|i| i.kind == WorktreeKind::Linked)
            .zip(linked_short_names.iter())
        {
            info.name = SharedString::from(format!("{}:{}", project_name, info.name));
        }
    }

    infos
}
383
384impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
385 fn from(meta: &ThreadMetadata) -> Self {
386 let session_id = meta
387 .session_id
388 .clone()
389 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
390 Self {
391 session_id,
392 work_dirs: Some(meta.folder_paths().clone()),
393 title: meta.title.clone(),
394 updated_at: Some(meta.updated_at),
395 created_at: meta.created_at,
396 meta: None,
397 }
398 }
399}
400
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// Holds everything needed to recreate the worktree on restore: where it
/// lived, which repo owned it, and the WIP commits capturing its working state.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
442
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    /// Persistent backing database for thread metadata.
    db: ThreadMetadataDb,
    /// All known threads keyed by id; the maps below are indexes into this.
    threads: HashMap<ThreadId, ThreadMetadata>,
    /// Index: folder path list -> threads opened with those folders.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Index: main worktree path list -> threads (covers linked worktrees).
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Index: ACP session id -> owning thread id.
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    /// In-flight `reload`, shared so concurrent callers await the same task.
    reload_task: Option<Shared<Task<()>>>,
    /// Per-ConversationView subscriptions, removed when the view is released.
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    /// Queue of DB writes (upserts/deletes) applied asynchronously.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Background archive jobs; dropping a job's sender cancels it (see `unarchive`).
    in_flight_archives: HashMap<ThreadId, (Task<()>, smol::channel::Sender<()>)>,
    /// Task that drains `pending_thread_ops_tx` — presumably applying each
    /// operation to `db`; body not visible here, confirm in `new`.
    _db_operations_task: Task<()>,
}
458
/// A queued database write, applied asynchronously in submission order.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update the row for this metadata's thread id.
    Upsert(ThreadMetadata),
    /// Remove the row for this thread id.
    Delete(ThreadId),
}
464
465impl DbOperation {
466 fn id(&self) -> ThreadId {
467 match self {
468 DbOperation::Upsert(thread) => thread.thread_id,
469 DbOperation::Delete(thread_id) => *thread_id,
470 }
471 }
472}
473
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
/// Marker impl so the override can be stored as a GPUI global.
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}
482
#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// The DB name for tests: the override global if set, otherwise a name
    /// derived from the current thread's (i.e. the test's) name.
    pub fn global(cx: &App) -> String {
        match cx.try_global::<Self>() {
            Some(name) => name.0.clone(),
            None => {
                let current = std::thread::current();
                format!(
                    "THREAD_METADATA_DB_{}",
                    current.name().unwrap_or("unknown_test")
                )
            }
        }
    }
}
495
496impl ThreadMetadataStore {
497 #[cfg(not(any(test, feature = "test-support")))]
498 pub fn init_global(cx: &mut App) {
499 if cx.has_global::<Self>() {
500 return;
501 }
502
503 let db = ThreadMetadataDb::global(cx);
504 let thread_store = cx.new(|cx| Self::new(db, cx));
505 cx.set_global(GlobalThreadMetadataStore(thread_store));
506 }
507
    /// Test variant: always creates a fresh store backed by a per-test DB
    /// named via [`TestMetadataDbName`], replacing any existing global.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let db_name = TestMetadataDbName::global(cx);
        // Blocking is acceptable in tests; opening the test DB is quick.
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
515
516 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
517 cx.try_global::<GlobalThreadMetadataStore>()
518 .map(|store| store.0.clone())
519 }
520
    /// The global store. Panics if `init_global` hasn't run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }
524
    /// Whether the store holds no thread metadata at all.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }
528
    /// Returns all thread IDs (in arbitrary map order).
    pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
        self.threads.keys().copied()
    }
533
    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
        self.threads.get(&thread_id)
    }
538
539 /// Returns the metadata for a thread identified by its ACP session ID.
540 pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
541 let thread_id = self.threads_by_session.get(session_id)?;
542 self.threads.get(thread_id)
543 }
544
    /// Returns all threads (including archived ones), in arbitrary order.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
549
550 /// Returns all archived threads.
551 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
552 self.entries().filter(|t| t.archived)
553 }
554
555 /// Returns all threads for the given path list and remote connection,
556 /// excluding archived threads.
557 ///
558 /// When `remote_connection` is `Some`, only threads whose persisted
559 /// `remote_connection` matches by normalized identity are returned.
560 /// When `None`, only local (non-remote) threads are returned.
561 pub fn entries_for_path<'a>(
562 &'a self,
563 path_list: &PathList,
564 remote_connection: Option<&'a RemoteConnectionOptions>,
565 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
566 self.threads_by_paths
567 .get(path_list)
568 .into_iter()
569 .flatten()
570 .filter_map(|s| self.threads.get(s))
571 .filter(|s| !s.archived)
572 .filter(move |s| {
573 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
574 })
575 }
576
577 /// Returns threads whose `main_worktree_paths` matches the given path list
578 /// and remote connection, excluding archived threads. This finds threads
579 /// that were opened in a linked worktree but are associated with the given
580 /// main worktree.
581 ///
582 /// When `remote_connection` is `Some`, only threads whose persisted
583 /// `remote_connection` matches by normalized identity are returned.
584 /// When `None`, only local (non-remote) threads are returned.
585 pub fn entries_for_main_worktree_path<'a>(
586 &'a self,
587 path_list: &PathList,
588 remote_connection: Option<&'a RemoteConnectionOptions>,
589 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
590 self.threads_by_main_paths
591 .get(path_list)
592 .into_iter()
593 .flatten()
594 .filter_map(|s| self.threads.get(s))
595 .filter(|s| !s.archived)
596 .filter(move |s| {
597 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
598 })
599 }
600
    /// Rebuilds the in-memory caches from the database.
    ///
    /// Returns a shared task so concurrent callers can await the same
    /// reload; the newest task replaces any previous in-flight one.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous reload; its result would be stale anyway.
        self.reload_task.take();

        // Fetch rows off the main thread.
        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the primary map and all indexes from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        this.cache_thread_metadata(row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
632
633 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
634 for metadata in metadata {
635 self.save_internal(metadata);
636 }
637 cx.notify();
638 }
639
    /// Saves a single metadata entry (cache + queued DB upsert) and notifies
    /// observers.
    pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
644
    /// Updates the caches with `metadata` and queues a DB upsert.
    ///
    /// If the thread already exists under different paths, it is first
    /// removed from the now-stale path indexes.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // Metadata without a session id is unroutable; refuse to store it.
        if metadata.session_id.is_none() {
            debug_panic!("cannot store thread metadata without a session_id");
            return;
        };

        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            // De-index the old folder paths if they changed.
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            // Likewise for the main-worktree path index.
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        self.cache_thread_metadata(metadata.clone());
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
674
675 fn cache_thread_metadata(&mut self, metadata: ThreadMetadata) {
676 let Some(session_id) = metadata.session_id.as_ref() else {
677 debug_panic!("cannot store thread metadata without a session_id");
678 return;
679 };
680
681 self.threads_by_session
682 .insert(session_id.clone(), metadata.thread_id);
683
684 self.threads.insert(metadata.thread_id, metadata.clone());
685
686 self.threads_by_paths
687 .entry(metadata.folder_paths().clone())
688 .or_default()
689 .insert(metadata.thread_id);
690
691 if !metadata.main_worktree_paths().is_empty() {
692 self.threads_by_main_paths
693 .entry(metadata.main_worktree_paths().clone())
694 .or_default()
695 .insert(metadata.thread_id);
696 }
697 }
698
    /// Replaces a thread's working directories (its folder paths), keeping
    /// the existing main-worktree association when the pairing can be derived.
    pub fn update_working_directories(
        &mut self,
        thread_id: ThreadId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id) {
            debug_assert!(
                !thread.archived,
                "update_working_directories called on archived thread"
            );
            self.save_internal(ThreadMetadata {
                // Prefer pairing the new dirs with the existing main paths;
                // if that fails, fall back to deriving worktree paths from
                // the new dirs alone.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }
721
    /// Sets `worktree_paths` on each of the given threads, skipping threads
    /// that are missing, unchanged, or archived. Notifies observers once if
    /// anything changed.
    pub fn update_worktree_paths(
        &mut self,
        thread_ids: &[ThreadId],
        worktree_paths: WorktreePaths,
        cx: &mut Context<Self>,
    ) {
        let mut changed = false;
        for &thread_id in thread_ids {
            let Some(thread) = self.threads.get(&thread_id) else {
                continue;
            };
            if thread.worktree_paths == worktree_paths {
                continue;
            }
            // Don't overwrite paths for archived threads — the
            // project may no longer include the worktree that was
            // removed during the archive flow.
            if thread.archived {
                continue;
            }
            self.save_internal(ThreadMetadata {
                worktree_paths: worktree_paths.clone(),
                ..thread.clone()
            });
            changed = true;
        }
        if changed {
            cx.notify();
        }
    }
752
    /// Marks a thread as archived. When a background archive job accompanies
    /// the archival, it is tracked so a later `unarchive` can cancel it by
    /// dropping its channel sender.
    pub fn archive(
        &mut self,
        thread_id: ThreadId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(thread_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(thread_id, job);
        }
    }
765
    /// Marks a thread as not archived and cancels any in-flight archive job.
    pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        self.update_archived(thread_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(&thread_id);
    }
771
    /// Drops tracking for an archive job that ran to completion.
    pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
        self.in_flight_archives.remove(&thread_id);
    }
775
    /// Returns `true` if any unarchived thread other than `thread_id` (on
    /// the same remote-connection identity) references `path` in its folder
    /// paths. Used to determine whether a worktree can safely be removed
    /// from disk.
    pub fn path_is_referenced_by_other_unarchived_threads(
        &self,
        thread_id: ThreadId,
        path: &Path,
        remote_connection: Option<&RemoteConnectionOptions>,
    ) -> bool {
        self.entries().any(|thread| {
            thread.thread_id != thread_id
                && !thread.archived
                && same_remote_connection_identity(
                    thread.remote_connection.as_ref(),
                    remote_connection,
                )
                && thread
                    .folder_paths()
                    .paths()
                    .iter()
                    .any(|other_path| other_path.as_path() == path)
        })
    }
799
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    ///
    /// Note: only the *first* occurrence of each old path is replaced;
    /// `complete_worktree_restore` replaces every occurrence.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Keep the main-worktree pairing when it can be derived;
                // otherwise rebuild worktree paths from the folders alone.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
829
    /// Finalizes a worktree restore by applying `(old_path, new_path)`
    /// replacements to the thread's stored folder paths.
    ///
    /// Unlike `update_restored_worktree_paths`, *every* occurrence of an
    /// old path is replaced, not just the first.
    pub fn complete_worktree_restore(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Keep the main-worktree pairing when derivable; otherwise
                // rebuild worktree paths from the folders alone.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
857
    /// Apply a mutation to the worktree paths of all threads whose current
    /// `folder_paths` matches `current_folder_paths`, then re-index.
    /// When `remote_connection` is provided, only threads with a matching
    /// remote connection are affected.
    pub fn change_worktree_paths(
        &mut self,
        current_folder_paths: &PathList,
        remote_connection: Option<&RemoteConnectionOptions>,
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        // Collect ids first so the mutation below can take `&mut self`.
        let thread_ids: Vec<_> = self
            .threads_by_paths
            .get(current_folder_paths)
            .into_iter()
            .flatten()
            .filter(|id| {
                // Archived threads keep their stored paths untouched.
                self.threads.get(id).is_some_and(|t| {
                    !t.archived
                        && same_remote_connection_identity(
                            t.remote_connection.as_ref(),
                            remote_connection,
                        )
                })
            })
            .copied()
            .collect();

        self.mutate_thread_paths(&thread_ids, mutate, cx);
    }
888
    /// De-indexes each thread, applies `mutate` to its worktree paths,
    /// re-indexes it under the new paths, and queues a DB upsert.
    /// Notifies observers once at the end.
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        // Avoid a spurious notify when there's nothing to do.
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // Remove from both path indexes *before* mutating, while the
                // old keys are still readable from the thread.
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                // Re-index under the (possibly) new keys.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }
930
    /// Persists a new [`ArchivedGitWorktree`] record and resolves to its
    /// row id. See the field docs on [`ArchivedGitWorktree`] for what each
    /// parameter means.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }
954
    /// Associates a thread with an archived worktree record so the worktree
    /// can be restored when the thread is unarchived.
    pub fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
                .await
        })
    }
967
    /// Fetches all archived worktree records linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
    }
976
    /// Deletes an archived worktree record by its row id.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }
981
    /// Removes every thread-to-archived-worktree link for the given thread.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(thread_id)
                .await
        })
    }
993
    /// Whether any thread still links to the given archived worktree record
    /// (i.e. whether the record may be cleaned up).
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }
1005
1006 pub fn get_all_archived_branch_names(
1007 &self,
1008 cx: &App,
1009 ) -> Task<anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>>> {
1010 let db = self.db.clone();
1011 cx.background_spawn(async move { db.get_all_archived_branch_names() })
1012 }
1013
1014 fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
1015 if let Some(thread) = self.threads.get(&thread_id) {
1016 self.save_internal(ThreadMetadata {
1017 archived,
1018 ..thread.clone()
1019 });
1020 cx.notify();
1021 }
1022 }
1023
1024 pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
1025 if let Some(thread) = self.threads.get(&thread_id) {
1026 if let Some(sid) = &thread.session_id {
1027 self.threads_by_session.remove(sid);
1028 }
1029 if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
1030 thread_ids.remove(&thread_id);
1031 }
1032 if !thread.main_worktree_paths().is_empty() {
1033 if let Some(thread_ids) = self
1034 .threads_by_main_paths
1035 .get_mut(thread.main_worktree_paths())
1036 {
1037 thread_ids.remove(&thread_id);
1038 }
1039 }
1040 }
1041 self.threads.remove(&thread_id);
1042 self.pending_thread_ops_tx
1043 .try_send(DbOperation::Delete(thread_id))
1044 .log_err();
1045 cx.notify();
1046 }
1047
    /// Build the store: wire up conversation-view subscriptions, start the
    /// single background task that applies database writes, and kick off the
    /// initial cache load.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Subscribe to every ConversationView as it is created so its root
        // thread's metadata updates reach this store, and remove the
        // subscription again when the view entity is released.
        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        // All database writes funnel through this unbounded channel into one
        // background task: it drains whatever is queued, dedups the batch so
        // only the latest operation per thread id remains, then applies each
        // surviving operation in sequence.
        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate the in-memory cache from the database right away.
        let _ = this.reload(cx);
        this
    }
1116
1117 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
1118 let mut ops = HashMap::default();
1119 for operation in operations.into_iter().rev() {
1120 if ops.contains_key(&operation.id()) {
1121 continue;
1122 }
1123 ops.insert(operation.id(), operation);
1124 }
1125 ops.into_values().collect()
1126 }
1127
1128 fn handle_conversation_event(
1129 &mut self,
1130 conversation_view: Entity<crate::ConversationView>,
1131 _event: &crate::conversation_view::RootThreadUpdated,
1132 cx: &mut Context<Self>,
1133 ) {
1134 let view = conversation_view.read(cx);
1135 let thread_id = view.thread_id;
1136 let Some(thread) = view.root_thread(cx) else {
1137 return;
1138 };
1139
1140 let thread_ref = thread.read(cx);
1141 if thread_ref.is_draft_thread() {
1142 return;
1143 }
1144
1145 let existing_thread = self.entry(thread_id);
1146 let session_id = Some(thread_ref.session_id().clone());
1147 let title = thread_ref.title();
1148
1149 let updated_at = Utc::now();
1150
1151 let created_at = existing_thread
1152 .and_then(|t| t.created_at)
1153 .unwrap_or_else(|| updated_at);
1154
1155 let agent_id = thread_ref.connection().agent_id();
1156
1157 // Preserve project-dependent fields for archived threads.
1158 // The worktree may already have been removed from the
1159 // project as part of the archive flow, so re-evaluating
1160 // these from the current project state would yield
1161 // empty/incorrect results.
1162 let (worktree_paths, remote_connection) =
1163 if let Some(existing) = existing_thread.filter(|t| t.archived) {
1164 (
1165 existing.worktree_paths.clone(),
1166 existing.remote_connection.clone(),
1167 )
1168 } else {
1169 let project = thread_ref.project().read(cx);
1170 let worktree_paths = project.worktree_paths(cx);
1171 let remote_connection = project.remote_connection_options(cx);
1172
1173 (worktree_paths, remote_connection)
1174 };
1175
1176 // Threads without a folder path (e.g. started in an empty
1177 // window) are archived by default so they don't get lost,
1178 // because they won't show up in the sidebar. Users can reload
1179 // them from the archive.
1180 let archived = existing_thread
1181 .map(|t| t.archived)
1182 .unwrap_or(worktree_paths.is_empty());
1183
1184 let metadata = ThreadMetadata {
1185 thread_id,
1186 session_id,
1187 agent_id,
1188 title,
1189 created_at: Some(created_at),
1190 updated_at,
1191 worktree_paths,
1192 remote_connection,
1193 archived,
1194 };
1195
1196 self.save(metadata, cx);
1197 }
1198}
1199
// Marker impl: the store is registered as a single app-wide GPUI global.
impl Global for ThreadMetadataStore {}
1201
/// Newtype over the thread-metadata SQLite connection; all queries against
/// the sidebar-thread and archived-worktree tables go through this handle.
struct ThreadMetadataDb(ThreadSafeConnection);
1203
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Migrations run in order and are append-only: never edit a shipped
    // entry, only add new ones at the end.
    const MIGRATIONS: &[&str] = &[
        // Original schema, keyed by ACP session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // Archived flag plus the main-worktree path columns.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Archived git worktrees and the (session -> worktree) link table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // Re-key both tables from session_id to thread_id: backfill missing
        // thread ids with random blobs, rebuild the link table and the main
        // table with thread_id as primary key, and carry all rows over.
        sql!(
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
        // Purge rows without a session id, plus any archived worktrees that
        // no remaining thread references.
        sql!(
            DELETE FROM thread_archived_worktrees
            WHERE thread_id IN (
                SELECT thread_id FROM sidebar_threads WHERE session_id IS NULL
            );

            DELETE FROM sidebar_threads WHERE session_id IS NULL;

            DELETE FROM archived_git_worktrees
            WHERE id NOT IN (
                SELECT archived_worktree_id FROM thread_archived_worktrees
            );
        ),
    ];
}
1295
// Declares the shared connection for ThreadMetadataDb; the empty list is the
// set of additional domains (none here). NOTE(review): exact semantics per
// the `db::static_connection!` macro — confirm against its definition.
db::static_connection!(ThreadMetadataDb, []);
1297
impl ThreadMetadataDb {
    /// List the ids of all threads that have a session, newest first.
    #[allow(dead_code)]
    pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
        self.select::<ThreadId>(
            "SELECT thread_id FROM sidebar_threads \
             WHERE session_id IS NOT NULL \
             ORDER BY updated_at DESC",
        )?()
    }

    // Column order here must match the decode order in
    // `impl Column for ThreadMetadata`.
    const LIST_QUERY: &str = "SELECT thread_id, session_id, agent_id, title, updated_at, \
         created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, \
         main_worktree_paths_order, remote_connection \
         FROM sidebar_threads \
         WHERE session_id IS NOT NULL \
         ORDER BY updated_at DESC";

    /// List all sidebar thread metadata, ordered by updated_at descending.
    ///
    /// Only returns threads that have a `session_id`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(Self::LIST_QUERY)?()
    }

    /// Upsert metadata for a thread.
    ///
    /// Normalization applied before writing:
    /// - the built-in Zed agent is stored as a NULL `agent_id`,
    /// - a `None` title is stored as the empty string,
    /// - empty path lists are stored as NULL columns,
    /// - the remote connection is stored as JSON.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        anyhow::ensure!(
            row.session_id.is_some(),
            "refusing to persist thread metadata without a session_id"
        );

        let session_id = row.session_id.as_ref().map(|s| s.0.clone());
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row
            .title
            .as_ref()
            .map(|t| t.to_string())
            .unwrap_or_default();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let thread_id = row.thread_id;
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) \
                ON CONFLICT(thread_id) DO UPDATE SET \
                    session_id = excluded.session_id, \
                    agent_id = excluded.agent_id, \
                    title = excluded.title, \
                    updated_at = excluded.updated_at, \
                    created_at = excluded.created_at, \
                    folder_paths = excluded.folder_paths, \
                    folder_paths_order = excluded.folder_paths_order, \
                    archived = excluded.archived, \
                    main_worktree_paths = excluded.main_worktree_paths, \
                    main_worktree_paths_order = excluded.main_worktree_paths_order, \
                    remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the placeholder order above.
            let mut i = stmt.bind(&thread_id, 1)?;
            i = stmt.bind(&session_id, i)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert a new archived git worktree row and return its assigned id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Link a thread to an existing archived worktree row.
    pub async fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&thread_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Return every archived worktree row linked to the given thread.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<ThreadId, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.thread_id = ?1",
        )?(thread_id)
    }

    /// Delete an archived worktree row, removing its thread links first so
    /// the foreign-key reference is never dangling.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Remove every thread -> archived-worktree link for the given thread.
    /// The worktree rows themselves are left in place.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
            )?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Whether any thread still links to the given archived worktree row.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }

    /// Collect branch names for every archived worktree that has one,
    /// grouped as thread id -> (worktree path -> branch name). Rows are
    /// scanned in ascending id order, so for duplicate paths the
    /// latest-created worktree's branch wins.
    pub fn get_all_archived_branch_names(
        &self,
    ) -> anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>> {
        let rows = self.select::<(ThreadId, String, String)>(
            "SELECT t.thread_id, a.worktree_path, a.branch_name \
             FROM thread_archived_worktrees t \
             JOIN archived_git_worktrees a ON a.id = t.archived_worktree_id \
             WHERE a.branch_name IS NOT NULL \
             ORDER BY a.id ASC",
        )?()?;

        let mut result: HashMap<ThreadId, HashMap<PathBuf, String>> = HashMap::default();
        for (thread_id, worktree_path, branch_name) in rows {
            result
                .entry(thread_id)
                .or_default()
                .insert(PathBuf::from(worktree_path), branch_name);
        }
        Ok(result)
    }
}
1528
1529impl Column for ThreadMetadata {
1530 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1531 let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
1532 let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
1533 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1534 let (title, next): (String, i32) = Column::column(statement, next)?;
1535 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1536 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1537 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1538 let (folder_paths_order_str, next): (Option<String>, i32) =
1539 Column::column(statement, next)?;
1540 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1541 let (main_worktree_paths_str, next): (Option<String>, i32) =
1542 Column::column(statement, next)?;
1543 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1544 Column::column(statement, next)?;
1545 let (remote_connection_json, next): (Option<String>, i32) =
1546 Column::column(statement, next)?;
1547
1548 let agent_id = agent_id
1549 .map(|id| AgentId::new(id))
1550 .unwrap_or(ZED_AGENT_ID.clone());
1551
1552 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1553 let created_at = created_at_str
1554 .as_deref()
1555 .map(DateTime::parse_from_rfc3339)
1556 .transpose()?
1557 .map(|dt| dt.with_timezone(&Utc));
1558
1559 let folder_paths = folder_paths_str
1560 .map(|paths| {
1561 PathList::deserialize(&util::path_list::SerializedPathList {
1562 paths,
1563 order: folder_paths_order_str.unwrap_or_default(),
1564 })
1565 })
1566 .unwrap_or_default();
1567
1568 let main_worktree_paths = main_worktree_paths_str
1569 .map(|paths| {
1570 PathList::deserialize(&util::path_list::SerializedPathList {
1571 paths,
1572 order: main_worktree_paths_order_str.unwrap_or_default(),
1573 })
1574 })
1575 .unwrap_or_default();
1576
1577 let remote_connection = remote_connection_json
1578 .as_deref()
1579 .map(serde_json::from_str::<RemoteConnectionOptions>)
1580 .transpose()
1581 .context("deserialize thread metadata remote connection")?;
1582
1583 let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
1584 .unwrap_or_else(|_| WorktreePaths::default());
1585
1586 let thread_id = ThreadId(thread_id_uuid);
1587
1588 Ok((
1589 ThreadMetadata {
1590 thread_id,
1591 session_id: id.map(acp::SessionId::new),
1592 agent_id,
1593 title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
1594 None
1595 } else {
1596 Some(title.into())
1597 },
1598 updated_at,
1599 created_at,
1600 worktree_paths,
1601 remote_connection,
1602 archived,
1603 },
1604 next,
1605 ))
1606 }
1607}
1608
1609impl Column for ArchivedGitWorktree {
1610 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1611 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1612 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1613 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1614 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1615 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1616 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1617 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1618
1619 Ok((
1620 ArchivedGitWorktree {
1621 id,
1622 worktree_path: PathBuf::from(worktree_path_str),
1623 main_repo_path: PathBuf::from(main_repo_path_str),
1624 branch_name,
1625 staged_commit_hash,
1626 unstaged_commit_hash,
1627 original_commit_hash,
1628 },
1629 next,
1630 ))
1631 }
1632}
1633
1634#[cfg(test)]
1635mod tests {
1636 use super::*;
1637 use acp_thread::StubAgentConnection;
1638 use action_log::ActionLog;
1639 use agent::DbThread;
1640 use agent_client_protocol as acp;
1641
1642 use gpui::{TestAppContext, VisualTestContext};
1643 use project::FakeFs;
1644 use project::Project;
1645 use remote::WslConnectionOptions;
1646 use std::path::Path;
1647 use std::rc::Rc;
1648 use workspace::MultiWorkspace;
1649
1650 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1651 DbThread {
1652 title: title.to_string().into(),
1653 messages: Vec::new(),
1654 updated_at,
1655 detailed_summary: None,
1656 initial_project_snapshot: None,
1657 cumulative_token_usage: Default::default(),
1658 request_token_usage: Default::default(),
1659 model: None,
1660 profile: None,
1661 imported: false,
1662 subagent_context: None,
1663 speed: None,
1664 thinking_enabled: false,
1665 thinking_effort: None,
1666 draft_prompt: None,
1667 ui_scroll_position: None,
1668 }
1669 }
1670
1671 fn make_metadata(
1672 session_id: &str,
1673 title: &str,
1674 updated_at: DateTime<Utc>,
1675 folder_paths: PathList,
1676 ) -> ThreadMetadata {
1677 ThreadMetadata {
1678 thread_id: ThreadId::new(),
1679 archived: false,
1680 session_id: Some(acp::SessionId::new(session_id)),
1681 agent_id: agent::ZED_AGENT_ID.clone(),
1682 title: if title.is_empty() {
1683 None
1684 } else {
1685 Some(title.to_string().into())
1686 },
1687 updated_at,
1688 created_at: Some(updated_at),
1689 worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
1690 remote_connection: None,
1691 }
1692 }
1693
    /// Install the globals the thread-metadata tests depend on (settings,
    /// theme, editor, release channel, prompt store, fake fs, both stores,
    /// and a test model registry), then wait for startup tasks to settle.
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            // Settings must be in place before the other init calls run.
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init("0.0.0".parse().unwrap(), cx);
            prompt_store::init(cx);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
            language_model::LanguageModelRegistry::test(cx);
        });
        cx.run_until_parked();
    }
1710
    /// Open a test `MultiWorkspace` window for `project` and create an
    /// `AgentPanel` inside its workspace, returning the panel together with
    /// the `VisualTestContext` that drives the window.
    fn setup_panel_with_project(
        project: Entity<Project>,
        cx: &mut TestAppContext,
    ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace_entity = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();
        let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
        let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
            cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
        });
        (panel, vcx)
    }
1726
1727 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1728 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1729 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1730 .unwrap();
1731 }
1732
    /// Clear the backfill marker, then re-run both thread-metadata migrations
    /// and wait for them to complete.
    fn run_store_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            // The remote-connection migration chains off the metadata one.
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        cx.run_until_parked();
    }
1741
    // Seeds two threads directly into the database, then verifies that
    // initializing the store loads both into the in-memory cache with the
    // correct per-path indices.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Derive a per-test database name so concurrently running tests
        // don't share state.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        // Let the store's initial reload task finish.
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // Each thread is indexed only under its own folder paths.
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1810
    // Exercises the in-memory path indices: saving two threads under
    // different paths, moving one thread to the other path, and finally
    // deleting a thread — verifying the indices track each step.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );
        let session1_thread_id = initial_metadata.thread_id;

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );
        let session2_thread_id = second_metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // After the initial saves, each thread is indexed under its own path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 with the second project's paths (same thread id),
        // simulating a thread moving to another folder.
        let moved_metadata = ThreadMetadata {
            thread_id: session1_thread_id,
            session_id: Some(acp::SessionId::new("session-1")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("First Thread".into()),
            updated_at: updated_time,
            created_at: Some(updated_time),
            worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        // The move re-indexes session-1 under the second path without
        // changing the total entry count.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(first_path_entries.is_empty());

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(session2_thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Deleting session-2 leaves only session-1 in the cache and index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 1);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1937
    // Verifies the metadata migration backfills only threads that are missing
    // from the metadata store: a pre-existing entry keeps its title and
    // unarchived state, while newly migrated entries arrive archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Seed one thread ("a-session-0") into the metadata store up front so
        // the migration should skip it.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Threads saved into the native ThreadStore; all except "a-session-0"
        // are missing from the metadata store and should be backfilled.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing entry must not have been overwritten by the
        // native-store title, nor archived.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // Every backfilled entry arrives archived.
        let migrated_entries: Vec<_> = list
            .iter()
            .filter(|metadata| {
                !metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .collect();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
2056
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        // Metadata for the only native-store session already exists before
        // the migration runs.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a newer copy of the same session to the native thread store.
        // The migration must not duplicate the entry for it.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Still exactly one entry, keyed by the pre-existing session.
        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
2116
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        // Thread metadata saved with `remote_connection: None` (make_metadata
        // does not set one); the migration should fill it in.
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Seed the workspace DB with a WSL remote connection and a workspace
        // row that maps the same folder paths to that connection, so the
        // migration can look up the thread's host by path.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Reset the one-shot backfill marker so the migration runs again,
        // then run it.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        // The thread now carries the WSL connection seeded above.
        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2192
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        // (timestamps increase with the index, so higher-indexed threads
        // are the more recent ones).
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        // Persist everything through the native ThreadStore so the
        // migration can discover it.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Every thread is migrated; archiving only limits visibility.
        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the slice boundaries below line up with the
        // "5 most recent" rule.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2281
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Draft threads no longer create metadata entries.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 0);
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), so no metadata is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(
                store.entry_ids().count(),
                0,
                "expected title updates on empty thread to not create metadata"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Exactly one entry now exists, keyed by this thread's id and
        // carrying the thread's real session id.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2335
2336 #[gpui::test]
2337 async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
2338 init_test(cx);
2339
2340 let fs = FakeFs::new(cx.executor());
2341 let project = Project::test(fs, None::<&Path>, cx).await;
2342 let connection = StubAgentConnection::new();
2343
2344 let (panel, mut vcx) = setup_panel_with_project(project, cx);
2345 crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);
2346
2347 let session_id = crate::test_support::active_session_id(&panel, &vcx);
2348 let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2349
2350 thread.update_in(&mut vcx, |thread, _window, cx| {
2351 thread.push_user_content_block(None, "Hello".into(), cx);
2352 });
2353 vcx.run_until_parked();
2354
2355 cx.read(|cx| {
2356 let store = ThreadMetadataStore::global(cx).read(cx);
2357 assert_eq!(store.entry_ids().count(), 1);
2358 assert!(store.entry_by_session(&session_id).is_some());
2359 });
2360
2361 // Dropping the panel releases the ConversationView and its thread.
2362 drop(panel);
2363 cx.update(|_| {});
2364 cx.run_until_parked();
2365
2366 cx.read(|cx| {
2367 let store = ThreadMetadataStore::global(cx).read(cx);
2368 assert_eq!(store.entry_ids().count(), 1);
2369 assert!(store.entry_by_session(&session_id).is_some());
2370 });
2371 }
2372
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        // Pushing content makes the thread non-empty so metadata is created.
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // A thread created in a project with no worktree gets empty
            // folder paths and is archived by default.
            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            // A thread created in a project with a worktree records that
            // worktree's path and stays unarchived.
            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2443
2444 #[gpui::test]
2445 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2446 init_test(cx);
2447
2448 let fs = FakeFs::new(cx.executor());
2449 let project = Project::test(fs, None::<&Path>, cx).await;
2450 let connection = Rc::new(StubAgentConnection::new());
2451
2452 // Create a regular (non-subagent) thread through the panel.
2453 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2454 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2455
2456 let regular_thread =
2457 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2458 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2459
2460 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2461 thread.push_user_content_block(None, "content".into(), cx);
2462 thread.set_title("Regular Thread".into(), cx).detach();
2463 });
2464 vcx.run_until_parked();
2465
2466 // Create a standalone subagent AcpThread (not wrapped in a
2467 // ConversationView). The ThreadMetadataStore only observes
2468 // ConversationView events, so this thread's events should
2469 // have no effect on sidebar metadata.
2470 let subagent_session_id = acp::SessionId::new("subagent-session");
2471 let subagent_thread = cx.update(|cx| {
2472 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2473 cx.new(|cx| {
2474 acp_thread::AcpThread::new(
2475 Some(regular_session_id.clone()),
2476 Some("Subagent Thread".into()),
2477 None,
2478 connection.clone(),
2479 project.clone(),
2480 action_log,
2481 subagent_session_id.clone(),
2482 watch::Receiver::constant(acp::PromptCapabilities::new()),
2483 cx,
2484 )
2485 })
2486 });
2487
2488 cx.update(|cx| {
2489 subagent_thread.update(cx, |thread, cx| {
2490 thread
2491 .set_title("Subagent Thread Title".into(), cx)
2492 .detach();
2493 });
2494 });
2495 cx.run_until_parked();
2496
2497 // Only the regular thread should appear in sidebar metadata.
2498 // The subagent thread is excluded because the metadata store
2499 // only observes ConversationView events.
2500 let list = cx.update(|cx| {
2501 let store = ThreadMetadataStore::global(cx);
2502 store.read(cx).entries().cloned().collect::<Vec<_>>()
2503 });
2504
2505 assert_eq!(
2506 list.len(),
2507 1,
2508 "Expected only the regular thread in sidebar metadata, \
2509 but found {} entries (subagent threads are leaking into the sidebar)",
2510 list.len(),
2511 );
2512 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2513 assert_eq!(list[0].display_title(), "Regular Thread");
2514 }
2515
2516 #[test]
2517 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2518 let now = Utc::now();
2519
2520 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2521 let thread_id = meta.thread_id;
2522 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2523
2524 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2525
2526 assert_eq!(deduped.len(), 1);
2527 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2528 }
2529
2530 #[test]
2531 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2532 let now = Utc::now();
2533 let later = now + chrono::Duration::seconds(1);
2534
2535 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2536 let shared_thread_id = old_metadata.thread_id;
2537 let new_metadata = ThreadMetadata {
2538 thread_id: shared_thread_id,
2539 ..make_metadata("session-1", "New Title", later, PathList::default())
2540 };
2541
2542 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2543 DbOperation::Upsert(old_metadata),
2544 DbOperation::Upsert(new_metadata.clone()),
2545 ]);
2546
2547 assert_eq!(deduped.len(), 1);
2548 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2549 }
2550
2551 #[test]
2552 fn test_dedup_db_operations_preserves_distinct_sessions() {
2553 let now = Utc::now();
2554
2555 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2556 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2557 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2558 DbOperation::Upsert(metadata1.clone()),
2559 DbOperation::Upsert(metadata2.clone()),
2560 ]);
2561
2562 assert_eq!(deduped.len(), 2);
2563 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2564 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2565 }
2566
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: visible for its path, nothing archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        // Archived: hidden from entries_for_path but listed (with its
        // archived flag set) by archived_entries.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchived: back to the initial visible state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2649
2650 #[gpui::test]
2651 async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
2652 init_test(cx);
2653
2654 let paths = PathList::new(&[Path::new("/project-a")]);
2655 let now = Utc::now();
2656
2657 let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
2658 let metadata2 = make_metadata(
2659 "session-2",
2660 "Archived Thread",
2661 now - chrono::Duration::seconds(1),
2662 paths.clone(),
2663 );
2664 let session2_thread_id = metadata2.thread_id;
2665
2666 cx.update(|cx| {
2667 let store = ThreadMetadataStore::global(cx);
2668 store.update(cx, |store, cx| {
2669 store.save(metadata1, cx);
2670 store.save(metadata2, cx);
2671 });
2672 });
2673
2674 cx.run_until_parked();
2675
2676 cx.update(|cx| {
2677 let store = ThreadMetadataStore::global(cx);
2678 store.update(cx, |store, cx| {
2679 store.archive(session2_thread_id, None, cx);
2680 });
2681 });
2682
2683 cx.run_until_parked();
2684
2685 cx.update(|cx| {
2686 let store = ThreadMetadataStore::global(cx);
2687 let store = store.read(cx);
2688
2689 let path_entries: Vec<_> = store
2690 .entries_for_path(&paths, None)
2691 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
2692 .collect();
2693 assert_eq!(path_entries, vec!["session-1"]);
2694
2695 assert_eq!(store.entries().count(), 2);
2696
2697 let archived: Vec<_> = store
2698 .archived_entries()
2699 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
2700 .collect();
2701 assert_eq!(archived, vec!["session-2"]);
2702 });
2703 }
2704
    #[gpui::test]
    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
        init_test(cx);

        let main_paths = PathList::new(&[Path::new("/project-a")]);
        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
        let now = Utc::now();

        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });

        // Three threads at the same folder_paths but different hosts.
        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());

        let mut remote_a_thread = make_metadata(
            "remote-a-session",
            "Remote A Thread",
            now - chrono::Duration::seconds(1),
            main_paths.clone(),
        );
        remote_a_thread.remote_connection = Some(remote_a.clone());

        let mut remote_b_thread = make_metadata(
            "remote-b-session",
            "Remote B Thread",
            now - chrono::Duration::seconds(2),
            main_paths.clone(),
        );
        remote_b_thread.remote_connection = Some(remote_b.clone());

        // Two more threads whose worktree is a linked worktree of the same
        // main project: one local, one on remote A.
        let linked_worktree_paths =
            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();

        let local_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("local-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Local Linked".into()),
            updated_at: now,
            created_at: Some(now),
            worktree_paths: linked_worktree_paths.clone(),
            remote_connection: None,
        };

        let remote_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("remote-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Remote Linked".into()),
            updated_at: now - chrono::Duration::seconds(1),
            created_at: Some(now - chrono::Duration::seconds(1)),
            worktree_paths: linked_worktree_paths,
            remote_connection: Some(remote_a.clone()),
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(local_thread, cx);
                store.save(remote_a_thread, cx);
                store.save(remote_b_thread, cx);
                store.save(local_linked_thread, cx);
                store.save(remote_linked_thread, cx);
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // With no remote connection filter, only the local thread at the
            // exact path matches.
            let local_entries: Vec<_> = store
                .entries_for_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(local_entries, vec!["local-session"]);

            // Filtering by a remote connection returns only that host's
            // thread at the exact path.
            let remote_a_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_a_entries, vec!["remote-a-session"]);

            let remote_b_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_b))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_b_entries, vec!["remote-b-session"]);

            // The main-worktree lookup also includes linked-worktree threads
            // that share the same main project, still host-filtered.
            let mut local_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            local_main_entries.sort();
            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);

            let mut remote_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            remote_main_entries.sort();
            assert_eq!(
                remote_main_entries,
                vec!["remote-a-session", "remote-linked"]
            );
        });
    }
2814
2815 #[gpui::test]
2816 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
2817 init_test(cx);
2818
2819 let paths = PathList::new(&[Path::new("/project-a")]);
2820 let now = Utc::now();
2821
2822 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
2823 let m2 = make_metadata(
2824 "session-2",
2825 "Thread Two",
2826 now - chrono::Duration::seconds(1),
2827 paths.clone(),
2828 );
2829 let m3 = make_metadata(
2830 "session-3",
2831 "Thread Three",
2832 now - chrono::Duration::seconds(2),
2833 paths,
2834 );
2835
2836 cx.update(|cx| {
2837 let store = ThreadMetadataStore::global(cx);
2838 store.update(cx, |store, cx| {
2839 store.save_all(vec![m1, m2, m3], cx);
2840 });
2841 });
2842
2843 cx.run_until_parked();
2844
2845 cx.update(|cx| {
2846 let store = ThreadMetadataStore::global(cx);
2847 let store = store.read(cx);
2848
2849 assert_eq!(store.entries().count(), 3);
2850 assert!(
2851 store
2852 .entry_by_session(&acp::SessionId::new("session-1"))
2853 .is_some()
2854 );
2855 assert!(
2856 store
2857 .entry_by_session(&acp::SessionId::new("session-2"))
2858 .is_some()
2859 );
2860 assert!(
2861 store
2862 .entry_by_session(&acp::SessionId::new("session-3"))
2863 .is_some()
2864 );
2865
2866 assert_eq!(store.entry_ids().count(), 3);
2867 });
2868 }
2869
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Reload the store from the database, discarding in-memory state, to
        // prove the archived flag was actually persisted.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // Archived threads stay hidden from the per-path listing but
            // appear in archived_entries, same as before the reload.
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2928
2929 #[gpui::test]
2930 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2931 init_test(cx);
2932
2933 cx.run_until_parked();
2934
2935 cx.update(|cx| {
2936 let store = ThreadMetadataStore::global(cx);
2937 store.update(cx, |store, cx| {
2938 store.archive(ThreadId::new(), None, cx);
2939 });
2940 });
2941
2942 cx.run_until_parked();
2943
2944 cx.update(|cx| {
2945 let store = ThreadMetadataStore::global(cx);
2946 let store = store.read(cx);
2947
2948 assert!(store.is_empty());
2949 assert_eq!(store.entries().count(), 0);
2950 assert_eq!(store.archived_entries().count(), 0);
2951 });
2952 }
2953
2954 #[gpui::test]
2955 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2956 init_test(cx);
2957
2958 let paths = PathList::new(&[Path::new("/project-a")]);
2959 let now = Utc::now();
2960 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2961 let thread_id = metadata.thread_id;
2962
2963 cx.update(|cx| {
2964 let store = ThreadMetadataStore::global(cx);
2965 store.update(cx, |store, cx| {
2966 store.save(metadata.clone(), cx);
2967 store.archive(thread_id, None, cx);
2968 });
2969 });
2970
2971 cx.run_until_parked();
2972
2973 cx.update(|cx| {
2974 let store = ThreadMetadataStore::global(cx);
2975 let store = store.read(cx);
2976
2977 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2978 pretty_assertions::assert_eq!(
2979 entries,
2980 vec![ThreadMetadata {
2981 archived: true,
2982 ..metadata
2983 }]
2984 );
2985 });
2986 }
2987
2988 #[gpui::test]
2989 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2990 init_test(cx);
2991 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2992
2993 let id = store
2994 .read_with(cx, |store, cx| {
2995 store.create_archived_worktree(
2996 "/tmp/worktree".to_string(),
2997 "/home/user/repo".to_string(),
2998 Some("feature-branch".to_string()),
2999 "staged_aaa".to_string(),
3000 "unstaged_bbb".to_string(),
3001 "original_000".to_string(),
3002 cx,
3003 )
3004 })
3005 .await
3006 .unwrap();
3007
3008 let thread_id_1 = ThreadId::new();
3009
3010 store
3011 .read_with(cx, |store, cx| {
3012 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3013 })
3014 .await
3015 .unwrap();
3016
3017 let worktrees = store
3018 .read_with(cx, |store, cx| {
3019 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3020 })
3021 .await
3022 .unwrap();
3023
3024 assert_eq!(worktrees.len(), 1);
3025 let wt = &worktrees[0];
3026 assert_eq!(wt.id, id);
3027 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
3028 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
3029 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
3030 assert_eq!(wt.staged_commit_hash, "staged_aaa");
3031 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
3032 assert_eq!(wt.original_commit_hash, "original_000");
3033 }
3034
3035 #[gpui::test]
3036 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
3037 init_test(cx);
3038 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3039
3040 let id = store
3041 .read_with(cx, |store, cx| {
3042 store.create_archived_worktree(
3043 "/tmp/worktree".to_string(),
3044 "/home/user/repo".to_string(),
3045 Some("main".to_string()),
3046 "deadbeef".to_string(),
3047 "deadbeef".to_string(),
3048 "original_000".to_string(),
3049 cx,
3050 )
3051 })
3052 .await
3053 .unwrap();
3054
3055 let thread_id_1 = ThreadId::new();
3056
3057 store
3058 .read_with(cx, |store, cx| {
3059 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3060 })
3061 .await
3062 .unwrap();
3063
3064 store
3065 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
3066 .await
3067 .unwrap();
3068
3069 let worktrees = store
3070 .read_with(cx, |store, cx| {
3071 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3072 })
3073 .await
3074 .unwrap();
3075 assert!(worktrees.is_empty());
3076 }
3077
3078 #[gpui::test]
3079 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
3080 init_test(cx);
3081 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3082
3083 let id = store
3084 .read_with(cx, |store, cx| {
3085 store.create_archived_worktree(
3086 "/tmp/worktree".to_string(),
3087 "/home/user/repo".to_string(),
3088 None,
3089 "abc123".to_string(),
3090 "abc123".to_string(),
3091 "original_000".to_string(),
3092 cx,
3093 )
3094 })
3095 .await
3096 .unwrap();
3097
3098 let thread_id_1 = ThreadId::new();
3099 let thread_id_2 = ThreadId::new();
3100
3101 store
3102 .read_with(cx, |store, cx| {
3103 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3104 })
3105 .await
3106 .unwrap();
3107
3108 store
3109 .read_with(cx, |store, cx| {
3110 store.link_thread_to_archived_worktree(thread_id_2, id, cx)
3111 })
3112 .await
3113 .unwrap();
3114
3115 let wt1 = store
3116 .read_with(cx, |store, cx| {
3117 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3118 })
3119 .await
3120 .unwrap();
3121
3122 let wt2 = store
3123 .read_with(cx, |store, cx| {
3124 store.get_archived_worktrees_for_thread(thread_id_2, cx)
3125 })
3126 .await
3127 .unwrap();
3128
3129 assert_eq!(wt1.len(), 1);
3130 assert_eq!(wt2.len(), 1);
3131 assert_eq!(wt1[0].id, wt2[0].id);
3132 }
3133
3134 #[gpui::test]
3135 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
3136 init_test(cx);
3137 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3138
3139 let original_paths = PathList::new(&[
3140 Path::new("/projects/worktree-a"),
3141 Path::new("/projects/worktree-b"),
3142 Path::new("/other/unrelated"),
3143 ]);
3144 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3145 let thread_id = meta.thread_id;
3146
3147 store.update(cx, |store, cx| {
3148 store.save(meta, cx);
3149 });
3150
3151 let replacements = vec![
3152 (
3153 PathBuf::from("/projects/worktree-a"),
3154 PathBuf::from("/restored/worktree-a"),
3155 ),
3156 (
3157 PathBuf::from("/projects/worktree-b"),
3158 PathBuf::from("/restored/worktree-b"),
3159 ),
3160 ];
3161
3162 store.update(cx, |store, cx| {
3163 store.complete_worktree_restore(thread_id, &replacements, cx);
3164 });
3165
3166 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3167 let entry = entry.unwrap();
3168 let paths = entry.folder_paths().paths();
3169 assert_eq!(paths.len(), 3);
3170 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3171 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3172 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3173 }
3174
3175 #[gpui::test]
3176 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
3177 init_test(cx);
3178 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3179
3180 let original_paths =
3181 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3182 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3183 let thread_id = meta.thread_id;
3184
3185 store.update(cx, |store, cx| {
3186 store.save(meta, cx);
3187 });
3188
3189 let replacements = vec![
3190 (
3191 PathBuf::from("/projects/worktree-a"),
3192 PathBuf::from("/new/worktree-a"),
3193 ),
3194 (
3195 PathBuf::from("/nonexistent/path"),
3196 PathBuf::from("/should/not/appear"),
3197 ),
3198 ];
3199
3200 store.update(cx, |store, cx| {
3201 store.complete_worktree_restore(thread_id, &replacements, cx);
3202 });
3203
3204 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3205 let entry = entry.unwrap();
3206 let paths = entry.folder_paths().paths();
3207 assert_eq!(paths.len(), 2);
3208 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3209 assert!(paths.contains(&PathBuf::from("/other/path")));
3210 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3211 }
3212
3213 #[gpui::test]
3214 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
3215 init_test(cx);
3216 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3217
3218 let original_paths = PathList::new(&[
3219 Path::new("/projects/worktree-a"),
3220 Path::new("/projects/worktree-b"),
3221 Path::new("/other/unrelated"),
3222 ]);
3223 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3224 let thread_id = meta.thread_id;
3225
3226 store.update(cx, |store, cx| {
3227 store.save(meta, cx);
3228 });
3229
3230 let replacements = vec![
3231 (
3232 PathBuf::from("/projects/worktree-a"),
3233 PathBuf::from("/restored/worktree-a"),
3234 ),
3235 (
3236 PathBuf::from("/projects/worktree-b"),
3237 PathBuf::from("/restored/worktree-b"),
3238 ),
3239 ];
3240
3241 store.update(cx, |store, cx| {
3242 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3243 });
3244
3245 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3246 let entry = entry.unwrap();
3247 let paths = entry.folder_paths().paths();
3248 assert_eq!(paths.len(), 3);
3249 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3250 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3251 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3252 }
3253
3254 #[gpui::test]
3255 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
3256 init_test(cx);
3257 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3258
3259 let original_paths =
3260 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3261 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3262 let thread_id = meta.thread_id;
3263
3264 store.update(cx, |store, cx| {
3265 store.save(meta, cx);
3266 });
3267
3268 let replacements = vec![
3269 (
3270 PathBuf::from("/projects/worktree-a"),
3271 PathBuf::from("/new/worktree-a"),
3272 ),
3273 (
3274 PathBuf::from("/nonexistent/path"),
3275 PathBuf::from("/should/not/appear"),
3276 ),
3277 ];
3278
3279 store.update(cx, |store, cx| {
3280 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3281 });
3282
3283 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3284 let entry = entry.unwrap();
3285 let paths = entry.folder_paths().paths();
3286 assert_eq!(paths.len(), 2);
3287 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3288 assert!(paths.contains(&PathBuf::from("/other/path")));
3289 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3290 }
3291
3292 #[gpui::test]
3293 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
3294 init_test(cx);
3295 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3296
3297 let id1 = store
3298 .read_with(cx, |store, cx| {
3299 store.create_archived_worktree(
3300 "/projects/worktree-a".to_string(),
3301 "/home/user/repo".to_string(),
3302 Some("branch-a".to_string()),
3303 "staged_a".to_string(),
3304 "unstaged_a".to_string(),
3305 "original_000".to_string(),
3306 cx,
3307 )
3308 })
3309 .await
3310 .unwrap();
3311
3312 let id2 = store
3313 .read_with(cx, |store, cx| {
3314 store.create_archived_worktree(
3315 "/projects/worktree-b".to_string(),
3316 "/home/user/repo".to_string(),
3317 Some("branch-b".to_string()),
3318 "staged_b".to_string(),
3319 "unstaged_b".to_string(),
3320 "original_000".to_string(),
3321 cx,
3322 )
3323 })
3324 .await
3325 .unwrap();
3326
3327 let thread_id_1 = ThreadId::new();
3328
3329 store
3330 .read_with(cx, |store, cx| {
3331 store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
3332 })
3333 .await
3334 .unwrap();
3335
3336 store
3337 .read_with(cx, |store, cx| {
3338 store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
3339 })
3340 .await
3341 .unwrap();
3342
3343 let worktrees = store
3344 .read_with(cx, |store, cx| {
3345 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3346 })
3347 .await
3348 .unwrap();
3349
3350 assert_eq!(worktrees.len(), 2);
3351
3352 let paths: Vec<&Path> = worktrees
3353 .iter()
3354 .map(|w| w.worktree_path.as_path())
3355 .collect();
3356 assert!(paths.contains(&Path::new("/projects/worktree-a")));
3357 assert!(paths.contains(&Path::new("/projects/worktree-b")));
3358 }
3359
3360 // ── Migration tests ────────────────────────────────────────────────
3361
    /// The thread_id primary-key migration must backfill a unique, 16-byte
    /// thread_id for every pre-existing row that lacks one, preserve any
    /// thread_id that was already set, and carry the backfilled ids through
    /// to the `thread_archived_worktrees` join table.
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id, two without.
        // NOTE: `exec` returns a prepared-statement closure; the trailing
        // `()` invokes it, hence the `.unwrap()()` pattern below.
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at, thread_id) \
                 VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
            .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                 (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
            .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                 (session_id, archived_worktree_id) \
                 VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
            .unwrap();

        // Run all current migrations. sqlez skips the already-applied ones and
        // runs the remaining migrations.
        run_thread_metadata_migrations(&connection);

        // All 3 rows should survive with non-NULL thread_ids.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
        .unwrap()
        .unwrap();
        assert_eq!(
            original_tid,
            // Matches the X'0102…0F10' blob inserted above.
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
        .unwrap()
        .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
        .unwrap()
        .unwrap();
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3518
3519 // ── ThreadWorktreePaths tests ──────────────────────────────────────
3520
3521 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3522 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3523 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3524 .iter()
3525 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3526 .unzip();
3527 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3528 }
3529
3530 #[test]
3531 fn test_thread_worktree_paths_full_add_then_remove_cycle() {
3532 // Full scenario from the issue:
3533 // 1. Start with linked worktree selectric → zed
3534 // 2. Add cloud
3535 // 3. Remove zed
3536
3537 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3538
3539 // Step 2: add cloud
3540 paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));
3541
3542 assert_eq!(paths.ordered_pairs().count(), 2);
3543 assert_eq!(
3544 paths.folder_path_list(),
3545 &PathList::new(&[
3546 Path::new("/worktrees/selectric/zed"),
3547 Path::new("/projects/cloud"),
3548 ])
3549 );
3550 assert_eq!(
3551 paths.main_worktree_path_list(),
3552 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
3553 );
3554
3555 // Step 3: remove zed
3556 paths.remove_main_path(Path::new("/projects/zed"));
3557
3558 assert_eq!(paths.ordered_pairs().count(), 1);
3559 assert_eq!(
3560 paths.folder_path_list(),
3561 &PathList::new(&[Path::new("/projects/cloud")])
3562 );
3563 assert_eq!(
3564 paths.main_worktree_path_list(),
3565 &PathList::new(&[Path::new("/projects/cloud")])
3566 );
3567 }
3568
3569 #[test]
3570 fn test_thread_worktree_paths_add_is_idempotent() {
3571 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3572
3573 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3574
3575 assert_eq!(paths.ordered_pairs().count(), 1);
3576 }
3577
3578 #[test]
3579 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3580 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3581
3582 paths.remove_main_path(Path::new("/projects/nonexistent"));
3583
3584 assert_eq!(paths.ordered_pairs().count(), 1);
3585 }
3586
3587 #[test]
3588 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3589 let folder = PathList::new(&[
3590 Path::new("/worktrees/selectric/zed"),
3591 Path::new("/projects/cloud"),
3592 ]);
3593 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3594
3595 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3596
3597 let pairs: Vec<_> = paths
3598 .ordered_pairs()
3599 .map(|(m, f)| (m.clone(), f.clone()))
3600 .collect();
3601 assert_eq!(pairs.len(), 2);
3602 assert!(pairs.contains(&(
3603 PathBuf::from("/projects/zed"),
3604 PathBuf::from("/worktrees/selectric/zed")
3605 )));
3606 assert!(pairs.contains(&(
3607 PathBuf::from("/projects/cloud"),
3608 PathBuf::from("/projects/cloud")
3609 )));
3610 }
3611
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees sharing the same main repo: each pair is
        // tracked individually, so `ordered_pairs` reports both entries.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // NOTE(review): the original comments here contradicted each other —
        // one claimed PathList deduplicates its input, the other that it
        // keeps all entries. The final assertion is agnostic: both sides are
        // built via `PathList::new` from the same duplicated input, so it
        // holds under either behavior. Confirm PathList's dedup semantics
        // if the distinction ever matters.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3637
3638 #[test]
3639 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3640 let folder = PathList::new(&[
3641 Path::new("/worktrees/selectric/zed"),
3642 Path::new("/projects/cloud"),
3643 ]);
3644 let main = PathList::new(&[Path::new("/projects/zed")]);
3645
3646 let result = WorktreePaths::from_path_lists(main, folder);
3647 assert!(result.is_err());
3648 }
3649
    /// Regression test: archiving a thread created in a git worktree must
    /// preserve the thread's folder paths so that restoring it later does
    /// not prompt the user to re-associate a project.
    #[gpui::test]
    async fn test_archived_thread_retains_paths_after_worktree_removal(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/worktrees/feature",
            serde_json::json!({ "src": { "main.rs": "" } }),
        )
        .await;
        let project = Project::test(fs, [Path::new("/worktrees/feature")], cx).await;
        let connection = StubAgentConnection::new();

        // Open a panel for the project and start a thread backed by the
        // stub agent connection.
        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Push content so the event handler saves metadata with the
        // project's worktree paths.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Verify paths were saved correctly.
        let (folder_paths_before, main_paths_before) = cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(
                !entry.folder_paths().is_empty(),
                "thread should have folder paths before archiving"
            );
            (
                entry.folder_paths().clone(),
                entry.main_worktree_paths().clone(),
            )
        });

        // Archive the thread.
        cx.update(|cx| {
            ThreadMetadataStore::global(cx).update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });
        cx.run_until_parked();

        // Remove the worktree from the project, simulating what the
        // archive flow does for linked git worktrees.
        let worktree_id = cx.update(|cx| {
            project
                .read(cx)
                .visible_worktrees(cx)
                .next()
                .unwrap()
                .read(cx)
                .id()
        });
        project.update(cx, |project, cx| {
            project.remove_worktree(worktree_id, cx);
        });
        cx.run_until_parked();

        // Trigger a thread event after archiving + worktree removal.
        // In production this happens when an async title-generation task
        // completes after the thread was archived.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Generated title".into(), cx).detach();
        });
        vcx.run_until_parked();

        // The archived thread must still have its original folder paths.
        // Before the fix this regression guards, the post-archive event
        // would overwrite them with the (now worktree-less) project paths.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(entry.archived, "thread should still be archived");
            assert_eq!(
                entry.display_title().as_ref(),
                "Generated title",
                "title should still be updated for archived threads"
            );
            assert_eq!(
                entry.folder_paths(),
                &folder_paths_before,
                "archived thread must retain its folder paths after worktree \
                 removal + subsequent thread event, otherwise restoring it \
                 will prompt the user to re-associate a project"
            );
            assert_eq!(
                entry.main_worktree_paths(),
                &main_paths_before,
                "archived thread must retain its main worktree paths after \
                 worktree removal + subsequent thread event"
            );
        });
    }
3750}