1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent::{ThreadStore, ZED_AGENT_ID};
7use agent_client_protocol::schema as acp;
8use anyhow::Context as _;
9use chrono::{DateTime, Utc};
10use collections::{HashMap, HashSet};
11use db::{
12 kvp::KeyValueStore,
13 sqlez::{
14 bindable::{Bind, Column},
15 domain::Domain,
16 statement::Statement,
17 thread_safe_connection::ThreadSafeConnection,
18 },
19 sqlez_macros::sql,
20};
21use fs::Fs;
22use futures::{FutureExt, future::Shared};
23use gpui::{AppContext as _, Entity, Global, Subscription, Task};
24pub use project::WorktreePaths;
25use project::{AgentId, linked_worktree_short_name};
26use remote::{RemoteConnectionOptions, same_remote_connection_identity};
27use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
28use util::{ResultExt as _, debug_panic};
29use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
30
31use crate::DEFAULT_THREAD_TITLE;
32
/// Stable, globally-unique identifier for a thread, independent of any
/// agent-assigned ACP session id.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);
35
impl ThreadId {
    /// Creates a fresh random (v4 UUID) thread id.
    pub fn new() -> Self {
        Self(uuid::Uuid::new_v4())
    }
}
41
impl Bind for ThreadId {
    /// Binds the wrapped UUID to the SQL statement, delegating to the UUID's impl.
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}
47
48impl Column for ThreadId {
49 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
50 let (uuid, next) = Column::column(statement, start_index)?;
51 Ok((ThreadId(uuid), next))
52 }
53}
54
// KVP flags recording that the corresponding one-shot migration has completed.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";
57
/// List all sidebar thread metadata from an arbitrary SQLite connection.
///
/// This is used to read thread metadata from another release channel's
/// database without opening a full `ThreadSafeConnection`.
pub(crate) fn list_thread_metadata_from_connection(
    connection: &db::sqlez::connection::Connection,
) -> anyhow::Result<Vec<ThreadMetadata>> {
    // `select` returns a prepared-query closure; the trailing `()` runs it.
    connection.select::<ThreadMetadata>(ThreadMetadataDb::LIST_QUERY)?()
}
67
/// Run the `ThreadMetadataDb` migrations on a raw connection.
///
/// This is used in tests to set up the sidebar_threads schema in a
/// temporary database.
#[cfg(test)]
pub(crate) fn run_thread_metadata_migrations(connection: &db::sqlez::connection::Connection) {
    let result = connection.migrate(
        ThreadMetadataDb::NAME,
        ThreadMetadataDb::MIGRATIONS,
        &mut |_, _, _| false,
    );
    result.expect("thread metadata migrations should succeed");
}
82
83pub fn init(cx: &mut App) {
84 ThreadMetadataStore::init_global(cx);
85 let migration_task = migrate_thread_metadata(cx);
86 migrate_thread_remote_connections(cx, migration_task);
87 migrate_thread_ids(cx);
88}
89
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        let existing_list = db.list()?;
        // An empty metadata table means this is the very first migration run.
        let is_first_migration = existing_list.is_empty();
        // Session ids already present in the metadata DB; used to skip
        // thread-store entries that were migrated on a previous run.
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Treat empty/default titles as "untitled" so the UI
                        // falls back to the default display title.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        interacted_at: None,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        // Migrated threads default to archived; the newest few
                        // per project are unarchived below on the first run.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without a project stay archived.
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
176
177fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
178 let store = ThreadMetadataStore::global(cx);
179 let db = store.read(cx).db.clone();
180 let kvp = KeyValueStore::global(cx);
181 let workspace_db = WorkspaceDb::global(cx);
182 let fs = <dyn Fs>::global(cx);
183
184 cx.spawn(async move |cx| -> anyhow::Result<()> {
185 migration_task.await?;
186
187 if kvp
188 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
189 .is_some()
190 {
191 return Ok(());
192 }
193
194 let recent_workspaces = workspace_db.recent_project_workspaces(fs.as_ref()).await?;
195
196 let mut local_path_lists = HashSet::<PathList>::default();
197 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
198
199 recent_workspaces
200 .iter()
201 .filter(|(_, location, path_list, _)| {
202 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
203 })
204 .for_each(|(_, _, path_list, _)| {
205 local_path_lists.insert(path_list.clone());
206 });
207
208 for (_, location, path_list, _) in recent_workspaces {
209 match location {
210 SerializedWorkspaceLocation::Remote(remote_connection)
211 if !local_path_lists.contains(&path_list) =>
212 {
213 remote_path_lists
214 .entry(path_list)
215 .or_insert(remote_connection);
216 }
217 _ => {}
218 }
219 }
220
221 let mut reloaded = false;
222 for metadata in db.list()? {
223 if metadata.remote_connection.is_some() {
224 continue;
225 }
226
227 if let Some(remote_connection) = remote_path_lists
228 .get(metadata.folder_paths())
229 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
230 {
231 db.save(ThreadMetadata {
232 remote_connection: Some(remote_connection.clone()),
233 ..metadata
234 })
235 .await?;
236 reloaded = true;
237 }
238 }
239
240 let reloaded_task = reloaded
241 .then_some(store.update(cx, |store, cx| store.reload(cx)))
242 .unwrap_or(Task::ready(()).shared());
243
244 kvp.write_kvp(
245 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
246 "1".to_string(),
247 )
248 .await?;
249 reloaded_task.await;
250
251 Ok(())
252 })
253 .detach_and_log_err(cx);
254}
255
256fn migrate_thread_ids(cx: &mut App) {
257 let store = ThreadMetadataStore::global(cx);
258 let db = store.read(cx).db.clone();
259 let kvp = KeyValueStore::global(cx);
260
261 cx.spawn(async move |cx| -> anyhow::Result<()> {
262 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
263 return Ok(());
264 }
265
266 let mut reloaded = false;
267 for metadata in db.list()? {
268 db.save(metadata).await?;
269 reloaded = true;
270 }
271
272 let reloaded_task = reloaded
273 .then_some(store.update(cx, |store, cx| store.reload(cx)))
274 .unwrap_or(Task::ready(()).shared());
275
276 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
277 .await?;
278 reloaded_task.await;
279
280 Ok(())
281 })
282 .detach_and_log_err(cx);
283}
284
/// GPUI global wrapper holding the singleton [`ThreadMetadataStore`] entity.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
287
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier owned by this store.
    pub thread_id: ThreadId,
    /// Agent-assigned ACP session id; `None` only transiently (the store
    /// refuses to persist metadata without one).
    pub session_id: Option<acp::SessionId>,
    /// Which agent owns the thread (e.g. the built-in Zed agent).
    pub agent_id: AgentId,
    /// User-visible title; `None` means "untitled" and the UI shows a default.
    pub title: Option<SharedString>,
    pub updated_at: DateTime<Utc>,
    pub created_at: Option<DateTime<Utc>>,
    /// When a user last interacted to send a message (including queueing).
    /// Doesn't include the time when a queued message is fired.
    pub interacted_at: Option<DateTime<Utc>>,
    /// Folder paths plus their associated main-worktree paths.
    pub worktree_paths: WorktreePaths,
    /// Remote host the thread's project lives on; `None` for local projects.
    pub remote_connection: Option<RemoteConnectionOptions>,
    /// Whether the thread is hidden from the sidebar (shown in the archive).
    pub archived: bool,
}
305
306impl ThreadMetadata {
307 pub fn display_title(&self) -> SharedString {
308 self.title
309 .clone()
310 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
311 }
312
313 pub fn folder_paths(&self) -> &PathList {
314 self.worktree_paths.folder_path_list()
315 }
316 pub fn main_worktree_paths(&self) -> &PathList {
317 self.worktree_paths.main_worktree_path_list()
318 }
319}
320
/// Derives worktree display info from a thread's stored path list.
///
/// For each path in the thread's `folder_paths`, produces a
/// [`ThreadItemWorktreeInfo`] with a short display name, full path, and whether
/// the worktree is the main checkout or a linked git worktree. When
/// multiple main paths exist and a linked worktree's short name alone
/// wouldn't identify which main project it belongs to, the main project
/// name is prefixed for disambiguation (e.g. `project:feature`).
pub fn worktree_info_from_thread_paths<S: std::hash::BuildHasher>(
    worktree_paths: &WorktreePaths,
    branch_names: &std::collections::HashMap<PathBuf, SharedString, S>,
) -> Vec<ThreadItemWorktreeInfo> {
    let mut infos: Vec<ThreadItemWorktreeInfo> = Vec::new();
    // (short_name, project_name) for each linked entry, in push order —
    // kept parallel to the linked entries of `infos` for the zip below.
    let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new();
    let mut unique_main_count = HashSet::default();

    for (main_path, folder_path) in worktree_paths.ordered_pairs() {
        unique_main_count.insert(main_path.clone());
        // A pair whose folder differs from its main path is a linked worktree.
        let is_linked = main_path != folder_path;

        if is_linked {
            let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default();
            let project_name = main_path
                .file_name()
                .map(|n| SharedString::from(n.to_string_lossy().to_string()))
                .unwrap_or_default();
            linked_short_names.push((short_name.clone(), project_name));
            infos.push(ThreadItemWorktreeInfo {
                worktree_name: Some(short_name),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Linked,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        } else {
            // Main checkouts with no file name (e.g. filesystem root) are skipped.
            let Some(name) = folder_path.file_name() else {
                continue;
            };
            infos.push(ThreadItemWorktreeInfo {
                worktree_name: Some(SharedString::from(name.to_string_lossy().to_string())),
                full_path: SharedString::from(folder_path.display().to_string()),
                highlight_positions: Vec::new(),
                kind: WorktreeKind::Main,
                branch_name: branch_names.get(folder_path).cloned(),
            });
        }
    }

    // When the group has multiple main worktree paths and the thread's
    // folder paths don't all share the same short name, prefix each
    // linked worktree chip with its main project name so the user knows
    // which project it belongs to.
    //
    // NOTE(review): prefixing is skipped when all names are identical
    // (`!all_same_name` below), yet identical short names from different
    // main projects seem to be exactly the ambiguous case — confirm this
    // condition is not inverted relative to the doc comment above.
    let all_same_name = infos.len() > 1
        && infos
            .iter()
            .all(|i| i.worktree_name == infos[0].worktree_name);

    if unique_main_count.len() > 1 && !all_same_name {
        // Relies on linked `infos` entries and `linked_short_names` being
        // parallel sequences (both pushed only in the linked branch above).
        for (info, (_short_name, project_name)) in infos
            .iter_mut()
            .filter(|i| i.kind == WorktreeKind::Linked)
            .zip(linked_short_names.iter())
        {
            if let Some(name) = &info.worktree_name {
                info.worktree_name = Some(SharedString::from(format!("{}:{}", project_name, name)));
            }
        }
    }

    infos
}
392
393impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
394 fn from(meta: &ThreadMetadata) -> Self {
395 let session_id = meta
396 .session_id
397 .clone()
398 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
399 Self {
400 session_id,
401 work_dirs: Some(meta.folder_paths().clone()),
402 title: meta.title.clone(),
403 updated_at: Some(meta.updated_at),
404 created_at: meta.created_at,
405 meta: None,
406 }
407 }
408}
409
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived. Stored in the metadata database; holds enough
/// state to recreate the worktree and its uncommitted changes on restore.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
451
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    /// Backing SQLite database for thread metadata.
    db: ThreadMetadataDb,
    /// Primary cache: all known threads keyed by id.
    threads: HashMap<ThreadId, ThreadMetadata>,
    /// Secondary index: thread ids grouped by their folder-path list.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Secondary index: thread ids grouped by their main-worktree path list.
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    /// Secondary index: ACP session id -> thread id.
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    /// The in-flight `reload` task, if any; shared so callers can await it.
    reload_task: Option<Shared<Task<()>>>,
    /// Per-conversation-view event subscriptions, keyed by entity id.
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    /// Queue of pending upserts/deletes drained by `_db_operations_task`.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Background archive jobs per thread; dropping the Sender cancels the job.
    in_flight_archives: HashMap<ThreadId, (Task<()>, smol::channel::Sender<()>)>,
    /// Long-lived task that applies queued `DbOperation`s to the database.
    _db_operations_task: Task<()>,
}
467
/// A pending write queued for the background DB task.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update a thread's metadata row.
    Upsert(ThreadMetadata),
    /// Remove a thread's metadata row.
    Delete(ThreadId),
}
473
474impl DbOperation {
475 fn id(&self) -> ThreadId {
476 match self {
477 DbOperation::Upsert(thread) => thread.thread_id,
478 DbOperation::Delete(thread_id) => *thread_id,
479 }
480 }
481}
482
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}
491
#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// Returns the configured override, or derives a per-test database name
    /// from the current thread's name.
    pub fn global(cx: &App) -> String {
        if let Some(override_name) = cx.try_global::<Self>() {
            return override_name.0.clone();
        }
        let current = std::thread::current();
        let test_name = current.name().unwrap_or("unknown_test");
        format!("THREAD_METADATA_DB_{}", test_name)
    }
}
504
505impl ThreadMetadataStore {
506 #[cfg(not(any(test, feature = "test-support")))]
507 pub fn init_global(cx: &mut App) {
508 if cx.has_global::<Self>() {
509 return;
510 }
511
512 let db = ThreadMetadataDb::global(cx);
513 let thread_store = cx.new(|cx| Self::new(db, cx));
514 cx.set_global(GlobalThreadMetadataStore(thread_store));
515 }
516
517 #[cfg(any(test, feature = "test-support"))]
518 pub fn init_global(cx: &mut App) {
519 let db_name = TestMetadataDbName::global(cx);
520 let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
521 let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
522 cx.set_global(GlobalThreadMetadataStore(thread_store));
523 }
524
525 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
526 cx.try_global::<GlobalThreadMetadataStore>()
527 .map(|store| store.0.clone())
528 }
529
    /// Returns the global store; panics if `init_global` has not run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }
533
    /// Returns `true` when no thread metadata is cached.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }
537
    /// Returns all thread IDs, in arbitrary (hash map) order.
    pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
        self.threads.keys().copied()
    }
542
    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
        self.threads.get(&thread_id)
    }
547
548 /// Returns the metadata for a thread identified by its ACP session ID.
549 pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
550 let thread_id = self.threads_by_session.get(session_id)?;
551 self.threads.get(thread_id)
552 }
553
    /// Returns all threads (archived and unarchived), in arbitrary order.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
558
559 /// Returns all archived threads.
560 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
561 self.entries().filter(|t| t.archived)
562 }
563
564 /// Returns all threads for the given path list and remote connection,
565 /// excluding archived threads.
566 ///
567 /// When `remote_connection` is `Some`, only threads whose persisted
568 /// `remote_connection` matches by normalized identity are returned.
569 /// When `None`, only local (non-remote) threads are returned.
570 pub fn entries_for_path<'a>(
571 &'a self,
572 path_list: &PathList,
573 remote_connection: Option<&'a RemoteConnectionOptions>,
574 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
575 self.threads_by_paths
576 .get(path_list)
577 .into_iter()
578 .flatten()
579 .filter_map(|s| self.threads.get(s))
580 .filter(|s| !s.archived)
581 .filter(move |s| {
582 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
583 })
584 }
585
586 /// Returns threads whose `main_worktree_paths` matches the given path list
587 /// and remote connection, excluding archived threads. This finds threads
588 /// that were opened in a linked worktree but are associated with the given
589 /// main worktree.
590 ///
591 /// When `remote_connection` is `Some`, only threads whose persisted
592 /// `remote_connection` matches by normalized identity are returned.
593 /// When `None`, only local (non-remote) threads are returned.
594 pub fn entries_for_main_worktree_path<'a>(
595 &'a self,
596 path_list: &PathList,
597 remote_connection: Option<&'a RemoteConnectionOptions>,
598 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
599 self.threads_by_main_paths
600 .get(path_list)
601 .into_iter()
602 .flatten()
603 .filter_map(|s| self.threads.get(s))
604 .filter(|s| !s.archived)
605 .filter(move |s| {
606 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
607 })
608 }
609
    /// Re-reads all metadata rows from the DB (on a background thread) and
    /// rebuilds the in-memory caches. Returns a shared task so callers can
    /// await completion; the task is also retained in `reload_task`.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previously retained reload; the newest one wins.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the primary map and every secondary index from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        this.cache_thread_metadata(row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
641
642 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
643 for metadata in metadata {
644 self.save_internal(metadata);
645 }
646 cx.notify();
647 }
648
    /// Saves a single metadata entry (cache + queued DB upsert) and notifies
    /// observers.
    pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
653
    /// Updates the in-memory caches for `metadata` and enqueues a DB upsert.
    /// If the thread's paths changed, the old index entries are removed first
    /// so stale path keys don't keep pointing at the thread. Does not notify.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // A session id is required to keep `threads_by_session` consistent.
        if metadata.session_id.is_none() {
            debug_panic!("cannot store thread metadata without a session_id");
            return;
        };

        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        self.cache_thread_metadata(metadata.clone());
        // Persistence is asynchronous; send failures are logged, not surfaced.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
683
    /// Inserts `metadata` into the primary map and all secondary indexes
    /// (by session id, by folder paths, and by main-worktree paths).
    fn cache_thread_metadata(&mut self, metadata: ThreadMetadata) {
        let Some(session_id) = metadata.session_id.as_ref() else {
            debug_panic!("cannot store thread metadata without a session_id");
            return;
        };

        self.threads_by_session
            .insert(session_id.clone(), metadata.thread_id);

        self.threads.insert(metadata.thread_id, metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths().clone())
            .or_default()
            .insert(metadata.thread_id);

        // Threads without a main-worktree association are not indexed by main paths.
        if !metadata.main_worktree_paths().is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths().clone())
                .or_default()
                .insert(metadata.thread_id);
        }
    }
707
    /// Replaces a thread's working directories while preserving its existing
    /// main-worktree association. No-op for unknown thread ids.
    pub fn update_working_directories(
        &mut self,
        thread_id: ThreadId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id) {
            debug_assert!(
                !thread.archived,
                "update_working_directories called on archived thread"
            );
            self.save_internal(ThreadMetadata {
                // Re-pair the new folders with the existing main paths; fall
                // back to deriving worktree paths from the folders alone.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }
730
    /// Assigns `worktree_paths` to every listed thread whose paths actually
    /// differ, skipping archived threads. Notifies only if something changed.
    pub fn update_worktree_paths(
        &mut self,
        thread_ids: &[ThreadId],
        worktree_paths: WorktreePaths,
        cx: &mut Context<Self>,
    ) {
        let mut changed = false;
        for &thread_id in thread_ids {
            let Some(thread) = self.threads.get(&thread_id) else {
                continue;
            };
            if thread.worktree_paths == worktree_paths {
                continue;
            }
            // Don't overwrite paths for archived threads — the
            // project may no longer include the worktree that was
            // removed during the archive flow.
            if thread.archived {
                continue;
            }
            self.save_internal(ThreadMetadata {
                worktree_paths: worktree_paths.clone(),
                ..thread.clone()
            });
            changed = true;
        }
        if changed {
            cx.notify();
        }
    }
761
762 pub fn update_interacted_at(
763 &mut self,
764 thread_id: &ThreadId,
765 time: DateTime<Utc>,
766 cx: &mut Context<Self>,
767 ) {
768 if let Some(thread) = self.threads.get(thread_id) {
769 self.save_internal(ThreadMetadata {
770 interacted_at: Some(time),
771 ..thread.clone()
772 });
773 cx.notify();
774 };
775 }
776
    /// Marks the thread as archived and emits `ThreadArchived`. `archive_job`
    /// is the in-flight background work (task plus cancellation sender) for
    /// archiving the thread's git worktrees, if any; it is retained so
    /// `unarchive` can cancel it by dropping the sender.
    pub fn archive(
        &mut self,
        thread_id: ThreadId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(thread_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(thread_id, job);
        }

        cx.emit(ThreadMetadataStoreEvent::ThreadArchived(thread_id));
    }
791
    /// Marks the thread as unarchived and cancels any in-flight archive job.
    pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        self.update_archived(thread_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(&thread_id);
    }
797
    /// Drops the bookkeeping for an archive job that ran to completion.
    pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
        self.in_flight_archives.remove(&thread_id);
    }
801
    /// Returns `true` if any unarchived thread other than `thread_id` (on the
    /// same remote-connection identity) references `path` in its folder paths.
    /// Used to determine whether a worktree can safely be removed from disk.
    pub fn path_is_referenced_by_other_unarchived_threads(
        &self,
        thread_id: ThreadId,
        path: &Path,
        remote_connection: Option<&RemoteConnectionOptions>,
    ) -> bool {
        self.entries().any(|thread| {
            thread.thread_id != thread_id
                && !thread.archived
                && same_remote_connection_identity(
                    thread.remote_connection.as_ref(),
                    remote_connection,
                )
                && thread
                    .folder_paths()
                    .paths()
                    .iter()
                    .any(|other_path| other_path.as_path() == path)
        })
    }
825
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                // NOTE(review): only the first occurrence of `old_path` is
                // replaced here, whereas `complete_worktree_restore` replaces
                // all occurrences — confirm whether duplicates can exist and
                // which behavior is intended.
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Keep the main-worktree association; fall back to deriving
                // worktree paths directly from the folders if pairing fails.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
855
    /// Finalizes a worktree restore by rewriting *every* occurrence of each
    /// `(old_path, new_path)` pair in the thread's stored folder paths and
    /// re-deriving its worktree paths.
    pub fn complete_worktree_restore(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Keep the main-worktree association; fall back to deriving
                // worktree paths directly from the folders if pairing fails.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
883
    /// Apply a mutation to the worktree paths of all threads whose current
    /// `folder_paths` matches `current_folder_paths`, then re-index.
    /// When `remote_connection` is provided, only threads with a matching
    /// remote connection are affected.
    pub fn change_worktree_paths(
        &mut self,
        current_folder_paths: &PathList,
        remote_connection: Option<&RemoteConnectionOptions>,
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        // Collect matching ids first so the indexes can be mutated afterwards.
        let thread_ids: Vec<_> = self
            .threads_by_paths
            .get(current_folder_paths)
            .into_iter()
            .flatten()
            .filter(|id| {
                self.threads.get(id).is_some_and(|t| {
                    !t.archived
                        && same_remote_connection_identity(
                            t.remote_connection.as_ref(),
                            remote_connection,
                        )
                })
            })
            .copied()
            .collect();

        self.mutate_thread_paths(&thread_ids, mutate, cx);
    }
914
    /// Applies `mutate` to each listed thread's worktree paths in place,
    /// keeping both path indexes consistent and persisting each changed row.
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // De-index under the old paths before mutating.
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                // Re-index under the new paths.
                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                // Persist asynchronously via the DB-operation queue.
                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }
956
    /// Persists a new [`ArchivedGitWorktree`] record on the background
    /// executor and resolves to its auto-assigned row id.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }
980
    /// Associates a thread with an archived-worktree record (background DB write).
    pub fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
                .await
        })
    }
993
    /// Fetches all archived-worktree records linked to `thread_id` (background DB read).
    pub fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
    }
1002
    /// Deletes an archived-worktree record by row id (background DB write).
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }
1007
1008 pub fn unlink_thread_from_all_archived_worktrees(
1009 &self,
1010 thread_id: ThreadId,
1011 cx: &App,
1012 ) -> Task<anyhow::Result<()>> {
1013 let db = self.db.clone();
1014 cx.background_spawn(async move {
1015 db.unlink_thread_from_all_archived_worktrees(thread_id)
1016 .await
1017 })
1018 }
1019
1020 pub fn is_archived_worktree_referenced(
1021 &self,
1022 archived_worktree_id: i64,
1023 cx: &App,
1024 ) -> Task<anyhow::Result<bool>> {
1025 let db = self.db.clone();
1026 cx.background_spawn(async move {
1027 db.is_archived_worktree_referenced(archived_worktree_id)
1028 .await
1029 })
1030 }
1031
1032 pub fn get_all_archived_branch_names(
1033 &self,
1034 cx: &App,
1035 ) -> Task<anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>>> {
1036 let db = self.db.clone();
1037 cx.background_spawn(async move { db.get_all_archived_branch_names() })
1038 }
1039
1040 fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
1041 if let Some(thread) = self.threads.get(&thread_id) {
1042 self.save_internal(ThreadMetadata {
1043 archived,
1044 ..thread.clone()
1045 });
1046 cx.notify();
1047 }
1048 }
1049
    /// Remove a thread's metadata from the in-memory caches and enqueue its
    /// deletion from the database.
    ///
    /// The secondary indexes (`threads_by_session`, `threads_by_paths`,
    /// `threads_by_main_paths`) are cleaned up first, while the thread's
    /// keys can still be read from `self.threads`.
    pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            if let Some(sid) = &thread.session_id {
                self.threads_by_session.remove(sid);
            }
            if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                thread_ids.remove(&thread_id);
            }
            if !thread.main_worktree_paths().is_empty() {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&thread_id);
                }
            }
        }
        // Deleting an unknown id still enqueues a DB delete; the row may
        // exist on disk even if it was never loaded into the cache.
        self.threads.remove(&thread_id);
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(thread_id))
            .log_err();
        cx.notify();
    }
1073
    /// Build the store: wire up automatic tracking of every
    /// `ConversationView` created in the app, start the background task
    /// that flushes metadata writes to SQLite, and kick off an initial
    /// reload from the database.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Subscribe to every ConversationView as it is created so its
        // root-thread updates flow into `handle_conversation_event`.
        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            // Drop our subscription entry when the view is released, so the
            // map doesn't accumulate stale entries.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        // Background writer: block on the first queued operation, then
        // greedily drain whatever else is pending, dedup to the newest
        // operation per thread, and apply the batch to the database.
        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate the caches from the database asynchronously; the
        // returned task is intentionally dropped (fire-and-forget).
        let _ = this.reload(cx);
        this
    }
1142
1143 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
1144 let mut ops = HashMap::default();
1145 for operation in operations.into_iter().rev() {
1146 if ops.contains_key(&operation.id()) {
1147 continue;
1148 }
1149 ops.insert(operation.id(), operation);
1150 }
1151 ops.into_values().collect()
1152 }
1153
1154 fn handle_conversation_event(
1155 &mut self,
1156 conversation_view: Entity<crate::ConversationView>,
1157 _event: &crate::conversation_view::RootThreadUpdated,
1158 cx: &mut Context<Self>,
1159 ) {
1160 let view = conversation_view.read(cx);
1161 let thread_id = view.thread_id;
1162 let Some(thread) = view.root_thread(cx) else {
1163 return;
1164 };
1165
1166 let thread_ref = thread.read(cx);
1167 if thread_ref.is_draft_thread() || thread_ref.project().read(cx).is_via_collab() {
1168 return;
1169 }
1170
1171 let existing_thread = self.entry(thread_id);
1172 let session_id = Some(thread_ref.session_id().clone());
1173 let title = thread_ref.title();
1174
1175 let updated_at = Utc::now();
1176
1177 let created_at = existing_thread
1178 .and_then(|t| t.created_at)
1179 .unwrap_or_else(|| updated_at);
1180
1181 let interacted_at = existing_thread
1182 .map(|t| t.interacted_at)
1183 .unwrap_or(Some(updated_at));
1184
1185 let agent_id = thread_ref.connection().agent_id();
1186
1187 // Preserve project-dependent fields for archived threads.
1188 // The worktree may already have been removed from the
1189 // project as part of the archive flow, so re-evaluating
1190 // these from the current project state would yield
1191 // empty/incorrect results.
1192 let (worktree_paths, remote_connection) =
1193 if let Some(existing) = existing_thread.filter(|t| t.archived) {
1194 (
1195 existing.worktree_paths.clone(),
1196 existing.remote_connection.clone(),
1197 )
1198 } else {
1199 let project = thread_ref.project().read(cx);
1200 let worktree_paths = project.worktree_paths(cx);
1201 let remote_connection = project.remote_connection_options(cx);
1202
1203 (worktree_paths, remote_connection)
1204 };
1205
1206 // Threads without a folder path (e.g. started in an empty
1207 // window) are archived by default so they don't get lost,
1208 // because they won't show up in the sidebar. Users can reload
1209 // them from the archive.
1210 let archived = existing_thread
1211 .map(|t| t.archived)
1212 .unwrap_or(worktree_paths.is_empty());
1213
1214 let metadata = ThreadMetadata {
1215 thread_id,
1216 session_id,
1217 agent_id,
1218 title,
1219 created_at: Some(created_at),
1220 interacted_at,
1221 updated_at,
1222 worktree_paths,
1223 remote_connection,
1224 archived,
1225 };
1226
1227 self.save(metadata, cx);
1228 }
1229}
1230
/// Lets the store be installed/read as a GPUI global (see the
/// `ThreadMetadataStore::global` calls in the tests below).
impl Global for ThreadMetadataStore {}

/// Events broadcast by [`ThreadMetadataStore`] to interested subscribers.
#[derive(Clone, Debug)]
pub enum ThreadMetadataStoreEvent {
    /// A thread was moved into the archive.
    ThreadArchived(ThreadId),
}

impl gpui::EventEmitter<ThreadMetadataStoreEvent> for ThreadMetadataStore {}

/// Newtype over the shared SQLite connection that holds sidebar thread
/// metadata and archived-worktree records.
struct ThreadMetadataDb(ThreadSafeConnection);
1241
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // NOTE: migrations are append-only. Never edit an existing entry —
    // released builds have already applied them; add a new entry instead.
    const MIGRATIONS: &[&str] = &[
        // v1: initial sidebar thread table, keyed by ACP session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // v2-v4: archived flag and main-worktree path columns.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // v5: archived git worktrees plus the thread <-> worktree link
        // table (originally keyed by session id).
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        // v6-v7: remote connection metadata and the new stable thread id.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // v8: re-key both tables on thread_id. Backfills missing ids with
        // random UUID bytes, copies the link table over, then rebuilds
        // sidebar_threads with thread_id as the primary key.
        sql!(
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
        // v9: purge rows without a session id, along with their links and
        // any archived worktrees that are no longer referenced.
        sql!(
            DELETE FROM thread_archived_worktrees
            WHERE thread_id IN (
                SELECT thread_id FROM sidebar_threads WHERE session_id IS NULL
            );

            DELETE FROM sidebar_threads WHERE session_id IS NULL;

            DELETE FROM archived_git_worktrees
            WHERE id NOT IN (
                SELECT archived_worktree_id FROM thread_archived_worktrees
            );
        ),
        // v10: per-thread last-interaction timestamp.
        sql!(
            ALTER TABLE sidebar_threads ADD COLUMN interacted_at TEXT;
        ),
    ];
}
1336
// Registers `ThreadMetadataDb` as a lazily-opened static database
// connection. NOTE(review): the `[]` argument appears to be a list of
// dependent domains — confirm against `static_connection!`'s definition.
db::static_connection!(ThreadMetadataDb, []);
1338
1339impl ThreadMetadataDb {
    /// List the ids of all persisted threads (those with a session id),
    /// most recently updated first.
    #[allow(dead_code)]
    pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
        self.select::<ThreadId>(
            "SELECT thread_id FROM sidebar_threads \
             WHERE session_id IS NOT NULL \
             ORDER BY updated_at DESC",
        )?()
    }
1348
    // The column order here is a contract: it must match the read order in
    // `impl Column for ThreadMetadata` below, and the column list written
    // by `save`.
    const LIST_QUERY: &str = "SELECT thread_id, session_id, agent_id, title, updated_at, \
         created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, \
         main_worktree_paths_order, remote_connection \
         FROM sidebar_threads \
         WHERE session_id IS NOT NULL \
         ORDER BY updated_at DESC";
1355
    /// List all sidebar thread metadata, ordered by updated_at descending.
    ///
    /// Only returns threads that have a `session_id`. Row decoding relies
    /// on `LIST_QUERY`'s column order matching the `Column` impl for
    /// `ThreadMetadata`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(Self::LIST_QUERY)?()
    }
1362
    /// Upsert metadata for a thread, keyed by `thread_id`.
    ///
    /// Normalizations applied before writing:
    /// - `agent_id` is stored as NULL for the built-in Zed agent;
    /// - timestamps are serialized as RFC 3339 strings;
    /// - empty path lists are stored as NULL rather than empty strings;
    /// - `remote_connection` is serialized to JSON.
    ///
    /// # Errors
    /// Fails if `session_id` is missing, if the remote connection cannot
    /// be serialized, or if the SQL write fails.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        anyhow::ensure!(
            row.session_id.is_some(),
            "refusing to persist thread metadata without a session_id"
        );

        let session_id = row.session_id.as_ref().map(|s| s.0.clone());
        // The built-in agent is represented as NULL so its rows survive a
        // rename of the default agent id.
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row
            .title
            .as_ref()
            .map(|t| t.to_string())
            .unwrap_or_default();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let interacted_at = row.interacted_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let thread_id = row.thread_id;
        let archived = row.archived;

        self.write(move |conn| {
            // The 13 placeholders must stay in sync with the bind calls
            // below and with the column order in `LIST_QUERY`.
            let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) \
                ON CONFLICT(thread_id) DO UPDATE SET \
                session_id = excluded.session_id, \
                agent_id = excluded.agent_id, \
                title = excluded.title, \
                updated_at = excluded.updated_at, \
                created_at = excluded.created_at, \
                interacted_at = excluded.interacted_at, \
                folder_paths = excluded.folder_paths, \
                folder_paths_order = excluded.folder_paths_order, \
                archived = excluded.archived, \
                main_worktree_paths = excluded.main_worktree_paths, \
                main_worktree_paths_order = excluded.main_worktree_paths_order, \
                remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Each bind returns the next placeholder index.
            let mut i = stmt.bind(&thread_id, 1)?;
            i = stmt.bind(&session_id, i)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&interacted_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }
1440
1441 /// Delete metadata for a single thread.
1442 pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
1443 self.write(move |conn| {
1444 let mut stmt =
1445 Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
1446 stmt.bind(&thread_id, 1)?;
1447 stmt.exec()
1448 })
1449 .await
1450 }
1451
    /// Insert a new archived git worktree record and return its generated
    /// row id, fetched in the same statement via SQLite's RETURNING clause.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // An INSERT ... RETURNING always produces a row on success.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }
1478
1479 pub async fn link_thread_to_archived_worktree(
1480 &self,
1481 thread_id: ThreadId,
1482 archived_worktree_id: i64,
1483 ) -> anyhow::Result<()> {
1484 self.write(move |conn| {
1485 let mut stmt = Statement::prepare(
1486 conn,
1487 "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
1488 VALUES (?1, ?2)",
1489 )?;
1490 let i = stmt.bind(&thread_id, 1)?;
1491 stmt.bind(&archived_worktree_id, i)?;
1492 stmt.exec()
1493 })
1494 .await
1495 }
1496
    /// Load every archived git worktree linked to `thread_id`.
    ///
    /// NOTE(review): declared `async` for API symmetry with the other db
    /// methods, but the query runs synchronously (no `.await` in the body).
    pub async fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        // Column order must match `impl Column for ArchivedGitWorktree`.
        self.select_bound::<ThreadId, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.thread_id = ?1",
        )?(thread_id)
    }
1508
    /// Delete an archived worktree record along with any thread links that
    /// point at it.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Delete the link rows first: they REFERENCE
            // archived_git_worktrees(id), so the referenced row must
            // outlive them.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
1525
1526 pub async fn unlink_thread_from_all_archived_worktrees(
1527 &self,
1528 thread_id: ThreadId,
1529 ) -> anyhow::Result<()> {
1530 self.write(move |conn| {
1531 let mut stmt = Statement::prepare(
1532 conn,
1533 "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
1534 )?;
1535 stmt.bind(&thread_id, 1)?;
1536 stmt.exec()
1537 })
1538 .await
1539 }
1540
1541 pub async fn is_archived_worktree_referenced(
1542 &self,
1543 archived_worktree_id: i64,
1544 ) -> anyhow::Result<bool> {
1545 self.select_row_bound::<i64, i64>(
1546 "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
1547 )?(archived_worktree_id)
1548 .map(|count| count.unwrap_or(0) > 0)
1549 }
1550
1551 pub fn get_all_archived_branch_names(
1552 &self,
1553 ) -> anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>> {
1554 let rows = self.select::<(ThreadId, String, String)>(
1555 "SELECT t.thread_id, a.worktree_path, a.branch_name \
1556 FROM thread_archived_worktrees t \
1557 JOIN archived_git_worktrees a ON a.id = t.archived_worktree_id \
1558 WHERE a.branch_name IS NOT NULL \
1559 ORDER BY a.id ASC",
1560 )?()?;
1561
1562 let mut result: HashMap<ThreadId, HashMap<PathBuf, String>> = HashMap::default();
1563 for (thread_id, worktree_path, branch_name) in rows {
1564 result
1565 .entry(thread_id)
1566 .or_default()
1567 .insert(PathBuf::from(worktree_path), branch_name);
1568 }
1569 Ok(result)
1570 }
1571}
1572
1573impl Column for ThreadMetadata {
1574 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1575 let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
1576 let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
1577 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1578 let (title, next): (String, i32) = Column::column(statement, next)?;
1579 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1580 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1581 let (interacted_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1582 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1583 let (folder_paths_order_str, next): (Option<String>, i32) =
1584 Column::column(statement, next)?;
1585 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1586 let (main_worktree_paths_str, next): (Option<String>, i32) =
1587 Column::column(statement, next)?;
1588 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1589 Column::column(statement, next)?;
1590 let (remote_connection_json, next): (Option<String>, i32) =
1591 Column::column(statement, next)?;
1592
1593 let agent_id = agent_id
1594 .map(|id| AgentId::new(id))
1595 .unwrap_or(ZED_AGENT_ID.clone());
1596
1597 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1598 let created_at = created_at_str
1599 .as_deref()
1600 .map(DateTime::parse_from_rfc3339)
1601 .transpose()?
1602 .map(|dt| dt.with_timezone(&Utc));
1603
1604 let interacted_at = interacted_at_str
1605 .as_deref()
1606 .map(DateTime::parse_from_rfc3339)
1607 .transpose()?
1608 .map(|dt| dt.with_timezone(&Utc));
1609
1610 let folder_paths = folder_paths_str
1611 .map(|paths| {
1612 PathList::deserialize(&util::path_list::SerializedPathList {
1613 paths,
1614 order: folder_paths_order_str.unwrap_or_default(),
1615 })
1616 })
1617 .unwrap_or_default();
1618
1619 let main_worktree_paths = main_worktree_paths_str
1620 .map(|paths| {
1621 PathList::deserialize(&util::path_list::SerializedPathList {
1622 paths,
1623 order: main_worktree_paths_order_str.unwrap_or_default(),
1624 })
1625 })
1626 .unwrap_or_default();
1627
1628 let remote_connection = remote_connection_json
1629 .as_deref()
1630 .map(serde_json::from_str::<RemoteConnectionOptions>)
1631 .transpose()
1632 .context("deserialize thread metadata remote connection")?;
1633
1634 let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
1635 .unwrap_or_else(|_| WorktreePaths::default());
1636
1637 let thread_id = ThreadId(thread_id_uuid);
1638
1639 Ok((
1640 ThreadMetadata {
1641 thread_id,
1642 session_id: id.map(acp::SessionId::new),
1643 agent_id,
1644 title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
1645 None
1646 } else {
1647 Some(title.into())
1648 },
1649 updated_at,
1650 created_at,
1651 interacted_at,
1652 worktree_paths,
1653 remote_connection,
1654 archived,
1655 },
1656 next,
1657 ))
1658 }
1659}
1660
/// Decodes an `ArchivedGitWorktree` row.
///
/// The read order must match the SELECT column list in
/// `get_archived_worktrees_for_thread`.
impl Column for ArchivedGitWorktree {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (i64, i32) = Column::column(statement, start_index)?;
        let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;

        Ok((
            ArchivedGitWorktree {
                id,
                worktree_path: PathBuf::from(worktree_path_str),
                main_repo_path: PathBuf::from(main_repo_path_str),
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            },
            next,
        ))
    }
}
1685
1686#[cfg(test)]
1687mod tests {
1688 use super::*;
1689 use acp_thread::StubAgentConnection;
1690 use action_log::ActionLog;
1691 use agent::DbThread;
1692 use agent_client_protocol::schema as acp;
1693 use gpui::{TestAppContext, VisualTestContext};
1694 use project::FakeFs;
1695 use project::Project;
1696 use remote::WslConnectionOptions;
1697 use std::path::Path;
1698 use std::rc::Rc;
1699 use workspace::MultiWorkspace;
1700
    /// Build a minimal `DbThread` fixture with the given title and
    /// timestamp; all other fields take empty/None defaults.
    fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
        DbThread {
            title: title.to_string().into(),
            messages: Vec::new(),
            updated_at,
            detailed_summary: None,
            initial_project_snapshot: None,
            cumulative_token_usage: Default::default(),
            request_token_usage: Default::default(),
            model: None,
            profile: None,
            imported: false,
            subagent_context: None,
            speed: None,
            thinking_enabled: false,
            thinking_effort: None,
            draft_prompt: None,
            ui_scroll_position: None,
        }
    }
1721
    /// Build a `ThreadMetadata` fixture with a fresh random thread id.
    /// An empty `title` maps to `None`, mirroring how rows decode.
    fn make_metadata(
        session_id: &str,
        title: &str,
        updated_at: DateTime<Utc>,
        folder_paths: PathList,
    ) -> ThreadMetadata {
        ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new(session_id)),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: if title.is_empty() {
                None
            } else {
                Some(title.to_string().into())
            },
            updated_at,
            created_at: Some(updated_at),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
            remote_connection: None,
        }
    }
1745
    /// Install the globals (settings, fs, stores, model registry) that the
    /// store and panel tests below depend on.
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init("0.0.0".parse().unwrap(), cx);
            prompt_store::init(cx);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
            language_model::LanguageModelRegistry::test(cx);
        });
        // Let the store's initial async reload complete.
        cx.run_until_parked();
    }
1762
    /// Open a test workspace window for `project` and create an
    /// `AgentPanel` inside it, returning both the panel and a visual test
    /// context driving that window.
    fn setup_panel_with_project(
        project: Entity<Project>,
        cx: &mut TestAppContext,
    ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace_entity = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();
        let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
        let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
            cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
        });
        (panel, vcx)
    }
1778
1779 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1780 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1781 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1782 .unwrap();
1783 }
1784
    /// Re-run the thread-metadata migrations as a fresh app launch would:
    /// clear the backfill marker first, then kick off both migration steps
    /// and wait for them to settle.
    fn run_store_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        cx.run_until_parked();
    }
1793
    /// Rows written directly to the database before the store exists must
    /// be loaded into the in-memory caches when the store is initialized.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test DB after the running test thread so parallel tests
        // don't share state.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        // Seed two threads straight into SQLite, bypassing the store.
        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        // Wait for the store's async reload to pick up the seeded rows.
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // The per-path index must route each thread to its own folder.
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1862
    /// Saving and deleting through the store must keep the per-path
    /// indexes in sync: moving a thread to a new folder re-homes it, and
    /// deleting a thread removes it from every index.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );
        let session1_thread_id = initial_metadata.thread_id;

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );
        let session2_thread_id = second_metadata.thread_id;

        // Phase 1: save both threads and verify the path indexes.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Phase 2: re-save session-1 under the second folder (same thread
        // id, new paths) — it must move between index buckets.
        let moved_metadata = ThreadMetadata {
            thread_id: session1_thread_id,
            session_id: Some(acp::SessionId::new("session-1")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("First Thread".into()),
            updated_at: updated_time,
            created_at: Some(updated_time),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            // session-1 left the first folder...
            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(first_path_entries.is_empty());

            // ...and now shares the second folder with session-2.
            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Phase 3: delete session-2 and verify it disappears everywhere.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(session2_thread_id, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 1);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1990
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        // Migration from the native ThreadStore must backfill metadata only
        // for sessions that have no ThreadMetadataStore entry yet: existing
        // entries keep their title/archived state, while newly backfilled
        // entries arrive archived.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for "a-session-0"; the migration should leave
        // this entry untouched even though the native store also has a thread
        // with the same session id (and a different title).
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Four threads in the native store: one duplicates the existing
        // metadata session, two belong to projects, one has no project paths.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // One pre-existing entry plus three backfilled ones, all attributed
        // to the native Zed agent.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing entry keeps its original title (not the native
        // store's "Thread A0 From Native Store") and stays unarchived.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // Everything the migration created (i.e. all entries except the
        // pre-existing one) is archived by default.
        let migrated_entries: Vec<_> = list
            .iter()
            .filter(|metadata| {
                !metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .collect();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
2110
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        // If every native thread already has a metadata entry, the migration
        // must neither duplicate entries nor overwrite existing ones.
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            interacted_at: None,
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a native thread with the same session id but a newer title;
        // the migration must still treat the session as already migrated.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Still exactly one entry for the pre-existing session.
        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
2171
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        // A metadata row saved without a remote connection should have its
        // `remote_connection` backfilled by matching its folder paths against
        // a workspace row that points at a remote_connections entry.
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Manually seed the workspace DB: a WSL remote connection row, and a
        // workspace row with the same paths that references it.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Reset the backfill marker so the migration runs again, then run it.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        // The WSL connection details from the workspace DB row should now be
        // attached to the thread metadata.
        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2247
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        // The migration keeps at most the five most recently updated threads
        // per project unarchived; any older ones are archived. The limit is
        // applied per-project, not globally.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_store_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // All ten threads get metadata; archival only changes the flag.
        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the slice boundaries below line up with recency.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2336
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        // A freshly opened thread with no content must not appear in the
        // sidebar metadata store — not even when its title changes. Metadata
        // is created only once the thread has at least one entry.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Draft threads no longer create metadata entries.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 0);
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), so no metadata is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(
                store.entry_ids().count(),
                0,
                "expected title updates on empty thread to not create metadata"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2390
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        // Once a thread has content and thus a metadata entry, dropping the
        // panel (and releasing the thread entity) must not delete that entry.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let session_id = crate::test_support::active_session_id(&panel, &vcx);
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());

        // Give the thread content so a metadata entry is created.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });

        // Dropping the panel releases the ConversationView and its thread.
        drop(panel);
        cx.update(|_| {});
        cx.run_until_parked();

        // The metadata entry survives the release.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });
    }
2427
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        // Threads created in a project with no worktree get empty folder
        // paths and are archived immediately; threads in a project with a
        // worktree keep its paths and stay unarchived.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2498
2499 #[gpui::test]
2500 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2501 init_test(cx);
2502
2503 let fs = FakeFs::new(cx.executor());
2504 let project = Project::test(fs, None::<&Path>, cx).await;
2505 let connection = Rc::new(StubAgentConnection::new());
2506
2507 // Create a regular (non-subagent) thread through the panel.
2508 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2509 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2510
2511 let regular_thread =
2512 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2513 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2514
2515 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2516 thread.push_user_content_block(None, "content".into(), cx);
2517 thread.set_title("Regular Thread".into(), cx).detach();
2518 });
2519 vcx.run_until_parked();
2520
2521 // Create a standalone subagent AcpThread (not wrapped in a
2522 // ConversationView). The ThreadMetadataStore only observes
2523 // ConversationView events, so this thread's events should
2524 // have no effect on sidebar metadata.
2525 let subagent_session_id = acp::SessionId::new("subagent-session");
2526 let subagent_thread = cx.update(|cx| {
2527 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2528 cx.new(|cx| {
2529 acp_thread::AcpThread::new(
2530 Some(regular_session_id.clone()),
2531 Some("Subagent Thread".into()),
2532 None,
2533 connection.clone(),
2534 project.clone(),
2535 action_log,
2536 subagent_session_id.clone(),
2537 watch::Receiver::constant(acp::PromptCapabilities::new()),
2538 cx,
2539 )
2540 })
2541 });
2542
2543 cx.update(|cx| {
2544 subagent_thread.update(cx, |thread, cx| {
2545 thread
2546 .set_title("Subagent Thread Title".into(), cx)
2547 .detach();
2548 });
2549 });
2550 cx.run_until_parked();
2551
2552 // Only the regular thread should appear in sidebar metadata.
2553 // The subagent thread is excluded because the metadata store
2554 // only observes ConversationView events.
2555 let list = cx.update(|cx| {
2556 let store = ThreadMetadataStore::global(cx);
2557 store.read(cx).entries().cloned().collect::<Vec<_>>()
2558 });
2559
2560 assert_eq!(
2561 list.len(),
2562 1,
2563 "Expected only the regular thread in sidebar metadata, \
2564 but found {} entries (subagent threads are leaking into the sidebar)",
2565 list.len(),
2566 );
2567 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2568 assert_eq!(list[0].display_title(), "Regular Thread");
2569 }
2570
2571 #[test]
2572 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2573 let now = Utc::now();
2574
2575 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2576 let thread_id = meta.thread_id;
2577 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2578
2579 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2580
2581 assert_eq!(deduped.len(), 1);
2582 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2583 }
2584
2585 #[test]
2586 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2587 let now = Utc::now();
2588 let later = now + chrono::Duration::seconds(1);
2589
2590 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2591 let shared_thread_id = old_metadata.thread_id;
2592 let new_metadata = ThreadMetadata {
2593 thread_id: shared_thread_id,
2594 ..make_metadata("session-1", "New Title", later, PathList::default())
2595 };
2596
2597 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2598 DbOperation::Upsert(old_metadata),
2599 DbOperation::Upsert(new_metadata.clone()),
2600 ]);
2601
2602 assert_eq!(deduped.len(), 1);
2603 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2604 }
2605
2606 #[test]
2607 fn test_dedup_db_operations_preserves_distinct_sessions() {
2608 let now = Utc::now();
2609
2610 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2611 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2612 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2613 DbOperation::Upsert(metadata1.clone()),
2614 DbOperation::Upsert(metadata2.clone()),
2615 ]);
2616
2617 assert_eq!(deduped.len(), 2);
2618 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2619 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2620 }
2621
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        // Round-trip: archiving removes a thread from the per-path listing
        // and surfaces it via archived_entries(); unarchiving restores the
        // original state.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially visible under its path and not archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Archived threads are excluded from the per-path listing...
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            // ...and show up only in archived_entries().
            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores the original listing.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2704
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        // entries_for_path must hide archived threads while entries() still
        // counts them and archived_entries() exposes them.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let session2_thread_id = metadata2.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        // Archive only the second thread.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(session2_thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Per-path listing shows only the unarchived thread.
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            // entries() still iterates both, archived or not.
            assert_eq!(store.entries().count(), 2);

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2759
    #[gpui::test]
    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
        // Threads at the same folder paths but with different (or no) remote
        // connections are filtered by the `remote_connection` argument of
        // entries_for_path / entries_for_main_worktree_path: `None` matches
        // only local threads, `Some(conn)` matches only that host.
        init_test(cx);

        let main_paths = PathList::new(&[Path::new("/project-a")]);
        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
        let now = Utc::now();

        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });

        // Three threads at the same folder_paths but different hosts.
        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());

        let mut remote_a_thread = make_metadata(
            "remote-a-session",
            "Remote A Thread",
            now - chrono::Duration::seconds(1),
            main_paths.clone(),
        );
        remote_a_thread.remote_connection = Some(remote_a.clone());

        let mut remote_b_thread = make_metadata(
            "remote-b-session",
            "Remote B Thread",
            now - chrono::Duration::seconds(2),
            main_paths.clone(),
        );
        remote_b_thread.remote_connection = Some(remote_b.clone());

        // Two more threads on a linked worktree whose main worktree is
        // `main_paths` — one local, one on remote_a.
        let linked_worktree_paths =
            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();

        let local_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("local-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Local Linked".into()),
            updated_at: now,
            created_at: Some(now),
            interacted_at: None,
            worktree_paths: linked_worktree_paths.clone(),
            remote_connection: None,
        };

        let remote_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("remote-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Remote Linked".into()),
            updated_at: now - chrono::Duration::seconds(1),
            created_at: Some(now - chrono::Duration::seconds(1)),
            interacted_at: None,
            worktree_paths: linked_worktree_paths,
            remote_connection: Some(remote_a.clone()),
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(local_thread, cx);
                store.save(remote_a_thread, cx);
                store.save(remote_b_thread, cx);
                store.save(local_linked_thread, cx);
                store.save(remote_linked_thread, cx);
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Exact-path lookup, filtered by host.
            let local_entries: Vec<_> = store
                .entries_for_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(local_entries, vec!["local-session"]);

            let remote_a_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_a_entries, vec!["remote-a-session"]);

            let remote_b_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_b))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_b_entries, vec!["remote-b-session"]);

            // Main-worktree lookup also pulls in linked-worktree threads,
            // still filtered by host.
            let mut local_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            local_main_entries.sort();
            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);

            let mut remote_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            remote_main_entries.sort();
            assert_eq!(
                remote_main_entries,
                vec!["remote-a-session", "remote-linked"]
            );
        });
    }
2871
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        // save_all should persist every metadata entry in one batch; all
        // three sessions must be retrievable afterwards.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entries().count(), 3);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-3"))
                    .is_some()
            );

            assert_eq!(store.entry_ids().count(), 3);
        });
    }
2926
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        // The archived flag must survive a round trip through the database:
        // after reload(), the thread is still archived and still excluded
        // from the per-path listing.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Reload in-memory state from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2985
    #[gpui::test]
    async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
        // Archiving an id with no matching entry must not create state or
        // panic — the store stays empty.
        init_test(cx);

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(ThreadId::new(), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert!(store.is_empty());
            assert_eq!(store.entries().count(), 0);
            assert_eq!(store.archived_entries().count(), 0);
        });
    }
3010
    #[gpui::test]
    async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
        // `save` immediately followed by `archive` inside the same update —
        // with no `run_until_parked` in between, so the save has not been
        // flushed yet — must not lose the archive flag.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths);
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata.clone(), cx);
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // The stored entry equals the saved metadata with only the
            // `archived` flag flipped.
            let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
            pretty_assertions::assert_eq!(
                entries,
                vec![ThreadMetadata {
                    archived: true,
                    ..metadata
                }]
            );
        });
    }
3044
3045 #[gpui::test]
3046 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
3047 init_test(cx);
3048 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3049
3050 let id = store
3051 .read_with(cx, |store, cx| {
3052 store.create_archived_worktree(
3053 "/tmp/worktree".to_string(),
3054 "/home/user/repo".to_string(),
3055 Some("feature-branch".to_string()),
3056 "staged_aaa".to_string(),
3057 "unstaged_bbb".to_string(),
3058 "original_000".to_string(),
3059 cx,
3060 )
3061 })
3062 .await
3063 .unwrap();
3064
3065 let thread_id_1 = ThreadId::new();
3066
3067 store
3068 .read_with(cx, |store, cx| {
3069 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3070 })
3071 .await
3072 .unwrap();
3073
3074 let worktrees = store
3075 .read_with(cx, |store, cx| {
3076 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3077 })
3078 .await
3079 .unwrap();
3080
3081 assert_eq!(worktrees.len(), 1);
3082 let wt = &worktrees[0];
3083 assert_eq!(wt.id, id);
3084 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
3085 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
3086 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
3087 assert_eq!(wt.staged_commit_hash, "staged_aaa");
3088 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
3089 assert_eq!(wt.original_commit_hash, "original_000");
3090 }
3091
3092 #[gpui::test]
3093 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
3094 init_test(cx);
3095 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3096
3097 let id = store
3098 .read_with(cx, |store, cx| {
3099 store.create_archived_worktree(
3100 "/tmp/worktree".to_string(),
3101 "/home/user/repo".to_string(),
3102 Some("main".to_string()),
3103 "deadbeef".to_string(),
3104 "deadbeef".to_string(),
3105 "original_000".to_string(),
3106 cx,
3107 )
3108 })
3109 .await
3110 .unwrap();
3111
3112 let thread_id_1 = ThreadId::new();
3113
3114 store
3115 .read_with(cx, |store, cx| {
3116 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3117 })
3118 .await
3119 .unwrap();
3120
3121 store
3122 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
3123 .await
3124 .unwrap();
3125
3126 let worktrees = store
3127 .read_with(cx, |store, cx| {
3128 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3129 })
3130 .await
3131 .unwrap();
3132 assert!(worktrees.is_empty());
3133 }
3134
    #[gpui::test]
    async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
        // Two different threads can be linked to the same archived worktree,
        // and both lookups must resolve to the identical record.
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/tmp/worktree".to_string(),
                    "/home/user/repo".to_string(),
                    None,
                    "abc123".to_string(),
                    "abc123".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread_id_1 = ThreadId::new();
        let thread_id_2 = ThreadId::new();

        // Link both threads to the single worktree record.
        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_2, id, cx)
            })
            .await
            .unwrap();

        let wt1 = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_1, cx)
            })
            .await
            .unwrap();

        let wt2 = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_2, cx)
            })
            .await
            .unwrap();

        // Each thread sees exactly one worktree, and it is the same row.
        assert_eq!(wt1.len(), 1);
        assert_eq!(wt2.len(), 1);
        assert_eq!(wt1[0].id, wt2[0].id);
    }
3190
3191 #[gpui::test]
3192 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
3193 init_test(cx);
3194 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3195
3196 let original_paths = PathList::new(&[
3197 Path::new("/projects/worktree-a"),
3198 Path::new("/projects/worktree-b"),
3199 Path::new("/other/unrelated"),
3200 ]);
3201 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3202 let thread_id = meta.thread_id;
3203
3204 store.update(cx, |store, cx| {
3205 store.save(meta, cx);
3206 });
3207
3208 let replacements = vec![
3209 (
3210 PathBuf::from("/projects/worktree-a"),
3211 PathBuf::from("/restored/worktree-a"),
3212 ),
3213 (
3214 PathBuf::from("/projects/worktree-b"),
3215 PathBuf::from("/restored/worktree-b"),
3216 ),
3217 ];
3218
3219 store.update(cx, |store, cx| {
3220 store.complete_worktree_restore(thread_id, &replacements, cx);
3221 });
3222
3223 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3224 let entry = entry.unwrap();
3225 let paths = entry.folder_paths().paths();
3226 assert_eq!(paths.len(), 3);
3227 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3228 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3229 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3230 }
3231
    #[gpui::test]
    async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
        // A replacement whose "from" path is not part of the thread must be
        // ignored: matched paths are rewritten, unmatched thread paths are
        // kept, and the bogus "to" path never appears.
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths =
            PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
        let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
        let thread_id = meta.thread_id;

        store.update(cx, |store, cx| {
            store.save(meta, cx);
        });

        // One real replacement plus one that matches nothing in the thread.
        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/new/worktree-a"),
            ),
            (
                PathBuf::from("/nonexistent/path"),
                PathBuf::from("/should/not/appear"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.complete_worktree_restore(thread_id, &replacements, cx);
        });

        let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 2);
        assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/other/path")));
        assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
    }
3269
    #[gpui::test]
    async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
        // `update_restored_worktree_paths` mirrors the restore test above:
        // every matched path is rewritten while unrelated paths are kept.
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths = PathList::new(&[
            Path::new("/projects/worktree-a"),
            Path::new("/projects/worktree-b"),
            Path::new("/other/unrelated"),
        ]);
        let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
        let thread_id = meta.thread_id;

        store.update(cx, |store, cx| {
            store.save(meta, cx);
        });

        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/restored/worktree-a"),
            ),
            (
                PathBuf::from("/projects/worktree-b"),
                PathBuf::from("/restored/worktree-b"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.update_restored_worktree_paths(thread_id, &replacements, cx);
        });

        // Both worktree paths moved; the unrelated folder is untouched.
        let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 3);
        assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
        assert!(paths.contains(&PathBuf::from("/other/unrelated")));
    }
3310
    #[gpui::test]
    async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
        // Replacements whose "from" path doesn't belong to the thread are
        // ignored by `update_restored_worktree_paths`; unmatched thread paths
        // are preserved as-is.
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let original_paths =
            PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
        let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
        let thread_id = meta.thread_id;

        store.update(cx, |store, cx| {
            store.save(meta, cx);
        });

        // One matching replacement, one that matches nothing.
        let replacements = vec![
            (
                PathBuf::from("/projects/worktree-a"),
                PathBuf::from("/new/worktree-a"),
            ),
            (
                PathBuf::from("/nonexistent/path"),
                PathBuf::from("/should/not/appear"),
            ),
        ];

        store.update(cx, |store, cx| {
            store.update_restored_worktree_paths(thread_id, &replacements, cx);
        });

        let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
        let entry = entry.unwrap();
        let paths = entry.folder_paths().paths();
        assert_eq!(paths.len(), 2);
        assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
        assert!(paths.contains(&PathBuf::from("/other/path")));
        assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
    }
3348
    #[gpui::test]
    async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
        // One thread can be linked to several archived worktrees; the lookup
        // must return all of them.
        init_test(cx);
        let store = cx.update(|cx| ThreadMetadataStore::global(cx));

        let id1 = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/projects/worktree-a".to_string(),
                    "/home/user/repo".to_string(),
                    Some("branch-a".to_string()),
                    "staged_a".to_string(),
                    "unstaged_a".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let id2 = store
            .read_with(cx, |store, cx| {
                store.create_archived_worktree(
                    "/projects/worktree-b".to_string(),
                    "/home/user/repo".to_string(),
                    Some("branch-b".to_string()),
                    "staged_b".to_string(),
                    "unstaged_b".to_string(),
                    "original_000".to_string(),
                    cx,
                )
            })
            .await
            .unwrap();

        let thread_id_1 = ThreadId::new();

        // Link the same thread to both worktree records.
        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
            })
            .await
            .unwrap();

        store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
            })
            .await
            .unwrap();

        let worktrees = store
            .read_with(cx, |store, cx| {
                store.get_archived_worktrees_for_thread(thread_id_1, cx)
            })
            .await
            .unwrap();

        assert_eq!(worktrees.len(), 2);

        // Order is not asserted; both worktree paths just need to be present.
        let paths: Vec<&Path> = worktrees
            .iter()
            .map(|w| w.worktree_path.as_path())
            .collect();
        assert!(paths.contains(&Path::new("/projects/worktree-a")));
        assert!(paths.contains(&Path::new("/projects/worktree-b")));
    }
3416
3417 // ── Migration tests ────────────────────────────────────────────────
3418
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        // Exercises the thread_id primary-key migration against a database
        // built with the pre-migration schema: rows with NULL thread_ids must
        // get fresh, unique 16-byte ids, existing ids must be preserved, and
        // join-table rows keyed by session_id must be rewritten to reference
        // the backfilled thread_ids.
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id, two without.
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at, thread_id) \
                 VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
        .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
        .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
        .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                 (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
        .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                 (session_id, archived_worktree_id) \
                 VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
        .unwrap();

        // Run all current migrations. sqlez skips the already-applied ones and
        // runs the remaining migrations.
        run_thread_metadata_migrations(&connection);

        // All 3 rows should survive with non-NULL thread_ids.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
        .unwrap()
        .unwrap();
        assert_eq!(
            original_tid,
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
        .unwrap()
        .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
        .unwrap()
        .unwrap();
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3575
3576 // ── ThreadWorktreePaths tests ──────────────────────────────────────
3577
3578 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3579 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3580 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3581 .iter()
3582 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3583 .unzip();
3584 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3585 }
3586
    #[test]
    fn test_thread_worktree_paths_full_add_then_remove_cycle() {
        // Full scenario from the issue:
        // 1. Start with linked worktree selectric → zed
        // 2. Add cloud
        // 3. Remove zed

        let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);

        // Step 2: add cloud (a plain folder, so main path == folder path)
        paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));

        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/projects/cloud"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
        );

        // Step 3: remove zed — only the cloud pair should remain
        paths.remove_main_path(Path::new("/projects/zed"));

        assert_eq!(paths.ordered_pairs().count(), 1);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[Path::new("/projects/cloud")])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/cloud")])
        );
    }
3625
3626 #[test]
3627 fn test_thread_worktree_paths_add_is_idempotent() {
3628 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3629
3630 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3631
3632 assert_eq!(paths.ordered_pairs().count(), 1);
3633 }
3634
3635 #[test]
3636 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3637 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3638
3639 paths.remove_main_path(Path::new("/projects/nonexistent"));
3640
3641 assert_eq!(paths.ordered_pairs().count(), 1);
3642 }
3643
3644 #[test]
3645 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3646 let folder = PathList::new(&[
3647 Path::new("/worktrees/selectric/zed"),
3648 Path::new("/projects/cloud"),
3649 ]);
3650 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3651
3652 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3653
3654 let pairs: Vec<_> = paths
3655 .ordered_pairs()
3656 .map(|(m, f)| (m.clone(), f.clone()))
3657 .collect();
3658 assert_eq!(pairs.len(), 2);
3659 assert!(pairs.contains(&(
3660 PathBuf::from("/projects/zed"),
3661 PathBuf::from("/worktrees/selectric/zed")
3662 )));
3663 assert!(pairs.contains(&(
3664 PathBuf::from("/projects/cloud"),
3665 PathBuf::from("/projects/cloud")
3666 )));
3667 }
3668
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees of the same main repo: ordered_pairs keeps a
        // distinct (main, folder) pair for each worktree, so the shared main
        // path appears once per pair. The expected main path list below is
        // built from the same duplicated input, so the equality holds
        // regardless of how PathList normalizes duplicates internally —
        // NOTE(review): confirm whether PathList dedupes; the previous
        // comments here contradicted each other on this point.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // Both pairs survive; each folder path is unique.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3694
3695 #[test]
3696 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3697 let folder = PathList::new(&[
3698 Path::new("/worktrees/selectric/zed"),
3699 Path::new("/projects/cloud"),
3700 ]);
3701 let main = PathList::new(&[Path::new("/projects/zed")]);
3702
3703 let result = WorktreePaths::from_path_lists(main, folder);
3704 assert!(result.is_err());
3705 }
3706
    /// Regression test: archiving a thread created in a git worktree must
    /// preserve the thread's folder paths so that restoring it later does
    /// not prompt the user to re-associate a project.
    #[gpui::test]
    async fn test_archived_thread_retains_paths_after_worktree_removal(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/worktrees/feature",
            serde_json::json!({ "src": { "main.rs": "" } }),
        )
        .await;
        let project = Project::test(fs, [Path::new("/worktrees/feature")], cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Push content so the event handler saves metadata with the
        // project's worktree paths.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Verify paths were saved correctly, and capture them so we can
        // assert they are unchanged at the end of the test.
        let (folder_paths_before, main_paths_before) = cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(
                !entry.folder_paths().is_empty(),
                "thread should have folder paths before archiving"
            );
            (
                entry.folder_paths().clone(),
                entry.main_worktree_paths().clone(),
            )
        });

        // Archive the thread.
        cx.update(|cx| {
            ThreadMetadataStore::global(cx).update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });
        cx.run_until_parked();

        // Remove the worktree from the project, simulating what the
        // archive flow does for linked git worktrees.
        let worktree_id = cx.update(|cx| {
            project
                .read(cx)
                .visible_worktrees(cx)
                .next()
                .unwrap()
                .read(cx)
                .id()
        });
        project.update(cx, |project, cx| {
            project.remove_worktree(worktree_id, cx);
        });
        cx.run_until_parked();

        // Trigger a thread event after archiving + worktree removal.
        // In production this happens when an async title-generation task
        // completes after the thread was archived.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Generated title".into(), cx).detach();
        });
        vcx.run_until_parked();

        // The archived thread must still have its original folder paths.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(entry.archived, "thread should still be archived");
            assert_eq!(
                entry.display_title().as_ref(),
                "Generated title",
                "title should still be updated for archived threads"
            );
            assert_eq!(
                entry.folder_paths(),
                &folder_paths_before,
                "archived thread must retain its folder paths after worktree \
                 removal + subsequent thread event, otherwise restoring it \
                 will prompt the user to re-associate a project"
            );
            assert_eq!(
                entry.main_worktree_paths(),
                &main_paths_before,
                "archived thread must retain its main worktree paths after \
                 worktree removal + subsequent thread event"
            );
        });
    }
3807
    #[gpui::test]
    async fn test_collab_guest_threads_not_saved_to_metadata_store(cx: &mut TestAppContext) {
        // While a project is a collab guest session, thread events must not
        // re-persist metadata: a thread saved while local, then deleted, must
        // stay absent after new content arrives in collab mode.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, [Path::new("/project-a")], cx).await;

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "hello".into(), cx);
            thread.set_title("Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        // Confirm the thread is in the store while the project is local.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            assert!(
                store.read(cx).entry(thread_id).is_some(),
                "thread must be in the store while the project is local"
            );
        });

        // Delete the entry, then flip the project into collab-guest mode.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(thread_id, cx);
            });
        });
        project.update(cx, |project, _cx| {
            project.mark_as_collab_for_testing();
        });

        // New content would normally trigger a save; it must not in collab mode.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "more content".into(), cx);
        });
        vcx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            assert!(
                store.read(cx).entry(thread_id).is_none(),
                "threads must not be persisted while the project is a collab guest session"
            );
        });
    }
3861
    // When a worktree is added to a collab project, update_thread_work_dirs
    // fires with the new worktree paths. Without an is_via_collab() guard it
    // overwrites the stored paths of any retained or active local threads with
    // the new (expanded) path set, corrupting metadata that belonged to the
    // guest's own local project.
    #[gpui::test]
    async fn test_collab_guest_retained_thread_paths_not_overwritten_on_worktree_change(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree("/project-a", serde_json::json!({})).await;
        fs.insert_tree("/project-b", serde_json::json!({})).await;
        let project = Project::test(fs, [Path::new("/project-a")], cx).await;

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);

        // Open thread A and give it content so its metadata is saved with /project-a.
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        let thread_a_id = crate::test_support::active_thread_id(&panel, &vcx);
        let thread_a = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_a.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "hello".into(), cx);
            thread.set_title("Thread A".into(), cx).detach();
        });
        vcx.run_until_parked();

        // Sanity-check the precondition before entering collab mode.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let entry = store.read(cx).entry(thread_a_id).unwrap();
            assert_eq!(
                entry.folder_paths().paths(),
                &[std::path::PathBuf::from("/project-a")],
                "thread A must be saved with /project-a before collab"
            );
        });

        // Open thread B, making thread A a retained thread in the panel.
        crate::test_support::open_thread_with_connection(
            &panel,
            StubAgentConnection::new(),
            &mut vcx,
        );
        vcx.run_until_parked();

        // Transition the project into collab mode (simulates joining as a guest).
        project.update(cx, |project, _cx| {
            project.mark_as_collab_for_testing();
        });

        // Add a second worktree. For a real collab guest this would be one of
        // the host's worktrees arriving via the collab protocol, but here we
        // use a local path because the test infrastructure cannot easily produce
        // a remote worktree with a fully-scanned root entry.
        //
        // This fires WorktreeAdded → update_thread_work_dirs. Without an
        // is_via_collab() guard that call overwrites the stored paths of
        // retained thread A from {/project-a} to {/project-a, /project-b},
        // polluting its metadata with a path it never belonged to.
        project
            .update(cx, |project, cx| {
                project.find_or_create_worktree(Path::new("/project-b"), true, cx)
            })
            .await
            .unwrap();
        vcx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let entry = store
                .read(cx)
                .entry(thread_a_id)
                .expect("thread A must still exist in the store");
            assert_eq!(
                entry.folder_paths().paths(),
                &[std::path::PathBuf::from("/project-a")],
                "retained thread A's stored path must not be updated while the project is via collab"
            );
        });
    }
3947}