1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent::{ThreadStore, ZED_AGENT_ID};
7use agent_client_protocol as acp;
8use anyhow::Context as _;
9use chrono::{DateTime, Utc};
10use collections::{HashMap, HashSet};
11use db::{
12 kvp::KeyValueStore,
13 sqlez::{
14 bindable::{Bind, Column},
15 domain::Domain,
16 statement::Statement,
17 thread_safe_connection::ThreadSafeConnection,
18 },
19 sqlez_macros::sql,
20};
21use fs::Fs;
22use futures::{FutureExt, future::Shared};
23use gpui::{AppContext as _, Entity, Global, Subscription, Task};
24pub use project::WorktreePaths;
25use project::{AgentId, linked_worktree_short_name};
26use remote::{RemoteConnectionOptions, same_remote_connection_identity};
27use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
28use util::ResultExt as _;
29use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
30
31use crate::DEFAULT_THREAD_TITLE;
32
/// Stable, globally-unique identifier for a thread, persisted in the
/// metadata database. Wraps a UUID so it can be bound to and read back
/// from SQL columns (see the `Bind`/`Column` impls below).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, serde::Serialize, serde::Deserialize)]
pub struct ThreadId(uuid::Uuid);
35
36impl ThreadId {
37 pub fn new() -> Self {
38 Self(uuid::Uuid::new_v4())
39 }
40}
41
impl Bind for ThreadId {
    /// Binds the inner UUID to the statement; delegates entirely to the
    /// UUID's own `Bind` impl.
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        self.0.bind(statement, start_index)
    }
}
47
48impl Column for ThreadId {
49 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
50 let (uuid, next) = Column::column(statement, start_index)?;
51 Ok((ThreadId(uuid), next))
52 }
53}
54
// KVP flags marking the one-shot migrations below as completed so they
// never run twice.
const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill";
const THREAD_ID_MIGRATION_KEY: &str = "thread-metadata-thread-id-backfill";
57
/// Sets up the global [`ThreadMetadataStore`] and kicks off the one-shot
/// data migrations. The remote-connection backfill is handed the metadata
/// migration task so it runs only after the migrated rows exist.
pub fn init(cx: &mut App) {
    ThreadMetadataStore::init_global(cx);
    let migration_task = migrate_thread_metadata(cx);
    migrate_thread_remote_connections(cx, migration_task);
    migrate_thread_ids(cx);
}
64
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // Entries already present in the metadata DB (matched by session id)
        // are skipped, making this migration idempotent across restarts.
        let existing_list = db.list()?;
        let is_first_migration = existing_list.is_empty();
        let existing_session_ids: HashSet<Arc<str>> = existing_list
            .into_iter()
            .filter_map(|m| m.session_id.map(|s| s.0))
            .collect();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_session_ids.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        thread_id: ThreadId::new(),
                        session_id: Some(entry.id),
                        agent_id: ZED_AGENT_ID.clone(),
                        // Store `None` for empty or default titles so the UI
                        // falls back to the default display title.
                        title: if entry.title.is_empty()
                            || entry.title.as_ref() == DEFAULT_THREAD_TITLE
                        {
                            None
                        } else {
                            Some(entry.title)
                        },
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
                        remote_connection: None,
                        // Migrated threads default to archived; the
                        // first-migration pass below unarchives a few.
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                if entry.worktree_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.worktree_paths.folder_path_list().clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
}
150
151fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task<anyhow::Result<()>>) {
152 let store = ThreadMetadataStore::global(cx);
153 let db = store.read(cx).db.clone();
154 let kvp = KeyValueStore::global(cx);
155 let workspace_db = WorkspaceDb::global(cx);
156 let fs = <dyn Fs>::global(cx);
157
158 cx.spawn(async move |cx| -> anyhow::Result<()> {
159 migration_task.await?;
160
161 if kvp
162 .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)?
163 .is_some()
164 {
165 return Ok(());
166 }
167
168 let recent_workspaces = workspace_db.recent_workspaces_on_disk(fs.as_ref()).await?;
169
170 let mut local_path_lists = HashSet::<PathList>::default();
171 let mut remote_path_lists = HashMap::<PathList, RemoteConnectionOptions>::default();
172
173 recent_workspaces
174 .iter()
175 .filter(|(_, location, path_list, _)| {
176 !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local)
177 })
178 .for_each(|(_, _, path_list, _)| {
179 local_path_lists.insert(path_list.clone());
180 });
181
182 for (_, location, path_list, _) in recent_workspaces {
183 match location {
184 SerializedWorkspaceLocation::Remote(remote_connection)
185 if !local_path_lists.contains(&path_list) =>
186 {
187 remote_path_lists
188 .entry(path_list)
189 .or_insert(remote_connection);
190 }
191 _ => {}
192 }
193 }
194
195 let mut reloaded = false;
196 for metadata in db.list()? {
197 if metadata.remote_connection.is_some() {
198 continue;
199 }
200
201 if let Some(remote_connection) = remote_path_lists
202 .get(metadata.folder_paths())
203 .or_else(|| remote_path_lists.get(metadata.main_worktree_paths()))
204 {
205 db.save(ThreadMetadata {
206 remote_connection: Some(remote_connection.clone()),
207 ..metadata
208 })
209 .await?;
210 reloaded = true;
211 }
212 }
213
214 let reloaded_task = reloaded
215 .then_some(store.update(cx, |store, cx| store.reload(cx)))
216 .unwrap_or(Task::ready(()).shared());
217
218 kvp.write_kvp(
219 THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(),
220 "1".to_string(),
221 )
222 .await?;
223 reloaded_task.await;
224
225 Ok(())
226 })
227 .detach_and_log_err(cx);
228}
229
230fn migrate_thread_ids(cx: &mut App) {
231 let store = ThreadMetadataStore::global(cx);
232 let db = store.read(cx).db.clone();
233 let kvp = KeyValueStore::global(cx);
234
235 cx.spawn(async move |cx| -> anyhow::Result<()> {
236 if kvp.read_kvp(THREAD_ID_MIGRATION_KEY)?.is_some() {
237 return Ok(());
238 }
239
240 let mut reloaded = false;
241 for metadata in db.list()? {
242 db.save(metadata).await?;
243 reloaded = true;
244 }
245
246 let reloaded_task = reloaded
247 .then_some(store.update(cx, |store, cx| store.reload(cx)))
248 .unwrap_or(Task::ready(()).shared());
249
250 kvp.write_kvp(THREAD_ID_MIGRATION_KEY.to_string(), "1".to_string())
251 .await?;
252 reloaded_task.await;
253
254 Ok(())
255 })
256 .detach_and_log_err(cx);
257}
258
/// Newtype that registers the [`ThreadMetadataStore`] entity as a GPUI global.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
261
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier for this thread in the metadata DB.
    pub thread_id: ThreadId,
    /// Backing ACP session; `None` while the thread is a draft.
    pub session_id: Option<acp::SessionId>,
    /// Agent that owns this thread (e.g. the built-in Zed agent).
    pub agent_id: AgentId,
    /// User-visible title; `None` means "fall back to the default title".
    pub title: Option<SharedString>,
    /// Last time the thread was updated.
    pub updated_at: DateTime<Utc>,
    /// Creation time, when known.
    pub created_at: Option<DateTime<Utc>>,
    /// Folder / worktree paths the thread was opened with.
    pub worktree_paths: WorktreePaths,
    /// Remote connection the thread belongs to; `None` for local threads.
    pub remote_connection: Option<RemoteConnectionOptions>,
    /// Archived threads are excluded from the active sidebar queries.
    pub archived: bool,
}
276
277impl ThreadMetadata {
278 pub fn new_draft(
279 thread_id: ThreadId,
280 agent_id: AgentId,
281 title: Option<SharedString>,
282 worktree_paths: WorktreePaths,
283 remote_connection: Option<RemoteConnectionOptions>,
284 ) -> Self {
285 let now = Utc::now();
286 Self {
287 thread_id,
288 session_id: None,
289 agent_id,
290 title,
291 updated_at: now,
292 created_at: Some(now),
293 worktree_paths: worktree_paths.clone(),
294 remote_connection,
295 archived: worktree_paths.is_empty(),
296 }
297 }
298
299 pub fn is_draft(&self) -> bool {
300 self.session_id.is_none()
301 }
302
303 pub fn display_title(&self) -> SharedString {
304 self.title
305 .clone()
306 .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into())
307 }
308
309 pub fn folder_paths(&self) -> &PathList {
310 self.worktree_paths.folder_path_list()
311 }
312 pub fn main_worktree_paths(&self) -> &PathList {
313 self.worktree_paths.main_worktree_path_list()
314 }
315}
316
317/// Derives worktree display info from a thread's stored path list.
318///
319/// For each path in the thread's `folder_paths`, produces a
320/// [`ThreadItemWorktreeInfo`] with a short display name, full path, and whether
321/// the worktree is the main checkout or a linked git worktree. When
322/// multiple main paths exist and a linked worktree's short name alone
323/// wouldn't identify which main project it belongs to, the main project
324/// name is prefixed for disambiguation (e.g. `project:feature`).
325pub fn worktree_info_from_thread_paths<S: std::hash::BuildHasher>(
326 worktree_paths: &WorktreePaths,
327 branch_names: &std::collections::HashMap<PathBuf, SharedString, S>,
328) -> Vec<ThreadItemWorktreeInfo> {
329 let mut infos: Vec<ThreadItemWorktreeInfo> = Vec::new();
330 let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new();
331 let mut unique_main_count = HashSet::default();
332
333 for (main_path, folder_path) in worktree_paths.ordered_pairs() {
334 unique_main_count.insert(main_path.clone());
335 let is_linked = main_path != folder_path;
336
337 if is_linked {
338 let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default();
339 let project_name = main_path
340 .file_name()
341 .map(|n| SharedString::from(n.to_string_lossy().to_string()))
342 .unwrap_or_default();
343 linked_short_names.push((short_name.clone(), project_name));
344 infos.push(ThreadItemWorktreeInfo {
345 name: short_name,
346 full_path: SharedString::from(folder_path.display().to_string()),
347 highlight_positions: Vec::new(),
348 kind: WorktreeKind::Linked,
349 branch_name: branch_names.get(folder_path).cloned(),
350 });
351 } else {
352 let Some(name) = folder_path.file_name() else {
353 continue;
354 };
355 infos.push(ThreadItemWorktreeInfo {
356 name: SharedString::from(name.to_string_lossy().to_string()),
357 full_path: SharedString::from(folder_path.display().to_string()),
358 highlight_positions: Vec::new(),
359 kind: WorktreeKind::Main,
360 branch_name: branch_names.get(folder_path).cloned(),
361 });
362 }
363 }
364
365 // When the group has multiple main worktree paths and the thread's
366 // folder paths don't all share the same short name, prefix each
367 // linked worktree chip with its main project name so the user knows
368 // which project it belongs to.
369 let all_same_name = infos.len() > 1 && infos.iter().all(|i| i.name == infos[0].name);
370
371 if unique_main_count.len() > 1 && !all_same_name {
372 for (info, (_short_name, project_name)) in infos
373 .iter_mut()
374 .filter(|i| i.kind == WorktreeKind::Linked)
375 .zip(linked_short_names.iter())
376 {
377 info.name = SharedString::from(format!("{}:{}", project_name, info.name));
378 }
379 }
380
381 infos
382}
383
384impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
385 fn from(meta: &ThreadMetadata) -> Self {
386 let session_id = meta
387 .session_id
388 .clone()
389 .unwrap_or_else(|| acp::SessionId::new(meta.thread_id.0.to_string()));
390 Self {
391 session_id,
392 work_dirs: Some(meta.folder_paths().clone()),
393 title: meta.title.clone(),
394 updated_at: Some(meta.updated_at),
395 created_at: meta.created_at,
396 meta: None,
397 }
398 }
399}
400
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived. Stored in the metadata DB and used to restore
/// the worktree later.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
442
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Listens to ConversationView events and updates metadata when the root thread changes.
pub struct ThreadMetadataStore {
    // Persistent backing store; writes are funneled through
    // `pending_thread_ops_tx` and drained by `_db_operations_task`.
    db: ThreadMetadataDb,
    // All threads keyed by id. The three maps below are derived indices
    // that must be kept in sync on every save/delete/reload.
    threads: HashMap<ThreadId, ThreadMetadata>,
    // Thread ids indexed by their full folder path list.
    threads_by_paths: HashMap<PathList, HashSet<ThreadId>>,
    // Thread ids indexed by their main-worktree path list.
    threads_by_main_paths: HashMap<PathList, HashSet<ThreadId>>,
    // ACP session id -> thread id lookup.
    threads_by_session: HashMap<acp::SessionId, ThreadId>,
    // In-flight reload; shared so concurrent callers can await the same one.
    reload_task: Option<Shared<Task<()>>>,
    // Per-ConversationView subscriptions, removed when the view is released.
    conversation_subscriptions: HashMap<gpui::EntityId, Subscription>,
    // Queue of DB upserts/deletes applied on a background task.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    // Running archive jobs; dropping the sender cancels the background task.
    in_flight_archives: HashMap<ThreadId, (Task<()>, smol::channel::Sender<()>)>,
    // Background task that drains `pending_thread_ops_tx`.
    _db_operations_task: Task<()>,
}
458
/// A queued database write: either upsert a thread's metadata or delete it.
#[derive(Debug, PartialEq)]
enum DbOperation {
    Upsert(ThreadMetadata),
    Delete(ThreadId),
}
464
465impl DbOperation {
466 fn id(&self) -> ThreadId {
467 match self {
468 DbOperation::Upsert(thread) => thread.thread_id,
469 DbOperation::Delete(thread_id) => *thread_id,
470 }
471 }
472}
473
/// Override for the test DB name used by `ThreadMetadataStore::init_global`.
/// When set as a GPUI global, `init_global` uses this name instead of
/// deriving one from the thread name. This prevents data from leaking
/// across proptest cases that share a thread name.
#[cfg(any(test, feature = "test-support"))]
pub struct TestMetadataDbName(pub String);
#[cfg(any(test, feature = "test-support"))]
impl gpui::Global for TestMetadataDbName {}
482
#[cfg(any(test, feature = "test-support"))]
impl TestMetadataDbName {
    /// Returns the override name when one was set as a global; otherwise
    /// derives a name from the current thread's name.
    pub fn global(cx: &App) -> String {
        match cx.try_global::<Self>() {
            Some(name) => name.0.clone(),
            None => {
                let thread = std::thread::current();
                let test_name = thread.name().unwrap_or("unknown_test");
                format!("THREAD_METADATA_DB_{}", test_name)
            }
        }
    }
}
495
496impl ThreadMetadataStore {
497 #[cfg(not(any(test, feature = "test-support")))]
498 pub fn init_global(cx: &mut App) {
499 if cx.has_global::<Self>() {
500 return;
501 }
502
503 let db = ThreadMetadataDb::global(cx);
504 let thread_store = cx.new(|cx| Self::new(db, cx));
505 cx.set_global(GlobalThreadMetadataStore(thread_store));
506 }
507
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        // Unlike the production variant, this always (re)creates the store,
        // backed by a per-test database named via `TestMetadataDbName`.
        let db_name = TestMetadataDbName::global(cx);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
515
516 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
517 cx.try_global::<GlobalThreadMetadataStore>()
518 .map(|store| store.0.clone())
519 }
520
    /// The global store entity. Panics if `init_global` hasn't run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }
524
    /// Whether the store currently holds no threads at all.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }
528
    /// Returns all thread IDs (archived and unarchived), in no particular order.
    pub fn entry_ids(&self) -> impl Iterator<Item = ThreadId> + '_ {
        self.threads.keys().copied()
    }
533
    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, thread_id: ThreadId) -> Option<&ThreadMetadata> {
        self.threads.get(&thread_id)
    }
538
539 /// Returns the metadata for a thread identified by its ACP session ID.
540 pub fn entry_by_session(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
541 let thread_id = self.threads_by_session.get(session_id)?;
542 self.threads.get(thread_id)
543 }
544
    /// Returns all threads (archived and unarchived), in no particular order.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
549
550 /// Returns all archived threads.
551 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
552 self.entries().filter(|t| t.archived)
553 }
554
555 /// Returns all threads for the given path list and remote connection,
556 /// excluding archived threads.
557 ///
558 /// When `remote_connection` is `Some`, only threads whose persisted
559 /// `remote_connection` matches by normalized identity are returned.
560 /// When `None`, only local (non-remote) threads are returned.
561 pub fn entries_for_path<'a>(
562 &'a self,
563 path_list: &PathList,
564 remote_connection: Option<&'a RemoteConnectionOptions>,
565 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
566 self.threads_by_paths
567 .get(path_list)
568 .into_iter()
569 .flatten()
570 .filter_map(|s| self.threads.get(s))
571 .filter(|s| !s.archived)
572 .filter(move |s| {
573 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
574 })
575 }
576
577 /// Returns threads whose `main_worktree_paths` matches the given path list
578 /// and remote connection, excluding archived threads. This finds threads
579 /// that were opened in a linked worktree but are associated with the given
580 /// main worktree.
581 ///
582 /// When `remote_connection` is `Some`, only threads whose persisted
583 /// `remote_connection` matches by normalized identity are returned.
584 /// When `None`, only local (non-remote) threads are returned.
585 pub fn entries_for_main_worktree_path<'a>(
586 &'a self,
587 path_list: &PathList,
588 remote_connection: Option<&'a RemoteConnectionOptions>,
589 ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
590 self.threads_by_main_paths
591 .get(path_list)
592 .into_iter()
593 .flatten()
594 .filter_map(|s| self.threads.get(s))
595 .filter(|s| !s.archived)
596 .filter(move |s| {
597 same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
598 })
599 }
600
    /// Reloads all metadata from the database and rebuilds every in-memory
    /// index from scratch. Returns a shared task so concurrent callers can
    /// await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous reload task; the new one supersedes it.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Full rebuild rather than diffing against current state.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();
                    this.threads_by_session.clear();

                    for row in rows {
                        if let Some(sid) = &row.session_id {
                            this.threads_by_session.insert(sid.clone(), row.thread_id);
                        }
                        this.threads_by_paths
                            .entry(row.folder_paths().clone())
                            .or_default()
                            .insert(row.thread_id);
                        // Main-path index only holds threads that have one.
                        if !row.main_worktree_paths().is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths().clone())
                                .or_default()
                                .insert(row.thread_id);
                        }
                        this.threads.insert(row.thread_id, row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
645
646 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
647 for metadata in metadata {
648 self.save_internal(metadata);
649 }
650 cx.notify();
651 }
652
    /// Saves a single metadata entry and notifies observers.
    pub fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
657
    /// Inserts or updates a thread in the in-memory maps and queues an
    /// asynchronous upsert to the database. Does not notify — callers do.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the thread already exists under different paths, drop it from
        // the stale path indices before re-inserting under the new ones.
        if let Some(thread) = self.threads.get(&metadata.thread_id) {
            if thread.folder_paths() != metadata.folder_paths() {
                if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
            if thread.main_worktree_paths() != metadata.main_worktree_paths()
                && !thread.main_worktree_paths().is_empty()
            {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&metadata.thread_id);
                }
            }
        }

        if let Some(sid) = &metadata.session_id {
            self.threads_by_session
                .insert(sid.clone(), metadata.thread_id);
        }

        self.threads.insert(metadata.thread_id, metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths().clone())
            .or_default()
            .insert(metadata.thread_id);

        // Main-path index only holds threads that have a main worktree.
        if !metadata.main_worktree_paths().is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths().clone())
                .or_default()
                .insert(metadata.thread_id);
        }

        // Persist via the background DB-operations queue.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
700
    /// Updates a thread's folder paths to `work_dirs`, keeping its existing
    /// main worktree paths when the combination is valid. No-op if the
    /// thread doesn't exist.
    pub fn update_working_directories(
        &mut self,
        thread_id: ThreadId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id) {
            debug_assert!(
                !thread.archived,
                "update_working_directories called on archived thread"
            );
            self.save_internal(ThreadMetadata {
                // Prefer pairing the new dirs with the existing main paths;
                // fall back to treating `work_dirs` as plain folder paths.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    work_dirs.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&work_dirs)),
                ..thread.clone()
            });
            cx.notify();
        }
    }
723
    /// Sets `worktree_paths` on every listed thread that exists, differs,
    /// and is not archived. Notifies observers only if something changed.
    pub fn update_worktree_paths(
        &mut self,
        thread_ids: &[ThreadId],
        worktree_paths: WorktreePaths,
        cx: &mut Context<Self>,
    ) {
        let mut changed = false;
        for &thread_id in thread_ids {
            let Some(thread) = self.threads.get(&thread_id) else {
                continue;
            };
            if thread.worktree_paths == worktree_paths {
                continue;
            }
            // Don't overwrite paths for archived threads — the
            // project may no longer include the worktree that was
            // removed during the archive flow.
            if thread.archived {
                continue;
            }
            self.save_internal(ThreadMetadata {
                worktree_paths: worktree_paths.clone(),
                ..thread.clone()
            });
            changed = true;
        }
        if changed {
            cx.notify();
        }
    }
754
    /// Marks the thread archived and, when given, tracks the in-flight
    /// archive job so it can be cancelled by a later unarchive.
    pub fn archive(
        &mut self,
        thread_id: ThreadId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(thread_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(thread_id, job);
        }
    }
767
    /// Marks the thread unarchived and cancels any in-flight archive job.
    pub fn unarchive(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        self.update_archived(thread_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(&thread_id);
    }
773
    /// Drops the bookkeeping for an archive job that has finished.
    pub fn cleanup_completed_archive(&mut self, thread_id: ThreadId) {
        self.in_flight_archives.remove(&thread_id);
    }
777
778 /// Returns `true` if any unarchived thread other than `current_session_id`
779 /// references `path` in its folder paths. Used to determine whether a
780 /// worktree can safely be removed from disk.
781 pub fn path_is_referenced_by_other_unarchived_threads(
782 &self,
783 thread_id: ThreadId,
784 path: &Path,
785 remote_connection: Option<&RemoteConnectionOptions>,
786 ) -> bool {
787 self.entries().any(|thread| {
788 thread.thread_id != thread_id
789 && !thread.archived
790 && same_remote_connection_identity(
791 thread.remote_connection.as_ref(),
792 remote_connection,
793 )
794 && thread
795 .folder_paths()
796 .paths()
797 .iter()
798 .any(|other_path| other_path.as_path() == path)
799 })
800 }
801
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    ///
    /// Note: only the FIRST occurrence of each `old_path` is replaced here,
    /// unlike `complete_worktree_restore`, which replaces all occurrences.
    /// NOTE(review): these two methods are near-duplicates — consider
    /// consolidating them.
    pub fn update_restored_worktree_paths(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Keep the existing main paths when the pairing is valid;
                // otherwise fall back to plain folder paths.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
831
    /// Applies `(old_path, new_path)` replacements to a thread's folder
    /// paths once a worktree restore has completed.
    ///
    /// Note: replaces ALL occurrences of each `old_path`, unlike
    /// `update_restored_worktree_paths`, which replaces only the first.
    /// NOTE(review): these two methods are near-duplicates — consider
    /// consolidating them.
    pub fn complete_worktree_restore(
        &mut self,
        thread_id: ThreadId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(&thread_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths().paths().to_vec();
            for (old_path, new_path) in path_replacements {
                for path in &mut paths {
                    if path == old_path {
                        *path = new_path.clone();
                    }
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                // Keep the existing main paths when the pairing is valid;
                // otherwise fall back to plain folder paths.
                worktree_paths: WorktreePaths::from_path_lists(
                    thread.main_worktree_paths().clone(),
                    new_folder_paths.clone(),
                )
                .unwrap_or_else(|_| WorktreePaths::from_folder_paths(&new_folder_paths)),
                ..thread
            });
            cx.notify();
        }
    }
859
    /// Apply a mutation to the worktree paths of all threads whose current
    /// `folder_paths` matches `current_folder_paths`, then re-index.
    /// When `remote_connection` is provided, only threads with a matching
    /// remote connection are affected.
    pub fn change_worktree_paths(
        &mut self,
        current_folder_paths: &PathList,
        remote_connection: Option<&RemoteConnectionOptions>,
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        // Collect matching ids first; `mutate_thread_paths` needs `&mut self`.
        let thread_ids: Vec<_> = self
            .threads_by_paths
            .get(current_folder_paths)
            .into_iter()
            .flatten()
            .filter(|id| {
                self.threads.get(id).is_some_and(|t| {
                    !t.archived
                        && same_remote_connection_identity(
                            t.remote_connection.as_ref(),
                            remote_connection,
                        )
                })
            })
            .copied()
            .collect();

        self.mutate_thread_paths(&thread_ids, mutate, cx);
    }
890
    /// Applies `mutate` to each listed thread's `worktree_paths` in place,
    /// keeping the path indices consistent and queueing a DB upsert for
    /// every touched thread. Notifies once at the end.
    fn mutate_thread_paths(
        &mut self,
        thread_ids: &[ThreadId],
        mutate: impl Fn(&mut WorktreePaths),
        cx: &mut Context<Self>,
    ) {
        if thread_ids.is_empty() {
            return;
        }

        for thread_id in thread_ids {
            if let Some(thread) = self.threads.get_mut(thread_id) {
                // Remove from both indices under the OLD paths before
                // mutating, then re-insert under the new ones.
                if let Some(ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    ids.remove(thread_id);
                }
                if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                    ids.remove(thread_id);
                }

                mutate(&mut thread.worktree_paths);

                self.threads_by_main_paths
                    .entry(thread.main_worktree_paths().clone())
                    .or_default()
                    .insert(*thread_id);
                self.threads_by_paths
                    .entry(thread.folder_paths().clone())
                    .or_default()
                    .insert(*thread_id);

                // Persist via the background DB-operations queue.
                self.pending_thread_ops_tx
                    .try_send(DbOperation::Upsert(thread.clone()))
                    .log_err();
            }
        }

        cx.notify();
    }
932
    /// Inserts a new [`ArchivedGitWorktree`] record on a background thread
    /// and resolves to its auto-generated row id.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }
956
957 pub fn link_thread_to_archived_worktree(
958 &self,
959 thread_id: ThreadId,
960 archived_worktree_id: i64,
961 cx: &App,
962 ) -> Task<anyhow::Result<()>> {
963 let db = self.db.clone();
964 cx.background_spawn(async move {
965 db.link_thread_to_archived_worktree(thread_id, archived_worktree_id)
966 .await
967 })
968 }
969
970 pub fn get_archived_worktrees_for_thread(
971 &self,
972 thread_id: ThreadId,
973 cx: &App,
974 ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
975 let db = self.db.clone();
976 cx.background_spawn(async move { db.get_archived_worktrees_for_thread(thread_id).await })
977 }
978
979 pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
980 let db = self.db.clone();
981 cx.background_spawn(async move { db.delete_archived_worktree(id).await })
982 }
983
984 pub fn unlink_thread_from_all_archived_worktrees(
985 &self,
986 thread_id: ThreadId,
987 cx: &App,
988 ) -> Task<anyhow::Result<()>> {
989 let db = self.db.clone();
990 cx.background_spawn(async move {
991 db.unlink_thread_from_all_archived_worktrees(thread_id)
992 .await
993 })
994 }
995
996 pub fn is_archived_worktree_referenced(
997 &self,
998 archived_worktree_id: i64,
999 cx: &App,
1000 ) -> Task<anyhow::Result<bool>> {
1001 let db = self.db.clone();
1002 cx.background_spawn(async move {
1003 db.is_archived_worktree_referenced(archived_worktree_id)
1004 .await
1005 })
1006 }
1007
    /// Loads, per thread, the archived branch name for each worktree path.
    /// The underlying db call is synchronous (no `.await`), so it's run on a
    /// background thread.
    pub fn get_all_archived_branch_names(
        &self,
        cx: &App,
    ) -> Task<anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_all_archived_branch_names() })
    }
1015
    /// Sets the `archived` flag on a thread and persists it. No-op if the
    /// thread doesn't exist.
    fn update_archived(&mut self, thread_id: ThreadId, archived: bool, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            self.save_internal(ThreadMetadata {
                archived,
                ..thread.clone()
            });
            cx.notify();
        }
    }
1025
    /// Remove a thread from every in-memory index and enqueue an asynchronous
    /// database delete.
    pub fn delete(&mut self, thread_id: ThreadId, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&thread_id) {
            // Drop the session-id reverse mapping, if the thread had one.
            if let Some(sid) = &thread.session_id {
                self.threads_by_session.remove(sid);
            }
            // Unregister from the folder-path index.
            if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
                thread_ids.remove(&thread_id);
            }
            // Unregister from the main-worktree-path index, which is only
            // populated for threads that have main worktree paths.
            if !thread.main_worktree_paths().is_empty() {
                if let Some(thread_ids) = self
                    .threads_by_main_paths
                    .get_mut(thread.main_worktree_paths())
                {
                    thread_ids.remove(&thread_id);
                }
            }
        }
        self.threads.remove(&thread_id);
        // Persistence happens via the DB-operation channel; failure to enqueue
        // is logged rather than propagated (best effort).
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(thread_id))
            .log_err();
        cx.notify();
    }
1049
    /// Build the store: wire up subscriptions to every `ConversationView`
    /// created in the app, start the background task that drains queued
    /// database operations, and kick off an initial reload from disk.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Observe every new conversation view so thread updates can be
        // mirrored into this metadata store.
        cx.observe_new::<crate::ConversationView>(move |_view, _window, cx| {
            let view_entity = cx.entity();
            let entity_id = view_entity.entity_id();

            // Drop our subscription entry when the view is released, so the
            // map doesn't grow unboundedly.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |_view, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            store.conversation_subscriptions.remove(&entity_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&view_entity, Self::handle_conversation_event);
                    this.conversation_subscriptions
                        .insert(entity_id, subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        // Background writer: wake on the first queued operation, opportunistically
        // drain whatever else has accumulated, then deduplicate so only the
        // latest upsert/delete per thread actually hits the database.
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(thread_id) => {
                                db.delete(thread_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            threads_by_session: HashMap::default(),
            reload_task: None,
            conversation_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate the in-memory caches from the database.
        let _ = this.reload(cx);
        this
    }
1118
1119 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
1120 let mut ops = HashMap::default();
1121 for operation in operations.into_iter().rev() {
1122 if ops.contains_key(&operation.id()) {
1123 continue;
1124 }
1125 ops.insert(operation.id(), operation);
1126 }
1127 ops.into_values().collect()
1128 }
1129
    /// Mirror a conversation view's root-thread update into persisted
    /// metadata, preserving creation time and archive-related fields.
    fn handle_conversation_event(
        &mut self,
        conversation_view: Entity<crate::ConversationView>,
        _event: &crate::conversation_view::RootThreadUpdated,
        cx: &mut Context<Self>,
    ) {
        let view = conversation_view.read(cx);
        let thread_id = view.thread_id;
        let Some(thread) = view.root_acp_thread(cx) else {
            return;
        };

        let thread_ref = thread.read(cx);
        // Don't persist metadata for threads that have no content yet.
        if thread_ref.entries().is_empty() {
            return;
        }

        let existing_thread = self.entry(thread_id);
        let session_id = Some(thread_ref.session_id().clone());
        let title = thread_ref.title();

        let updated_at = Utc::now();

        // Keep the original creation time across updates; only brand-new
        // threads get created_at == updated_at.
        let created_at = existing_thread
            .and_then(|t| t.created_at)
            .unwrap_or_else(|| updated_at);

        let agent_id = thread_ref.connection().agent_id();

        // Preserve project-dependent fields for archived threads.
        // The worktree may already have been removed from the
        // project as part of the archive flow, so re-evaluating
        // these from the current project state would yield
        // empty/incorrect results.
        let (worktree_paths, remote_connection) =
            if let Some(existing) = existing_thread.filter(|t| t.archived) {
                (
                    existing.worktree_paths.clone(),
                    existing.remote_connection.clone(),
                )
            } else {
                let project = thread_ref.project().read(cx);
                let worktree_paths = project.worktree_paths(cx);
                let remote_connection = project.remote_connection_options(cx);

                (worktree_paths, remote_connection)
            };

        // Threads without a folder path (e.g. started in an empty
        // window) are archived by default so they don't get lost,
        // because they won't show up in the sidebar. Users can reload
        // them from the archive.
        let archived = existing_thread
            .map(|t| t.archived)
            .unwrap_or(worktree_paths.is_empty());

        let metadata = ThreadMetadata {
            thread_id,
            session_id,
            agent_id,
            title,
            created_at: Some(created_at),
            updated_at,
            worktree_paths,
            remote_connection,
            archived,
        };

        self.save(metadata, cx);
    }
1200}
1201
// Expose the store as a GPUI global so any part of the app can reach it.
impl Global for ThreadMetadataStore {}

/// Newtype over the shared SQLite connection holding the sidebar-thread
/// metadata and archived-worktree tables.
struct ThreadMetadataDb(ThreadSafeConnection);
1205
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Append-only migration list: never edit an existing entry, only add new
    // ones — sqlez tracks how many migrations have already been applied.
    const MIGRATIONS: &[&str] = &[
        // v1: initial sidebar-thread table, keyed by ACP session id.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // v2-v4: additive columns for archiving and main-worktree indexing.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // v5: archived git worktrees plus the session-keyed link table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        // v6-v7: remote-connection JSON column and the new thread-id column.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN thread_id BLOB),
        // v8: re-key both tables on thread_id instead of session_id. Rows
        // predating v7 get a random thread_id, link rows are carried over via
        // a join, and the tables are rebuilt (SQLite can't alter primary keys
        // in place).
        sql!(
            UPDATE sidebar_threads SET thread_id = randomblob(16) WHERE thread_id IS NULL;

            CREATE TABLE thread_archived_worktrees_v2(
                thread_id BLOB NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (thread_id, archived_worktree_id)
            ) STRICT;

            INSERT INTO thread_archived_worktrees_v2(thread_id, archived_worktree_id)
            SELECT s.thread_id, t.archived_worktree_id
            FROM thread_archived_worktrees t
            JOIN sidebar_threads s ON s.session_id = t.session_id;

            DROP TABLE thread_archived_worktrees;
            ALTER TABLE thread_archived_worktrees_v2 RENAME TO thread_archived_worktrees;

            CREATE TABLE sidebar_threads_v2(
                thread_id BLOB PRIMARY KEY,
                session_id TEXT,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT,
                archived INTEGER DEFAULT 0,
                main_worktree_paths TEXT,
                main_worktree_paths_order TEXT,
                remote_connection TEXT
            ) STRICT;

            INSERT INTO sidebar_threads_v2(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection)
            SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection
            FROM sidebar_threads;

            DROP TABLE sidebar_threads;
            ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
        ),
    ];
}
1284
// Registers ThreadMetadataDb as a lazily-opened global sqlez connection.
db::static_connection!(ThreadMetadataDb, []);
1286
impl ThreadMetadataDb {
    /// List all thread ids, most recently updated first.
    #[allow(dead_code)]
    pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
        self.select::<ThreadId>(
            "SELECT thread_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        // Column order here must match `impl Column for ThreadMetadata`.
        self.select::<ThreadMetadata>(
            "SELECT thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }

    /// Upsert metadata for a thread.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let session_id = row.session_id.as_ref().map(|s| s.0.clone());
        // The built-in Zed agent is stored as NULL (the load path maps NULL
        // back to ZED_AGENT_ID).
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row
            .title
            .as_ref()
            .map(|t| t.to_string())
            .unwrap_or_default();
        // Timestamps are stored as RFC 3339 text so lexical ORDER BY works.
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        // Empty path lists are stored as NULL rather than as empty strings.
        let serialized = row.folder_paths().serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths().serialize();
        let (main_worktree_paths, main_worktree_paths_order) =
            if row.main_worktree_paths().is_empty() {
                (None, None)
            } else {
                (Some(main_serialized.paths), Some(main_serialized.order))
            };
        // Remote connection options are stored as a JSON blob.
        let remote_connection = row
            .remote_connection
            .as_ref()
            .map(serde_json::to_string)
            .transpose()
            .context("serialize thread metadata remote connection")?;
        let thread_id = row.thread_id;
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
                       VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) \
                       ON CONFLICT(thread_id) DO UPDATE SET \
                           session_id = excluded.session_id, \
                           agent_id = excluded.agent_id, \
                           title = excluded.title, \
                           updated_at = excluded.updated_at, \
                           created_at = excluded.created_at, \
                           folder_paths = excluded.folder_paths, \
                           folder_paths_order = excluded.folder_paths_order, \
                           archived = excluded.archived, \
                           main_worktree_paths = excluded.main_worktree_paths, \
                           main_worktree_paths_order = excluded.main_worktree_paths_order, \
                           remote_connection = excluded.remote_connection";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind parameters in the same order as the VALUES placeholders.
            let mut i = stmt.bind(&thread_id, 1)?;
            i = stmt.bind(&session_id, i)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            i = stmt.bind(&main_worktree_paths_order, i)?;
            stmt.bind(&remote_connection, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, thread_id: ThreadId) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE thread_id = ?")?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert a new archived git worktree row and return its generated id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // RETURNING yields exactly one row for a successful insert.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Associate a thread with an archived worktree row.
    pub async fn link_thread_to_archived_worktree(
        &self,
        thread_id: ThreadId,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(thread_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&thread_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetch every archived worktree linked to the given thread.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        // Column order must match `impl Column for ArchivedGitWorktree`.
        self.select_bound::<ThreadId, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.thread_id = ?1",
        )?(thread_id)
    }

    /// Delete an archived worktree row together with all of its thread links.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Remove link rows first to satisfy the foreign-key reference.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Remove every archived-worktree link held by a thread, leaving the
    /// archived worktree rows themselves intact.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        thread_id: ThreadId,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE thread_id = ?",
            )?;
            stmt.bind(&thread_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Return true if at least one thread still links to the given archived
    /// worktree row.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }

    /// Build a map of thread id -> (worktree path -> branch name) for every
    /// archived worktree that recorded a branch name.
    pub fn get_all_archived_branch_names(
        &self,
    ) -> anyhow::Result<HashMap<ThreadId, HashMap<PathBuf, String>>> {
        let rows = self.select::<(ThreadId, String, String)>(
            "SELECT t.thread_id, a.worktree_path, a.branch_name \
             FROM thread_archived_worktrees t \
             JOIN archived_git_worktrees a ON a.id = t.archived_worktree_id \
             WHERE a.branch_name IS NOT NULL \
             ORDER BY a.id ASC",
        )?()?;

        let mut result: HashMap<ThreadId, HashMap<PathBuf, String>> = HashMap::default();
        for (thread_id, worktree_path, branch_name) in rows {
            // Ascending id order means a later archive of the same path wins.
            result
                .entry(thread_id)
                .or_default()
                .insert(PathBuf::from(worktree_path), branch_name);
        }
        Ok(result)
    }
}
1506
impl Column for ThreadMetadata {
    /// Decode one `sidebar_threads` row. Column order must match the SELECT
    /// in `ThreadMetadataDb::list`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (thread_id_uuid, next): (uuid::Uuid, i32) = Column::column(statement, start_index)?;
        let (id, next): (Option<Arc<str>>, i32) = Column::column(statement, next)?;
        let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (title, next): (String, i32) = Column::column(statement, next)?;
        let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
        let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (archived, next): (bool, i32) = Column::column(statement, next)?;
        let (main_worktree_paths_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (main_worktree_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (remote_connection_json, next): (Option<String>, i32) =
            Column::column(statement, next)?;

        // NULL agent_id denotes the built-in Zed agent (see `save`).
        let agent_id = agent_id
            .map(|id| AgentId::new(id))
            .unwrap_or(ZED_AGENT_ID.clone());

        // Timestamps were persisted as RFC 3339 text.
        let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
        let created_at = created_at_str
            .as_deref()
            .map(DateTime::parse_from_rfc3339)
            .transpose()?
            .map(|dt| dt.with_timezone(&Utc));

        let folder_paths = folder_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: folder_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let main_worktree_paths = main_worktree_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: main_worktree_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let remote_connection = remote_connection_json
            .as_deref()
            .map(serde_json::from_str::<RemoteConnectionOptions>)
            .transpose()
            .context("deserialize thread metadata remote connection")?;

        // Fall back to empty worktree paths if the stored lists don't combine.
        let worktree_paths = WorktreePaths::from_path_lists(main_worktree_paths, folder_paths)
            .unwrap_or_else(|_| WorktreePaths::default());

        let thread_id = ThreadId(thread_id_uuid);

        Ok((
            ThreadMetadata {
                thread_id,
                session_id: id.map(acp::SessionId::new),
                agent_id,
                // Empty or default titles round-trip back to None so the UI
                // shows its placeholder.
                title: if title.is_empty() || title == DEFAULT_THREAD_TITLE {
                    None
                } else {
                    Some(title.into())
                },
                updated_at,
                created_at,
                worktree_paths,
                remote_connection,
                archived,
            },
            next,
        ))
    }
}
1586
impl Column for ArchivedGitWorktree {
    /// Decode one `archived_git_worktrees` row. Column order must match the
    /// SELECT in `get_archived_worktrees_for_thread`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (i64, i32) = Column::column(statement, start_index)?;
        let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;

        Ok((
            ArchivedGitWorktree {
                id,
                worktree_path: PathBuf::from(worktree_path_str),
                main_repo_path: PathBuf::from(main_repo_path_str),
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            },
            next,
        ))
    }
}
1611
1612#[cfg(test)]
1613mod tests {
1614 use super::*;
1615 use acp_thread::StubAgentConnection;
1616 use action_log::ActionLog;
1617 use agent::DbThread;
1618 use agent_client_protocol as acp;
1619
1620 use gpui::{TestAppContext, VisualTestContext};
1621 use project::FakeFs;
1622 use project::Project;
1623 use remote::WslConnectionOptions;
1624 use std::path::Path;
1625 use std::rc::Rc;
1626 use workspace::MultiWorkspace;
1627
1628 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1629 DbThread {
1630 title: title.to_string().into(),
1631 messages: Vec::new(),
1632 updated_at,
1633 detailed_summary: None,
1634 initial_project_snapshot: None,
1635 cumulative_token_usage: Default::default(),
1636 request_token_usage: Default::default(),
1637 model: None,
1638 profile: None,
1639 imported: false,
1640 subagent_context: None,
1641 speed: None,
1642 thinking_enabled: false,
1643 thinking_effort: None,
1644 draft_prompt: None,
1645 ui_scroll_position: None,
1646 }
1647 }
1648
1649 fn make_metadata(
1650 session_id: &str,
1651 title: &str,
1652 updated_at: DateTime<Utc>,
1653 folder_paths: PathList,
1654 ) -> ThreadMetadata {
1655 ThreadMetadata {
1656 thread_id: ThreadId::new(),
1657 archived: false,
1658 session_id: Some(acp::SessionId::new(session_id)),
1659 agent_id: agent::ZED_AGENT_ID.clone(),
1660 title: if title.is_empty() {
1661 None
1662 } else {
1663 Some(title.to_string().into())
1664 },
1665 updated_at,
1666 created_at: Some(updated_at),
1667 worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
1668 remote_connection: None,
1669 }
1670 }
1671
    /// Install the globals most tests need (settings, theme, editor, fs,
    /// both thread stores, model registry). Order matters: settings must be
    /// in place before the other subsystems initialize.
    fn init_test(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init("0.0.0".parse().unwrap(), cx);
            prompt_store::init(cx);
            <dyn Fs>::set_global(fs, cx);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
            language_model::LanguageModelRegistry::test(cx);
        });
        // Let any init-spawned tasks (e.g. the store's initial reload) finish.
        cx.run_until_parked();
    }
1688
    /// Create a test workspace window around `project` and build an
    /// `AgentPanel` inside it, returning both the panel and the visual test
    /// context driving the window.
    fn setup_panel_with_project(
        project: Entity<Project>,
        cx: &mut TestAppContext,
    ) -> (Entity<crate::AgentPanel>, VisualTestContext) {
        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace_entity = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();
        let mut vcx = VisualTestContext::from_window(multi_workspace.into(), cx);
        let panel = workspace_entity.update_in(&mut vcx, |workspace, window, cx| {
            cx.new(|cx| crate::AgentPanel::new(workspace, None, window, cx))
        });
        (panel, vcx)
    }
1704
1705 fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) {
1706 let kvp = cx.update(|cx| KeyValueStore::global(cx));
1707 smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string()))
1708 .unwrap();
1709 }
1710
    /// Clear the backfill marker, then run the thread-metadata and
    /// remote-connection migrations and wait for them to settle.
    fn run_thread_metadata_migrations(cx: &mut TestAppContext) {
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            let migration_task = migrate_thread_metadata(cx);
            migrate_thread_remote_connections(cx, migration_task);
        });
        cx.run_until_parked();
    }
1719
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Open a per-test database (named after the test thread) so parallel
        // tests don't share state.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        // Seed two threads directly into the database, bypassing the store.
        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Initialize the global store after the rows already exist.
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        // The store's initial reload should have loaded both threads and
        // indexed them by session id and by folder paths.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1788
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );
        let session1_thread_id = initial_metadata.thread_id;

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );
        let session2_thread_id = second_metadata.thread_id;

        // Save two threads on different paths through the store.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // Each thread should be indexed under its own path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 with its paths moved to the second project.
        let moved_metadata = ThreadMetadata {
            thread_id: session1_thread_id,
            session_id: Some(acp::SessionId::new("session-1")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("First Thread".into()),
            updated_at: updated_time,
            created_at: Some(updated_time),
            worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        // The path index must reflect the move: nothing left on the first
        // path, both sessions on the second.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 2);
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-1"))
                    .is_some()
            );
            assert!(
                store
                    .entry_by_session(&acp::SessionId::new("session-2"))
                    .is_some()
            );

            let first_path_entries: Vec<_> = store
                .entries_for_path(&first_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(first_path_entries.is_empty());

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Deleting session-2 should remove it from both the entry list and
        // the path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(session2_thread_id, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            assert_eq!(store.entry_ids().count(), 1);

            let second_path_entries: Vec<_> = store
                .entries_for_path(&second_paths, None)
                .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1915
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for "a-session-0": the migration must NOT
        // overwrite this entry even though the native store also has it.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("a-session-0")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Four threads in the native store: one duplicating "a-session-0",
        // two with projects, and one project-less thread.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // One entry per distinct session — "a-session-0" was not duplicated.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing entry kept its original title and archived state.
        let existing_metadata = list
            .iter()
            .find(|metadata| {
                metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .unwrap();
        assert_eq!(existing_metadata.display_title(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        // The three missing threads were migrated in.
        let migrated_session_ids: Vec<_> = list
            .iter()
            .filter_map(|metadata| metadata.session_id.as_ref().map(|s| s.0.to_string()))
            .collect();
        assert!(migrated_session_ids.iter().any(|s| s == "a-session-1"));
        assert!(migrated_session_ids.iter().any(|s| s == "b-session-0"));
        assert!(migrated_session_ids.iter().any(|s| s == "projectless"));

        // Migrated threads are archived so they surface in the archive view.
        let migrated_entries: Vec<_> = list
            .iter()
            .filter(|metadata| {
                !metadata
                    .session_id
                    .as_ref()
                    .is_some_and(|s| s.0.as_ref() == "a-session-0")
            })
            .collect();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
2034
    /// If a thread saved in the native `ThreadStore` already has a metadata
    /// row for its session, the migration must not create a duplicate row
    /// for that session.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        // Pre-existing metadata row for the session the native store will
        // save below.
        let existing_metadata = ThreadMetadata {
            thread_id: ThreadId::new(),
            session_id: Some(acp::SessionId::new("existing-session")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Existing Metadata".into()),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
            remote_connection: None,
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save the same session in the native thread store, with a newer
        // timestamp and a different title than the metadata row above.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // Still exactly one row for the session: the migration did not
        // duplicate the already-existing metadata entry.
        assert_eq!(list.len(), 1);
        assert_eq!(
            list[0].session_id.as_ref().unwrap().0.as_ref(),
            "existing-session"
        );
    }
2094
    /// The remote-connection backfill matches a thread's folder paths to a
    /// workspace row in the workspace DB and copies that workspace's remote
    /// connection (here a WSL connection) onto the thread metadata.
    #[gpui::test]
    async fn test_migrate_thread_remote_connections_backfills_from_workspace_db(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let folder_paths = PathList::new(&[Path::new("/remote-project")]);
        let updated_at = Utc::now();
        // Metadata starts out with no remote connection; the migration is
        // expected to fill it in.
        let metadata = make_metadata(
            "remote-session",
            "Remote Thread",
            updated_at,
            folder_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });
        cx.run_until_parked();

        // Seed the workspace DB with a WSL remote connection and point a
        // workspace row at the thread's folder paths.
        let workspace_db = cx.update(|cx| WorkspaceDb::global(cx));
        let workspace_id = workspace_db.next_id().await.unwrap();
        let serialized_paths = folder_paths.serialize();
        let remote_connection_id = 1_i64;
        workspace_db
            .write(move |conn| {
                let mut stmt = Statement::prepare(
                    conn,
                    "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)",
                )?;
                // Parameter indices are 1-based; each `bind` returns the next
                // index, so the bindings line up with ?1..?4 in order.
                let mut next_index = stmt.bind(&remote_connection_id, 1)?;
                next_index = stmt.bind(&"wsl", next_index)?;
                next_index = stmt.bind(&Some("anth".to_string()), next_index)?;
                stmt.bind(&Some("Ubuntu".to_string()), next_index)?;
                stmt.exec()?;

                let mut stmt = Statement::prepare(
                    conn,
                    "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1",
                )?;
                let mut next_index = stmt.bind(&workspace_id, 1)?;
                next_index = stmt.bind(&serialized_paths.paths, next_index)?;
                next_index = stmt.bind(&serialized_paths.order, next_index)?;
                stmt.bind(&Some(remote_connection_id as i32), next_index)?;
                stmt.exec()
            })
            .await
            .unwrap();

        // Clear the backfill marker so the migration runs again for this
        // test, then kick it off with an already-completed predecessor task.
        clear_thread_metadata_remote_connection_backfill(cx);
        cx.update(|cx| {
            migrate_thread_remote_connections(cx, Task::ready(Ok(())));
        });
        cx.run_until_parked();

        let metadata = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store
                .read(cx)
                .entry_by_session(&acp::SessionId::new("remote-session"))
                .cloned()
                .expect("expected migrated metadata row")
        });

        // The WSL connection seeded above must now be attached to the thread.
        assert_eq!(
            metadata.remote_connection,
            Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
                distro_name: "Ubuntu".to_string(),
                user: Some("anth".to_string()),
            }))
        );
    }
2170
    /// The migration archives all but the five most recently updated threads
    /// per project; projects with five or fewer threads keep everything
    /// unarchived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        // (timestamps grow with the index, so a higher index is more recent).
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        // Save each thread through the native store, awaiting each write so
        // the rows exist before the migration runs.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        run_thread_metadata_migrations(cx);

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // All ten threads were migrated (archived entries are still listed).
        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the head of the list is the most recent.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {:?} to be unarchived (top 5 most recent)",
                entry.session_id
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {:?} to be archived (older than top 5)",
                entry.session_id
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| *m.folder_paths() == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
2259
    /// Events from a thread with no content must not populate metadata: a
    /// title change on an empty thread is ignored, and only pushing content
    /// causes the event handler to record the real session id.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let session_id = thread.read_with(&vcx, |t, _| t.session_id().clone());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Initial metadata was created by the panel with session_id: None.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(
                store.entry(thread_id).unwrap().session_id.is_none(),
                "expected initial panel metadata to have no session_id"
            );
        });

        // Setting a title on an empty thread should be ignored by the
        // event handler (entries are empty), leaving session_id as None.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Draft Thread".into(), cx).detach();
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert!(
                store.entry(thread_id).unwrap().session_id.is_none(),
                "expected title updates on empty thread to be ignored by event handler"
            );
        });

        // Pushing content makes entries non-empty, so the event handler
        // should now update metadata with the real session_id.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Still a single metadata row, but now carrying the session id.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert_eq!(
                store.entry(thread_id).unwrap().session_id.as_ref(),
                Some(&session_id),
            );
        });
    }
2316
    /// Metadata for a thread that has content must survive the thread entity
    /// being released (here, by dropping the panel that owns it).
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project, cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let session_id = crate::test_support::active_session_id(&panel, &vcx);
        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());

        // Give the thread content so its metadata is considered non-empty.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });

        // Dropping the panel releases the ConversationView and its thread.
        drop(panel);
        cx.update(|_| {});
        cx.run_until_parked();

        // The metadata row is still present after the release.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            assert_eq!(store.entry_ids().count(), 1);
            assert!(store.entry_by_session(&session_id).is_some());
        });
    }
2353
    /// Threads created in a project without any worktree end up with empty
    /// folder paths and are archived by default; threads in a project with a
    /// worktree keep its paths and remain unarchived.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;

        // Thread in project without worktree
        let (panel_no_wt, mut vcx_no_wt) = setup_panel_with_project(project_without_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_no_wt,
            StubAgentConnection::new(),
            &mut vcx_no_wt,
        );
        let thread_no_wt = panel_no_wt.read_with(&vcx_no_wt, |panel, cx| {
            panel.active_agent_thread(cx).unwrap()
        });
        // Push content so the event handler records metadata for the thread.
        thread_no_wt.update_in(&mut vcx_no_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("No Project Thread".into(), cx).detach();
        });
        vcx_no_wt.run_until_parked();
        let session_without_worktree =
            crate::test_support::active_session_id(&panel_no_wt, &vcx_no_wt);

        // Thread in project with worktree
        let (panel_wt, mut vcx_wt) = setup_panel_with_project(project_with_worktree, cx);
        crate::test_support::open_thread_with_connection(
            &panel_wt,
            StubAgentConnection::new(),
            &mut vcx_wt,
        );
        let thread_wt =
            panel_wt.read_with(&vcx_wt, |panel, cx| panel.active_agent_thread(cx).unwrap());
        thread_wt.update_in(&mut vcx_wt, |thread, _window, cx| {
            thread.push_user_content_block(None, "content".into(), cx);
            thread.set_title("Project Thread".into(), cx).detach();
        });
        vcx_wt.run_until_parked();
        let session_with_worktree = crate::test_support::active_session_id(&panel_wt, &vcx_wt);

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // No worktree: empty paths and archived by default.
            let without_worktree = store
                .entry_by_session(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths().is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            // With worktree: paths recorded and unarchived.
            let with_worktree = store
                .entry_by_session(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                *with_worktree.folder_paths(),
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
2424
2425 #[gpui::test]
2426 async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
2427 init_test(cx);
2428
2429 let fs = FakeFs::new(cx.executor());
2430 let project = Project::test(fs, None::<&Path>, cx).await;
2431 let connection = Rc::new(StubAgentConnection::new());
2432
2433 // Create a regular (non-subagent) thread through the panel.
2434 let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
2435 crate::test_support::open_thread_with_connection(&panel, (*connection).clone(), &mut vcx);
2436
2437 let regular_thread =
2438 panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
2439 let regular_session_id = regular_thread.read_with(&vcx, |t, _| t.session_id().clone());
2440
2441 regular_thread.update_in(&mut vcx, |thread, _window, cx| {
2442 thread.push_user_content_block(None, "content".into(), cx);
2443 thread.set_title("Regular Thread".into(), cx).detach();
2444 });
2445 vcx.run_until_parked();
2446
2447 // Create a standalone subagent AcpThread (not wrapped in a
2448 // ConversationView). The ThreadMetadataStore only observes
2449 // ConversationView events, so this thread's events should
2450 // have no effect on sidebar metadata.
2451 let subagent_session_id = acp::SessionId::new("subagent-session");
2452 let subagent_thread = cx.update(|cx| {
2453 let action_log = cx.new(|_| ActionLog::new(project.clone()));
2454 cx.new(|cx| {
2455 acp_thread::AcpThread::new(
2456 Some(regular_session_id.clone()),
2457 Some("Subagent Thread".into()),
2458 None,
2459 connection.clone(),
2460 project.clone(),
2461 action_log,
2462 subagent_session_id.clone(),
2463 watch::Receiver::constant(acp::PromptCapabilities::new()),
2464 cx,
2465 )
2466 })
2467 });
2468
2469 cx.update(|cx| {
2470 subagent_thread.update(cx, |thread, cx| {
2471 thread
2472 .set_title("Subagent Thread Title".into(), cx)
2473 .detach();
2474 });
2475 });
2476 cx.run_until_parked();
2477
2478 // Only the regular thread should appear in sidebar metadata.
2479 // The subagent thread is excluded because the metadata store
2480 // only observes ConversationView events.
2481 let list = cx.update(|cx| {
2482 let store = ThreadMetadataStore::global(cx);
2483 store.read(cx).entries().cloned().collect::<Vec<_>>()
2484 });
2485
2486 assert_eq!(
2487 list.len(),
2488 1,
2489 "Expected only the regular thread in sidebar metadata, \
2490 but found {} entries (subagent threads are leaking into the sidebar)",
2491 list.len(),
2492 );
2493 assert_eq!(list[0].session_id.as_ref().unwrap(), ®ular_session_id);
2494 assert_eq!(list[0].display_title(), "Regular Thread");
2495 }
2496
2497 #[test]
2498 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
2499 let now = Utc::now();
2500
2501 let meta = make_metadata("session-1", "First Thread", now, PathList::default());
2502 let thread_id = meta.thread_id;
2503 let operations = vec![DbOperation::Upsert(meta), DbOperation::Delete(thread_id)];
2504
2505 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
2506
2507 assert_eq!(deduped.len(), 1);
2508 assert_eq!(deduped[0], DbOperation::Delete(thread_id));
2509 }
2510
2511 #[test]
2512 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
2513 let now = Utc::now();
2514 let later = now + chrono::Duration::seconds(1);
2515
2516 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
2517 let shared_thread_id = old_metadata.thread_id;
2518 let new_metadata = ThreadMetadata {
2519 thread_id: shared_thread_id,
2520 ..make_metadata("session-1", "New Title", later, PathList::default())
2521 };
2522
2523 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2524 DbOperation::Upsert(old_metadata),
2525 DbOperation::Upsert(new_metadata.clone()),
2526 ]);
2527
2528 assert_eq!(deduped.len(), 1);
2529 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
2530 }
2531
2532 #[test]
2533 fn test_dedup_db_operations_preserves_distinct_sessions() {
2534 let now = Utc::now();
2535
2536 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
2537 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
2538 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
2539 DbOperation::Upsert(metadata1.clone()),
2540 DbOperation::Upsert(metadata2.clone()),
2541 ]);
2542
2543 assert_eq!(deduped.len(), 2);
2544 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
2545 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
2546 }
2547
    /// A thread can be archived (removed from per-path listings, surfaced via
    /// `archived_entries`) and then unarchived back to its original state.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially the thread is listed for its path and nothing is archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        // Archived: gone from the path listing, present in archived_entries.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store.archived_entries().collect();
            assert_eq!(archived.len(), 1);
            assert_eq!(
                archived[0].session_id.as_ref().unwrap().0.as_ref(),
                "session-1"
            );
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(thread_id, cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores the original listing.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(path_entries, vec!["session-1"]);

            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2630
2631 #[gpui::test]
2632 async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
2633 init_test(cx);
2634
2635 let paths = PathList::new(&[Path::new("/project-a")]);
2636 let now = Utc::now();
2637
2638 let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
2639 let metadata2 = make_metadata(
2640 "session-2",
2641 "Archived Thread",
2642 now - chrono::Duration::seconds(1),
2643 paths.clone(),
2644 );
2645 let session2_thread_id = metadata2.thread_id;
2646
2647 cx.update(|cx| {
2648 let store = ThreadMetadataStore::global(cx);
2649 store.update(cx, |store, cx| {
2650 store.save(metadata1, cx);
2651 store.save(metadata2, cx);
2652 });
2653 });
2654
2655 cx.run_until_parked();
2656
2657 cx.update(|cx| {
2658 let store = ThreadMetadataStore::global(cx);
2659 store.update(cx, |store, cx| {
2660 store.archive(session2_thread_id, None, cx);
2661 });
2662 });
2663
2664 cx.run_until_parked();
2665
2666 cx.update(|cx| {
2667 let store = ThreadMetadataStore::global(cx);
2668 let store = store.read(cx);
2669
2670 let path_entries: Vec<_> = store
2671 .entries_for_path(&paths, None)
2672 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
2673 .collect();
2674 assert_eq!(path_entries, vec!["session-1"]);
2675
2676 assert_eq!(store.entries().count(), 2);
2677
2678 let archived: Vec<_> = store
2679 .archived_entries()
2680 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
2681 .collect();
2682 assert_eq!(archived, vec!["session-2"]);
2683 });
2684 }
2685
    /// Entry listings are filtered by remote connection identity: local
    /// threads and threads on different remote hosts never mix, even when
    /// they share the same folder paths. `entries_for_main_worktree_path`
    /// applies the same filter while also including threads in linked
    /// worktrees of the main path.
    #[gpui::test]
    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
        init_test(cx);

        let main_paths = PathList::new(&[Path::new("/project-a")]);
        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
        let now = Utc::now();

        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });

        // Three threads at the same folder_paths but different hosts.
        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());

        let mut remote_a_thread = make_metadata(
            "remote-a-session",
            "Remote A Thread",
            now - chrono::Duration::seconds(1),
            main_paths.clone(),
        );
        remote_a_thread.remote_connection = Some(remote_a.clone());

        let mut remote_b_thread = make_metadata(
            "remote-b-session",
            "Remote B Thread",
            now - chrono::Duration::seconds(2),
            main_paths.clone(),
        );
        remote_b_thread.remote_connection = Some(remote_b.clone());

        // Linked-worktree paths: main worktree /project-a, linked /wt-feature.
        let linked_worktree_paths =
            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();

        // A local and a remote thread running inside the linked worktree.
        let local_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("local-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Local Linked".into()),
            updated_at: now,
            created_at: Some(now),
            worktree_paths: linked_worktree_paths.clone(),
            remote_connection: None,
        };

        let remote_linked_thread = ThreadMetadata {
            thread_id: ThreadId::new(),
            archived: false,
            session_id: Some(acp::SessionId::new("remote-linked")),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: Some("Remote Linked".into()),
            updated_at: now - chrono::Duration::seconds(1),
            created_at: Some(now - chrono::Duration::seconds(1)),
            worktree_paths: linked_worktree_paths,
            remote_connection: Some(remote_a.clone()),
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(local_thread, cx);
                store.save(remote_a_thread, cx);
                store.save(remote_b_thread, cx);
                store.save(local_linked_thread, cx);
                store.save(remote_linked_thread, cx);
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // No remote filter (`None`) returns only local threads.
            let local_entries: Vec<_> = store
                .entries_for_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(local_entries, vec!["local-session"]);

            // Filtering by remote A returns only remote A's thread.
            let remote_a_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_a_entries, vec!["remote-a-session"]);

            // Filtering by remote B returns only remote B's thread.
            let remote_b_entries: Vec<_> = store
                .entries_for_path(&main_paths, Some(&remote_b))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(remote_b_entries, vec!["remote-b-session"]);

            // Main-worktree lookup (local) also includes the linked-worktree
            // thread whose main path matches.
            let mut local_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            local_main_entries.sort();
            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);

            // Same for remote A: direct thread plus its linked-worktree thread.
            let mut remote_main_entries: Vec<_> = store
                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            remote_main_entries.sort();
            assert_eq!(
                remote_main_entries,
                vec!["remote-a-session", "remote-linked"]
            );
        });
    }
2795
2796 #[gpui::test]
2797 async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
2798 init_test(cx);
2799
2800 let paths = PathList::new(&[Path::new("/project-a")]);
2801 let now = Utc::now();
2802
2803 let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
2804 let m2 = make_metadata(
2805 "session-2",
2806 "Thread Two",
2807 now - chrono::Duration::seconds(1),
2808 paths.clone(),
2809 );
2810 let m3 = make_metadata(
2811 "session-3",
2812 "Thread Three",
2813 now - chrono::Duration::seconds(2),
2814 paths,
2815 );
2816
2817 cx.update(|cx| {
2818 let store = ThreadMetadataStore::global(cx);
2819 store.update(cx, |store, cx| {
2820 store.save_all(vec![m1, m2, m3], cx);
2821 });
2822 });
2823
2824 cx.run_until_parked();
2825
2826 cx.update(|cx| {
2827 let store = ThreadMetadataStore::global(cx);
2828 let store = store.read(cx);
2829
2830 assert_eq!(store.entries().count(), 3);
2831 assert!(
2832 store
2833 .entry_by_session(&acp::SessionId::new("session-1"))
2834 .is_some()
2835 );
2836 assert!(
2837 store
2838 .entry_by_session(&acp::SessionId::new("session-2"))
2839 .is_some()
2840 );
2841 assert!(
2842 store
2843 .entry_by_session(&acp::SessionId::new("session-3"))
2844 .is_some()
2845 );
2846
2847 assert_eq!(store.entry_ids().count(), 3);
2848 });
2849 }
2850
    /// The archived flag must survive a `reload`, showing that it is
    /// persisted rather than held only in memory.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());
        let thread_id = metadata.thread_id;

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });

        cx.run_until_parked();

        // Drop the in-memory state and reload everything from storage.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // The row came back and is still flagged archived.
            let thread = store
                .entry_by_session(&acp::SessionId::new("session-1"))
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // Archived rows stay hidden from the path listing after reload.
            let path_entries: Vec<_> = store
                .entries_for_path(&paths, None)
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert!(path_entries.is_empty());

            let archived: Vec<_> = store
                .archived_entries()
                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                .collect();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2909
2910 #[gpui::test]
2911 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2912 init_test(cx);
2913
2914 cx.run_until_parked();
2915
2916 cx.update(|cx| {
2917 let store = ThreadMetadataStore::global(cx);
2918 store.update(cx, |store, cx| {
2919 store.archive(ThreadId::new(), None, cx);
2920 });
2921 });
2922
2923 cx.run_until_parked();
2924
2925 cx.update(|cx| {
2926 let store = ThreadMetadataStore::global(cx);
2927 let store = store.read(cx);
2928
2929 assert!(store.is_empty());
2930 assert_eq!(store.entries().count(), 0);
2931 assert_eq!(store.archived_entries().count(), 0);
2932 });
2933 }
2934
2935 #[gpui::test]
2936 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2937 init_test(cx);
2938
2939 let paths = PathList::new(&[Path::new("/project-a")]);
2940 let now = Utc::now();
2941 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2942 let thread_id = metadata.thread_id;
2943
2944 cx.update(|cx| {
2945 let store = ThreadMetadataStore::global(cx);
2946 store.update(cx, |store, cx| {
2947 store.save(metadata.clone(), cx);
2948 store.archive(thread_id, None, cx);
2949 });
2950 });
2951
2952 cx.run_until_parked();
2953
2954 cx.update(|cx| {
2955 let store = ThreadMetadataStore::global(cx);
2956 let store = store.read(cx);
2957
2958 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2959 pretty_assertions::assert_eq!(
2960 entries,
2961 vec![ThreadMetadata {
2962 archived: true,
2963 ..metadata
2964 }]
2965 );
2966 });
2967 }
2968
2969 #[gpui::test]
2970 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2971 init_test(cx);
2972 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2973
2974 let id = store
2975 .read_with(cx, |store, cx| {
2976 store.create_archived_worktree(
2977 "/tmp/worktree".to_string(),
2978 "/home/user/repo".to_string(),
2979 Some("feature-branch".to_string()),
2980 "staged_aaa".to_string(),
2981 "unstaged_bbb".to_string(),
2982 "original_000".to_string(),
2983 cx,
2984 )
2985 })
2986 .await
2987 .unwrap();
2988
2989 let thread_id_1 = ThreadId::new();
2990
2991 store
2992 .read_with(cx, |store, cx| {
2993 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
2994 })
2995 .await
2996 .unwrap();
2997
2998 let worktrees = store
2999 .read_with(cx, |store, cx| {
3000 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3001 })
3002 .await
3003 .unwrap();
3004
3005 assert_eq!(worktrees.len(), 1);
3006 let wt = &worktrees[0];
3007 assert_eq!(wt.id, id);
3008 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
3009 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
3010 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
3011 assert_eq!(wt.staged_commit_hash, "staged_aaa");
3012 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
3013 assert_eq!(wt.original_commit_hash, "original_000");
3014 }
3015
3016 #[gpui::test]
3017 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
3018 init_test(cx);
3019 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3020
3021 let id = store
3022 .read_with(cx, |store, cx| {
3023 store.create_archived_worktree(
3024 "/tmp/worktree".to_string(),
3025 "/home/user/repo".to_string(),
3026 Some("main".to_string()),
3027 "deadbeef".to_string(),
3028 "deadbeef".to_string(),
3029 "original_000".to_string(),
3030 cx,
3031 )
3032 })
3033 .await
3034 .unwrap();
3035
3036 let thread_id_1 = ThreadId::new();
3037
3038 store
3039 .read_with(cx, |store, cx| {
3040 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3041 })
3042 .await
3043 .unwrap();
3044
3045 store
3046 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
3047 .await
3048 .unwrap();
3049
3050 let worktrees = store
3051 .read_with(cx, |store, cx| {
3052 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3053 })
3054 .await
3055 .unwrap();
3056 assert!(worktrees.is_empty());
3057 }
3058
3059 #[gpui::test]
3060 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
3061 init_test(cx);
3062 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3063
3064 let id = store
3065 .read_with(cx, |store, cx| {
3066 store.create_archived_worktree(
3067 "/tmp/worktree".to_string(),
3068 "/home/user/repo".to_string(),
3069 None,
3070 "abc123".to_string(),
3071 "abc123".to_string(),
3072 "original_000".to_string(),
3073 cx,
3074 )
3075 })
3076 .await
3077 .unwrap();
3078
3079 let thread_id_1 = ThreadId::new();
3080 let thread_id_2 = ThreadId::new();
3081
3082 store
3083 .read_with(cx, |store, cx| {
3084 store.link_thread_to_archived_worktree(thread_id_1, id, cx)
3085 })
3086 .await
3087 .unwrap();
3088
3089 store
3090 .read_with(cx, |store, cx| {
3091 store.link_thread_to_archived_worktree(thread_id_2, id, cx)
3092 })
3093 .await
3094 .unwrap();
3095
3096 let wt1 = store
3097 .read_with(cx, |store, cx| {
3098 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3099 })
3100 .await
3101 .unwrap();
3102
3103 let wt2 = store
3104 .read_with(cx, |store, cx| {
3105 store.get_archived_worktrees_for_thread(thread_id_2, cx)
3106 })
3107 .await
3108 .unwrap();
3109
3110 assert_eq!(wt1.len(), 1);
3111 assert_eq!(wt2.len(), 1);
3112 assert_eq!(wt1[0].id, wt2[0].id);
3113 }
3114
3115 #[gpui::test]
3116 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
3117 init_test(cx);
3118 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3119
3120 let original_paths = PathList::new(&[
3121 Path::new("/projects/worktree-a"),
3122 Path::new("/projects/worktree-b"),
3123 Path::new("/other/unrelated"),
3124 ]);
3125 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3126 let thread_id = meta.thread_id;
3127
3128 store.update(cx, |store, cx| {
3129 store.save(meta, cx);
3130 });
3131
3132 let replacements = vec![
3133 (
3134 PathBuf::from("/projects/worktree-a"),
3135 PathBuf::from("/restored/worktree-a"),
3136 ),
3137 (
3138 PathBuf::from("/projects/worktree-b"),
3139 PathBuf::from("/restored/worktree-b"),
3140 ),
3141 ];
3142
3143 store.update(cx, |store, cx| {
3144 store.complete_worktree_restore(thread_id, &replacements, cx);
3145 });
3146
3147 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3148 let entry = entry.unwrap();
3149 let paths = entry.folder_paths().paths();
3150 assert_eq!(paths.len(), 3);
3151 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3152 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3153 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3154 }
3155
3156 #[gpui::test]
3157 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
3158 init_test(cx);
3159 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3160
3161 let original_paths =
3162 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3163 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3164 let thread_id = meta.thread_id;
3165
3166 store.update(cx, |store, cx| {
3167 store.save(meta, cx);
3168 });
3169
3170 let replacements = vec![
3171 (
3172 PathBuf::from("/projects/worktree-a"),
3173 PathBuf::from("/new/worktree-a"),
3174 ),
3175 (
3176 PathBuf::from("/nonexistent/path"),
3177 PathBuf::from("/should/not/appear"),
3178 ),
3179 ];
3180
3181 store.update(cx, |store, cx| {
3182 store.complete_worktree_restore(thread_id, &replacements, cx);
3183 });
3184
3185 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3186 let entry = entry.unwrap();
3187 let paths = entry.folder_paths().paths();
3188 assert_eq!(paths.len(), 2);
3189 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3190 assert!(paths.contains(&PathBuf::from("/other/path")));
3191 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3192 }
3193
3194 #[gpui::test]
3195 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
3196 init_test(cx);
3197 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3198
3199 let original_paths = PathList::new(&[
3200 Path::new("/projects/worktree-a"),
3201 Path::new("/projects/worktree-b"),
3202 Path::new("/other/unrelated"),
3203 ]);
3204 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
3205 let thread_id = meta.thread_id;
3206
3207 store.update(cx, |store, cx| {
3208 store.save(meta, cx);
3209 });
3210
3211 let replacements = vec![
3212 (
3213 PathBuf::from("/projects/worktree-a"),
3214 PathBuf::from("/restored/worktree-a"),
3215 ),
3216 (
3217 PathBuf::from("/projects/worktree-b"),
3218 PathBuf::from("/restored/worktree-b"),
3219 ),
3220 ];
3221
3222 store.update(cx, |store, cx| {
3223 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3224 });
3225
3226 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3227 let entry = entry.unwrap();
3228 let paths = entry.folder_paths().paths();
3229 assert_eq!(paths.len(), 3);
3230 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
3231 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
3232 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
3233 }
3234
3235 #[gpui::test]
3236 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
3237 init_test(cx);
3238 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3239
3240 let original_paths =
3241 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
3242 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
3243 let thread_id = meta.thread_id;
3244
3245 store.update(cx, |store, cx| {
3246 store.save(meta, cx);
3247 });
3248
3249 let replacements = vec![
3250 (
3251 PathBuf::from("/projects/worktree-a"),
3252 PathBuf::from("/new/worktree-a"),
3253 ),
3254 (
3255 PathBuf::from("/nonexistent/path"),
3256 PathBuf::from("/should/not/appear"),
3257 ),
3258 ];
3259
3260 store.update(cx, |store, cx| {
3261 store.update_restored_worktree_paths(thread_id, &replacements, cx);
3262 });
3263
3264 let entry = store.read_with(cx, |store, _cx| store.entry(thread_id).cloned());
3265 let entry = entry.unwrap();
3266 let paths = entry.folder_paths().paths();
3267 assert_eq!(paths.len(), 2);
3268 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
3269 assert!(paths.contains(&PathBuf::from("/other/path")));
3270 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
3271 }
3272
3273 #[gpui::test]
3274 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
3275 init_test(cx);
3276 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
3277
3278 let id1 = store
3279 .read_with(cx, |store, cx| {
3280 store.create_archived_worktree(
3281 "/projects/worktree-a".to_string(),
3282 "/home/user/repo".to_string(),
3283 Some("branch-a".to_string()),
3284 "staged_a".to_string(),
3285 "unstaged_a".to_string(),
3286 "original_000".to_string(),
3287 cx,
3288 )
3289 })
3290 .await
3291 .unwrap();
3292
3293 let id2 = store
3294 .read_with(cx, |store, cx| {
3295 store.create_archived_worktree(
3296 "/projects/worktree-b".to_string(),
3297 "/home/user/repo".to_string(),
3298 Some("branch-b".to_string()),
3299 "staged_b".to_string(),
3300 "unstaged_b".to_string(),
3301 "original_000".to_string(),
3302 cx,
3303 )
3304 })
3305 .await
3306 .unwrap();
3307
3308 let thread_id_1 = ThreadId::new();
3309
3310 store
3311 .read_with(cx, |store, cx| {
3312 store.link_thread_to_archived_worktree(thread_id_1, id1, cx)
3313 })
3314 .await
3315 .unwrap();
3316
3317 store
3318 .read_with(cx, |store, cx| {
3319 store.link_thread_to_archived_worktree(thread_id_1, id2, cx)
3320 })
3321 .await
3322 .unwrap();
3323
3324 let worktrees = store
3325 .read_with(cx, |store, cx| {
3326 store.get_archived_worktrees_for_thread(thread_id_1, cx)
3327 })
3328 .await
3329 .unwrap();
3330
3331 assert_eq!(worktrees.len(), 2);
3332
3333 let paths: Vec<&Path> = worktrees
3334 .iter()
3335 .map(|w| w.worktree_path.as_path())
3336 .collect();
3337 assert!(paths.contains(&Path::new("/projects/worktree-a")));
3338 assert!(paths.contains(&Path::new("/projects/worktree-b")));
3339 }
3340
3341 // ── Migration tests ────────────────────────────────────────────────
3342
    /// Exercises migration 7 (the thread_id primary-key migration): rows that
    /// already have a `thread_id` keep it, rows with a NULL `thread_id` are
    /// backfilled with distinct 16-byte ids, and `thread_archived_worktrees`
    /// join rows are rewritten to reference the backfilled ids.
    #[test]
    fn test_thread_id_primary_key_migration_backfills_null_thread_ids() {
        use db::sqlez::connection::Connection;

        let connection =
            Connection::open_memory(Some("test_thread_id_pk_migration_backfills_nulls"));

        // Run migrations 0-6 (the old schema, before the thread_id PK migration).
        let old_migrations: &[&str] = &ThreadMetadataDb::MIGRATIONS[..7];
        connection
            .migrate(ThreadMetadataDb::NAME, old_migrations, &mut |_, _, _| false)
            .expect("old migrations should succeed");

        // Insert rows: one with a thread_id (a fixed 16-byte blob so we can
        // later check it was preserved verbatim), two without.
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at, thread_id) \
                 VALUES ('has-tid', 'Has ThreadId', '2025-01-01T00:00:00Z', X'0102030405060708090A0B0C0D0E0F10')",
            )
            .unwrap()()
        .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-1', 'No ThreadId 1', '2025-01-02T00:00:00Z')",
            )
            .unwrap()()
        .unwrap();
        connection
            .exec(
                "INSERT INTO sidebar_threads \
                 (session_id, title, updated_at) \
                 VALUES ('no-tid-2', 'No ThreadId 2', '2025-01-03T00:00:00Z')",
            )
            .unwrap()()
        .unwrap();

        // Set up archived_git_worktrees + thread_archived_worktrees rows
        // referencing the session without a thread_id.
        connection
            .exec(
                "INSERT INTO archived_git_worktrees \
                 (id, worktree_path, main_repo_path, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (1, '/wt', '/main', 'abc', 'def', '000')",
            )
            .unwrap()()
        .unwrap();
        connection
            .exec(
                "INSERT INTO thread_archived_worktrees \
                 (session_id, archived_worktree_id) \
                 VALUES ('no-tid-1', 1)",
            )
            .unwrap()()
        .unwrap();

        // Run all migrations (0-7). sqlez skips 0-6 and runs only migration 7.
        connection
            .migrate(
                ThreadMetadataDb::NAME,
                ThreadMetadataDb::MIGRATIONS,
                &mut |_, _, _| false,
            )
            .expect("new migration should succeed");

        // All 3 rows should survive with non-NULL thread_ids.
        let count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM sidebar_threads")
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(count, 3, "all 3 rows should survive the migration");

        let null_count: i64 = connection
            .select_row_bound::<(), i64>(
                "SELECT COUNT(*) FROM sidebar_threads WHERE thread_id IS NULL",
            )
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            null_count, 0,
            "no rows should have NULL thread_id after migration"
        );

        // The row that already had a thread_id should keep its original value.
        let original_tid: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("has-tid")
        .unwrap()
        .unwrap();
        assert_eq!(
            original_tid,
            vec![
                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
                0x0F, 0x10
            ],
            "pre-existing thread_id should be preserved"
        );

        // The two rows that had NULL thread_ids should now have distinct non-empty blobs.
        let generated_tid_1: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-1")
        .unwrap()
        .unwrap();
        let generated_tid_2: Vec<u8> = connection
            .select_row_bound::<&str, Vec<u8>>(
                "SELECT thread_id FROM sidebar_threads WHERE session_id = ?",
            )
            .unwrap()("no-tid-2")
        .unwrap()
        .unwrap();
        // 16 bytes matches the size of a serialized UUID (see `ThreadId`).
        assert_eq!(
            generated_tid_1.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_eq!(
            generated_tid_2.len(),
            16,
            "generated thread_id should be 16 bytes"
        );
        assert_ne!(
            generated_tid_1, generated_tid_2,
            "each generated thread_id should be unique"
        );

        // The thread_archived_worktrees join row should have migrated
        // using the backfilled thread_id from the session without a
        // pre-existing thread_id.
        let archived_count: i64 = connection
            .select_row_bound::<(), i64>("SELECT COUNT(*) FROM thread_archived_worktrees")
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            archived_count, 1,
            "thread_archived_worktrees row should survive migration"
        );

        // The thread_archived_worktrees row should reference the
        // backfilled thread_id of the 'no-tid-1' session.
        let archived_tid: Vec<u8> = connection
            .select_row_bound::<(), Vec<u8>>(
                "SELECT thread_id FROM thread_archived_worktrees LIMIT 1",
            )
            .unwrap()(())
        .unwrap()
        .unwrap();
        assert_eq!(
            archived_tid, generated_tid_1,
            "thread_archived_worktrees should reference the backfilled thread_id"
        );
    }
3504
    // ── WorktreePaths tests ────────────────────────────────────────────
3506
3507 /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs.
3508 fn make_worktree_paths(pairs: &[(&str, &str)]) -> WorktreePaths {
3509 let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs
3510 .iter()
3511 .map(|(m, f)| (Path::new(*m), Path::new(*f)))
3512 .unzip();
3513 WorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)).unwrap()
3514 }
3515
3516 #[test]
3517 fn test_thread_worktree_paths_full_add_then_remove_cycle() {
3518 // Full scenario from the issue:
3519 // 1. Start with linked worktree selectric → zed
3520 // 2. Add cloud
3521 // 3. Remove zed
3522
3523 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3524
3525 // Step 2: add cloud
3526 paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud"));
3527
3528 assert_eq!(paths.ordered_pairs().count(), 2);
3529 assert_eq!(
3530 paths.folder_path_list(),
3531 &PathList::new(&[
3532 Path::new("/worktrees/selectric/zed"),
3533 Path::new("/projects/cloud"),
3534 ])
3535 );
3536 assert_eq!(
3537 paths.main_worktree_path_list(),
3538 &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),])
3539 );
3540
3541 // Step 3: remove zed
3542 paths.remove_main_path(Path::new("/projects/zed"));
3543
3544 assert_eq!(paths.ordered_pairs().count(), 1);
3545 assert_eq!(
3546 paths.folder_path_list(),
3547 &PathList::new(&[Path::new("/projects/cloud")])
3548 );
3549 assert_eq!(
3550 paths.main_worktree_path_list(),
3551 &PathList::new(&[Path::new("/projects/cloud")])
3552 );
3553 }
3554
3555 #[test]
3556 fn test_thread_worktree_paths_add_is_idempotent() {
3557 let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]);
3558
3559 paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed"));
3560
3561 assert_eq!(paths.ordered_pairs().count(), 1);
3562 }
3563
3564 #[test]
3565 fn test_thread_worktree_paths_remove_nonexistent_is_noop() {
3566 let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]);
3567
3568 paths.remove_main_path(Path::new("/projects/nonexistent"));
3569
3570 assert_eq!(paths.ordered_pairs().count(), 1);
3571 }
3572
3573 #[test]
3574 fn test_thread_worktree_paths_from_path_lists_preserves_association() {
3575 let folder = PathList::new(&[
3576 Path::new("/worktrees/selectric/zed"),
3577 Path::new("/projects/cloud"),
3578 ]);
3579 let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]);
3580
3581 let paths = WorktreePaths::from_path_lists(main, folder).unwrap();
3582
3583 let pairs: Vec<_> = paths
3584 .ordered_pairs()
3585 .map(|(m, f)| (m.clone(), f.clone()))
3586 .collect();
3587 assert_eq!(pairs.len(), 2);
3588 assert!(pairs.contains(&(
3589 PathBuf::from("/projects/zed"),
3590 PathBuf::from("/worktrees/selectric/zed")
3591 )));
3592 assert!(pairs.contains(&(
3593 PathBuf::from("/projects/cloud"),
3594 PathBuf::from("/projects/cloud")
3595 )));
3596 }
3597
    #[test]
    fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() {
        // Two linked worktrees that share the same main repo path:
        // ordered_pairs keeps both (main, folder) entries, and the expected
        // main-path list is constructed from the duplicated path.
        // NOTE(review): whether `PathList` internally stores the duplicate
        // main path once or twice is not observable from this test, because
        // the expected value is built the same way — TODO confirm against
        // `PathList`'s equality/storage semantics.
        let paths = make_worktree_paths(&[
            ("/projects/zed", "/worktrees/selectric/zed"),
            ("/projects/zed", "/worktrees/feature/zed"),
        ]);

        // Each linked worktree contributes its own pair regardless of the
        // shared main path.
        assert_eq!(paths.ordered_pairs().count(), 2);
        assert_eq!(
            paths.folder_path_list(),
            &PathList::new(&[
                Path::new("/worktrees/selectric/zed"),
                Path::new("/worktrees/feature/zed"),
            ])
        );
        assert_eq!(
            paths.main_worktree_path_list(),
            &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),])
        );
    }
3623
3624 #[test]
3625 fn test_thread_worktree_paths_mismatched_lengths_returns_error() {
3626 let folder = PathList::new(&[
3627 Path::new("/worktrees/selectric/zed"),
3628 Path::new("/projects/cloud"),
3629 ]);
3630 let main = PathList::new(&[Path::new("/projects/zed")]);
3631
3632 let result = WorktreePaths::from_path_lists(main, folder);
3633 assert!(result.is_err());
3634 }
3635
    /// Regression test: archiving a thread created in a git worktree must
    /// preserve the thread's folder paths so that restoring it later does
    /// not prompt the user to re-associate a project.
    #[gpui::test]
    async fn test_archived_thread_retains_paths_after_worktree_removal(cx: &mut TestAppContext) {
        init_test(cx);

        // A project rooted in a single (fake) worktree folder, with a stub
        // agent connection driving the panel's active thread.
        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/worktrees/feature",
            serde_json::json!({ "src": { "main.rs": "" } }),
        )
        .await;
        let project = Project::test(fs, [Path::new("/worktrees/feature")], cx).await;
        let connection = StubAgentConnection::new();

        let (panel, mut vcx) = setup_panel_with_project(project.clone(), cx);
        crate::test_support::open_thread_with_connection(&panel, connection, &mut vcx);

        let thread = panel.read_with(&vcx, |panel, cx| panel.active_agent_thread(cx).unwrap());
        let thread_id = crate::test_support::active_thread_id(&panel, &vcx);

        // Push content so the event handler saves metadata with the
        // project's worktree paths.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.push_user_content_block(None, "Hello".into(), cx);
        });
        vcx.run_until_parked();

        // Verify paths were saved correctly, and snapshot them for the
        // final comparison after archiving + worktree removal.
        let (folder_paths_before, main_paths_before) = cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(
                !entry.folder_paths().is_empty(),
                "thread should have folder paths before archiving"
            );
            (
                entry.folder_paths().clone(),
                entry.main_worktree_paths().clone(),
            )
        });

        // Archive the thread.
        cx.update(|cx| {
            ThreadMetadataStore::global(cx).update(cx, |store, cx| {
                store.archive(thread_id, None, cx);
            });
        });
        cx.run_until_parked();

        // Remove the worktree from the project, simulating what the
        // archive flow does for linked git worktrees.
        let worktree_id = cx.update(|cx| {
            project
                .read(cx)
                .visible_worktrees(cx)
                .next()
                .unwrap()
                .read(cx)
                .id()
        });
        project.update(cx, |project, cx| {
            project.remove_worktree(worktree_id, cx);
        });
        cx.run_until_parked();

        // Trigger a thread event after archiving + worktree removal.
        // In production this happens when an async title-generation task
        // completes after the thread was archived.
        thread.update_in(&mut vcx, |thread, _window, cx| {
            thread.set_title("Generated title".into(), cx).detach();
        });
        vcx.run_until_parked();

        // The archived thread must still have its original folder paths.
        cx.read(|cx| {
            let store = ThreadMetadataStore::global(cx).read(cx);
            let entry = store.entry(thread_id).unwrap();
            assert!(entry.archived, "thread should still be archived");
            assert_eq!(
                entry.display_title().as_ref(),
                "Generated title",
                "title should still be updated for archived threads"
            );
            assert_eq!(
                entry.folder_paths(),
                &folder_paths_before,
                "archived thread must retain its folder paths after worktree \
                 removal + subsequent thread event, otherwise restoring it \
                 will prompt the user to re-associate a project"
            );
            assert_eq!(
                entry.main_worktree_paths(),
                &main_paths_before,
                "archived thread must retain its main worktree paths after \
                 worktree removal + subsequent thread event"
            );
        });
    }
3736}