1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use futures::{FutureExt as _, future::Shared};
20use gpui::{AppContext as _, Entity, Global, Subscription, Task};
21use project::AgentId;
22use ui::{App, Context, SharedString};
23use util::ResultExt as _;
24use workspace::PathList;
25
26use crate::DEFAULT_THREAD_TITLE;
27
/// Entry point for the thread-metadata subsystem: installs the global
/// [`ThreadMetadataStore`] and then kicks off the one-shot legacy migration.
pub fn init(cx: &mut App) {
    // Order matters: the migration below reads the global store.
    ThreadMetadataStore::init_global(cx);
    migrate_thread_metadata(cx);
}
32
33/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
34/// We skip migrating threads that do not have a project.
35///
36/// TODO: Remove this after N weeks of shipping the sidebar
37fn migrate_thread_metadata(cx: &mut App) {
38 let store = ThreadMetadataStore::global(cx);
39 let db = store.read(cx).db.clone();
40
41 cx.spawn(async move |cx| {
42 let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();
43
44 let is_first_migration = existing_entries.is_empty();
45
46 let mut to_migrate = store.read_with(cx, |_store, cx| {
47 ThreadStore::global(cx)
48 .read(cx)
49 .entries()
50 .filter_map(|entry| {
51 if existing_entries.contains(&entry.id.0) {
52 return None;
53 }
54
55 Some(ThreadMetadata {
56 session_id: entry.id,
57 agent_id: ZED_AGENT_ID.clone(),
58 title: entry.title,
59 updated_at: entry.updated_at,
60 created_at: entry.created_at,
61 folder_paths: entry.folder_paths,
62 main_worktree_paths: PathList::default(),
63 archived: true,
64 })
65 })
66 .collect::<Vec<_>>()
67 });
68
69 if to_migrate.is_empty() {
70 return anyhow::Ok(());
71 }
72
73 // On the first migration (no entries in DB yet), keep the 5 most
74 // recent threads per project unarchived.
75 if is_first_migration {
76 let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
77 for entry in &mut to_migrate {
78 if entry.folder_paths.is_empty() {
79 continue;
80 }
81 per_project
82 .entry(entry.folder_paths.clone())
83 .or_default()
84 .push(entry);
85 }
86 for entries in per_project.values_mut() {
87 entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
88 for entry in entries.iter_mut().take(5) {
89 entry.archived = false;
90 }
91 }
92 }
93
94 log::info!("Migrating {} thread store entries", to_migrate.len());
95
96 // Manually save each entry to the database and call reload, otherwise
97 // we'll end up triggering lots of reloads after each save
98 for entry in to_migrate {
99 db.save(entry).await?;
100 }
101
102 log::info!("Finished migrating thread store entries");
103
104 let _ = store.update(cx, |store, cx| store.reload(cx));
105 anyhow::Ok(())
106 })
107 .detach_and_log_err(cx);
108}
109
/// Newtype wrapper that registers the [`ThreadMetadataStore`] entity as a
/// gpui global (installed by `init_global`, read back by `global`).
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
112
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Unique session identifier; primary key of the `sidebar_threads` table.
    pub session_id: acp::SessionId,
    /// Owning agent; `ZED_AGENT_ID` for native threads (stored as NULL).
    pub agent_id: AgentId,
    /// Display title shown in the sidebar.
    pub title: SharedString,
    /// Last modification time; also the DB sort key (`ORDER BY updated_at`).
    pub updated_at: DateTime<Utc>,
    /// Creation time; `None` for rows that never recorded one.
    pub created_at: Option<DateTime<Utc>>,
    /// Absolute paths of the project's visible worktrees at save time.
    pub folder_paths: PathList,
    /// Paths of the associated main worktrees; empty when not applicable.
    pub main_worktree_paths: PathList,
    /// Whether the thread is hidden from the sidebar (shown in the archive).
    pub archived: bool,
}
126
/// Converts stored metadata into ACP's `AgentSessionInfo` shape.
impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
    fn from(meta: &ThreadMetadata) -> Self {
        Self {
            session_id: meta.session_id.clone(),
            work_dirs: Some(meta.folder_paths.clone()),
            title: Some(meta.title.clone()),
            updated_at: Some(meta.updated_at),
            created_at: meta.created_at,
            // No extra ACP metadata is attached.
            meta: None,
        }
    }
}
139
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// Maps to a row of the `archived_git_worktrees` table.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
181
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Automatically listens to AcpThread events and updates metadata if it has changed.
pub struct ThreadMetadataStore {
    /// Persistent backing store; writes are funneled through the op channel.
    db: ThreadMetadataDb,
    /// All known threads, keyed by session id.
    threads: HashMap<acp::SessionId, ThreadMetadata>,
    /// Index: folder paths -> session ids of threads opened with those folders.
    threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// Index: main-worktree paths -> session ids (threads in linked worktrees).
    threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// In-flight reload of the caches from the DB, if any.
    reload_task: Option<Shared<Task<()>>>,
    /// Per-session subscriptions to live `AcpThread` events.
    session_subscriptions: HashMap<acp::SessionId, Subscription>,
    /// Queue feeding the background DB writer task.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Running archive jobs; dropping the Sender cancels the background task.
    in_flight_archives: HashMap<acp::SessionId, (Task<()>, smol::channel::Sender<()>)>,
    /// Background task that drains and applies queued DB operations.
    _db_operations_task: Task<()>,
}
196
197#[derive(Debug, PartialEq)]
198enum DbOperation {
199 Upsert(ThreadMetadata),
200 Delete(acp::SessionId),
201}
202
203impl DbOperation {
204 fn id(&self) -> &acp::SessionId {
205 match self {
206 DbOperation::Upsert(thread) => &thread.session_id,
207 DbOperation::Delete(session_id) => session_id,
208 }
209 }
210}
211
212impl ThreadMetadataStore {
213 #[cfg(not(any(test, feature = "test-support")))]
214 pub fn init_global(cx: &mut App) {
215 if cx.has_global::<Self>() {
216 return;
217 }
218
219 let db = ThreadMetadataDb::global(cx);
220 let thread_store = cx.new(|cx| Self::new(db, cx));
221 cx.set_global(GlobalThreadMetadataStore(thread_store));
222 }
223
    /// Test variant: opens a per-test database named after the current thread
    /// so parallel tests don't share state. Unconditionally replaces any
    /// previously installed global.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
233
234 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
235 cx.try_global::<GlobalThreadMetadataStore>()
236 .map(|store| store.0.clone())
237 }
238
    /// Returns the global store. Panics if `init_global` hasn't run.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }

    /// Returns true when no thread metadata is loaded.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }

    /// Returns all thread IDs.
    pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
        self.threads.keys().cloned()
    }

    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        self.threads.get(session_id)
    }

    /// Returns all threads.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
261
262 /// Returns all archived threads.
263 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
264 self.entries().filter(|t| t.archived)
265 }
266
267 /// Returns all threads for the given path list, excluding archived threads.
268 pub fn entries_for_path(
269 &self,
270 path_list: &PathList,
271 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
272 self.threads_by_paths
273 .get(path_list)
274 .into_iter()
275 .flatten()
276 .filter_map(|s| self.threads.get(s))
277 .filter(|s| !s.archived)
278 }
279
280 /// Returns threads whose `main_worktree_paths` matches the given path list,
281 /// excluding archived threads. This finds threads that were opened in a
282 /// linked worktree but are associated with the given main worktree.
283 pub fn entries_for_main_worktree_path(
284 &self,
285 path_list: &PathList,
286 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
287 self.threads_by_main_paths
288 .get(path_list)
289 .into_iter()
290 .flatten()
291 .filter_map(|s| self.threads.get(s))
292 .filter(|s| !s.archived)
293 }
294
    /// Rebuilds the in-memory caches from the database.
    ///
    /// Replaces any in-flight reload; the returned shared task resolves once
    /// the maps have been repopulated and observers notified.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous reload so only the newest one wins.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild all three indices from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();

                    for row in rows {
                        this.threads_by_paths
                            .entry(row.folder_paths.clone())
                            .or_default()
                            .insert(row.session_id.clone());
                        if !row.main_worktree_paths.is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths.clone())
                                .or_default()
                                .insert(row.session_id.clone());
                        }
                        this.threads.insert(row.session_id.clone(), row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
335
336 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
337 for metadata in metadata {
338 self.save_internal(metadata);
339 }
340 cx.notify();
341 }
342
    /// Test-only hook to insert metadata without going through thread events.
    #[cfg(any(test, feature = "test-support"))]
    pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save(metadata, cx)
    }

    /// Saves a single metadata entry and notifies observers.
    fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
352
    /// Updates the in-memory maps and queues an upsert for the background DB
    /// writer. Does not notify observers; callers are responsible for that.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the thread already exists and its paths changed, remove it from
        // the stale index buckets before inserting into the new ones.
        if let Some(thread) = self.threads.get(&metadata.session_id) {
            if thread.folder_paths != metadata.folder_paths {
                if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                    session_ids.remove(&metadata.session_id);
                }
            }
            if thread.main_worktree_paths != metadata.main_worktree_paths
                && !thread.main_worktree_paths.is_empty()
            {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&metadata.session_id);
                }
            }
        }

        self.threads
            .insert(metadata.session_id.clone(), metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths.clone())
            .or_default()
            .insert(metadata.session_id.clone());

        // Empty main-worktree path lists are never indexed.
        if !metadata.main_worktree_paths.is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths.clone())
                .or_default()
                .insert(metadata.session_id.clone());
        }

        // Persist asynchronously; `try_send` on the unbounded channel only
        // fails if the background writer has shut down.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
391
392 pub fn update_working_directories(
393 &mut self,
394 session_id: &acp::SessionId,
395 work_dirs: PathList,
396 cx: &mut Context<Self>,
397 ) {
398 if let Some(thread) = self.threads.get(session_id) {
399 self.save_internal(ThreadMetadata {
400 folder_paths: work_dirs,
401 ..thread.clone()
402 });
403 cx.notify();
404 }
405 }
406
407 pub fn archive(
408 &mut self,
409 session_id: &acp::SessionId,
410 archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
411 cx: &mut Context<Self>,
412 ) {
413 self.update_archived(session_id, true, cx);
414
415 if let Some(job) = archive_job {
416 self.in_flight_archives.insert(session_id.clone(), job);
417 }
418 }
419
420 pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
421 self.update_archived(session_id, false, cx);
422 // Dropping the Sender triggers cancellation in the background task.
423 self.in_flight_archives.remove(session_id);
424 }
425
    /// Drops bookkeeping for an archive job that ran to completion, releasing
    /// its task handle and cancellation sender.
    pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) {
        self.in_flight_archives.remove(session_id);
    }
429
430 /// Updates a thread's `folder_paths` after an archived worktree has been
431 /// restored to disk. The restored worktree may land at a different path
432 /// than it had before archival, so each `(old_path, new_path)` pair in
433 /// `path_replacements` is applied to the thread's stored folder paths.
434 pub fn update_restored_worktree_paths(
435 &mut self,
436 session_id: &acp::SessionId,
437 path_replacements: &[(PathBuf, PathBuf)],
438 cx: &mut Context<Self>,
439 ) {
440 if let Some(thread) = self.threads.get(session_id).cloned() {
441 let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
442 for (old_path, new_path) in path_replacements {
443 if let Some(pos) = paths.iter().position(|p| p == old_path) {
444 paths[pos] = new_path.clone();
445 }
446 }
447 let new_folder_paths = PathList::new(&paths);
448 self.save_internal(ThreadMetadata {
449 folder_paths: new_folder_paths,
450 ..thread
451 });
452 cx.notify();
453 }
454 }
455
456 pub fn complete_worktree_restore(
457 &mut self,
458 session_id: &acp::SessionId,
459 path_replacements: &[(PathBuf, PathBuf)],
460 cx: &mut Context<Self>,
461 ) {
462 if let Some(thread) = self.threads.get(session_id).cloned() {
463 let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
464 for (old_path, new_path) in path_replacements {
465 for path in &mut paths {
466 if path == old_path {
467 *path = new_path.clone();
468 }
469 }
470 }
471 let new_folder_paths = PathList::new(&paths);
472 self.save_internal(ThreadMetadata {
473 folder_paths: new_folder_paths,
474 ..thread
475 });
476 cx.notify();
477 }
478 }
479
    /// Records a git worktree snapshot in the archive table, returning its
    /// new row id.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Associates a thread with an archived worktree row.
    pub fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
                .await
        })
    }

    /// Fetches all archived worktrees linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
    }

    /// Deletes an archived worktree row together with its thread links.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Removes every archived-worktree link for the given thread.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(session_id)
                .await
        })
    }

    /// Returns whether any thread still references the archived worktree.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }
554
555 fn update_archived(
556 &mut self,
557 session_id: &acp::SessionId,
558 archived: bool,
559 cx: &mut Context<Self>,
560 ) {
561 if let Some(thread) = self.threads.get(session_id) {
562 self.save_internal(ThreadMetadata {
563 archived,
564 ..thread.clone()
565 });
566 cx.notify();
567 }
568 }
569
    /// Removes a thread from the in-memory maps and queues a DB delete.
    pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
        // Drop the id from both path indices before removing the entry.
        if let Some(thread) = self.threads.get(&session_id) {
            if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                session_ids.remove(&session_id);
            }
            if !thread.main_worktree_paths.is_empty() {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&session_id);
                }
            }
        }
        self.threads.remove(&session_id);
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(session_id))
            .log_err();
        cx.notify();
    }
590
    /// Builds the store: subscribes to every newly created `AcpThread` so
    /// metadata stays in sync, spawns the background task that drains queued
    /// DB operations, and kicks off an initial reload.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
            // Don't track subagent threads in the sidebar.
            if thread.parent_session_id().is_some() {
                return;
            }

            let thread_entity = cx.entity();

            // Tear down the event subscription when the thread is released.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |thread, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            let session_id = thread.session_id().clone();
                            store.session_subscriptions.remove(&session_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
                    this.session_subscriptions
                        .insert(thread.session_id().clone(), subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                // Drain in batches: block for the first op, then grab whatever
                // else is already queued so bursts can be deduplicated before
                // hitting the database.
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(session_id) => {
                                db.delete(session_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            reload_task: None,
            session_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        let _ = this.reload(cx);
        this
    }
663
664 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
665 let mut ops = HashMap::default();
666 for operation in operations.into_iter().rev() {
667 if ops.contains_key(operation.id()) {
668 continue;
669 }
670 ops.insert(operation.id().clone(), operation);
671 }
672 ops.into_values().collect()
673 }
674
675 fn handle_thread_event(
676 &mut self,
677 thread: Entity<acp_thread::AcpThread>,
678 event: &AcpThreadEvent,
679 cx: &mut Context<Self>,
680 ) {
681 // Don't track subagent threads in the sidebar.
682 if thread.read(cx).parent_session_id().is_some() {
683 return;
684 }
685
686 match event {
687 AcpThreadEvent::NewEntry
688 | AcpThreadEvent::TitleUpdated
689 | AcpThreadEvent::EntryUpdated(_)
690 | AcpThreadEvent::EntriesRemoved(_)
691 | AcpThreadEvent::ToolAuthorizationRequested(_)
692 | AcpThreadEvent::ToolAuthorizationReceived(_)
693 | AcpThreadEvent::Retry(_)
694 | AcpThreadEvent::Stopped(_)
695 | AcpThreadEvent::Error
696 | AcpThreadEvent::LoadError(_)
697 | AcpThreadEvent::Refusal
698 | AcpThreadEvent::WorkingDirectoriesUpdated => {
699 let thread_ref = thread.read(cx);
700 if thread_ref.entries().is_empty() {
701 return;
702 }
703
704 let existing_thread = self.threads.get(thread_ref.session_id());
705 let session_id = thread_ref.session_id().clone();
706 let title = thread_ref
707 .title()
708 .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());
709
710 let updated_at = Utc::now();
711
712 let created_at = existing_thread
713 .and_then(|t| t.created_at)
714 .unwrap_or_else(|| updated_at);
715
716 let agent_id = thread_ref.connection().agent_id();
717
718 let folder_paths = {
719 let project = thread_ref.project().read(cx);
720 let paths: Vec<Arc<Path>> = project
721 .visible_worktrees(cx)
722 .map(|worktree| worktree.read(cx).abs_path())
723 .collect();
724 PathList::new(&paths)
725 };
726
727 let main_worktree_paths = thread_ref
728 .project()
729 .read(cx)
730 .project_group_key(cx)
731 .path_list()
732 .clone();
733
734 // Threads without a folder path (e.g. started in an empty
735 // window) are archived by default so they don't get lost,
736 // because they won't show up in the sidebar. Users can reload
737 // them from the archive.
738 let archived = existing_thread
739 .map(|t| t.archived)
740 .unwrap_or(folder_paths.is_empty());
741
742 let metadata = ThreadMetadata {
743 session_id,
744 agent_id,
745 title,
746 created_at: Some(created_at),
747 updated_at,
748 folder_paths,
749 main_worktree_paths,
750 archived,
751 };
752
753 self.save(metadata, cx);
754 }
755 AcpThreadEvent::TokenUsageUpdated
756 | AcpThreadEvent::SubagentSpawned(_)
757 | AcpThreadEvent::PromptCapabilitiesUpdated
758 | AcpThreadEvent::AvailableCommandsUpdated(_)
759 | AcpThreadEvent::ModeUpdated(_)
760 | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
761 }
762 }
763}
764
// NOTE(review): this Global impl appears to exist only to let
// `init_global`'s `has_global::<Self>()` check compile — the store itself is
// never installed via `set_global`; verify before removing.
impl Global for ThreadMetadataStore {}

/// Thin wrapper around the sqlez connection for the thread-metadata database.
struct ThreadMetadataDb(ThreadSafeConnection);
768
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // NOTE(review): migrations look append-only — add new entries rather than
    // editing shipped ones; confirm against the sqlez migration runner.
    const MIGRATIONS: &[&str] = &[
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // Flag hiding a thread from the sidebar (it shows in the archive).
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Archived git worktrees plus the many-to-many link to threads.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
    ];
}
806
807db::static_connection!(ThreadMetadataDb, []);
808
809impl ThreadMetadataDb {
    /// Lists the session ids of all stored threads, newest first.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    // The column order here must match `impl Column for ThreadMetadata`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }
825
    /// Upsert metadata for a thread.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        // The built-in Zed agent is stored as NULL; `Column for ThreadMetadata`
        // maps NULL back to `ZED_AGENT_ID`.
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        // Timestamps are stored as RFC 3339 text.
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths.serialize();
        // Empty path lists are stored as NULL rather than empty strings.
        let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths.serialize();
        let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
        {
            (None, None)
        } else {
            (Some(main_serialized.paths), Some(main_serialized.order))
        };
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
                ON CONFLICT(session_id) DO UPDATE SET \
                    agent_id = excluded.agent_id, \
                    title = excluded.title, \
                    updated_at = excluded.updated_at, \
                    created_at = excluded.created_at, \
                    folder_paths = excluded.folder_paths, \
                    folder_paths_order = excluded.folder_paths_order, \
                    archived = excluded.archived, \
                    main_worktree_paths = excluded.main_worktree_paths, \
                    main_worktree_paths_order = excluded.main_worktree_paths_order";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the ?1..?10 placeholders above.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            stmt.bind(&main_worktree_paths_order, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
892
    /// Inserts an archived-worktree record and returns its generated row id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // RETURNING id yields exactly one row for a successful insert.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Links a thread to an archived worktree row.
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetches all archived worktrees linked to the given thread.
    // Column order must match `impl Column for ArchivedGitWorktree`.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.session_id = ?1",
        )?(session_id)
    }

    /// Deletes an archived worktree row and all of its thread links.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Remove link rows first so the REFERENCES constraint is satisfied.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Removes every archived-worktree link for the given thread.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE session_id = ?",
            )?;
            stmt.bind(&session_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Returns whether any thread still links to the archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }
991}
992
impl Column for ThreadMetadata {
    /// Decodes one `sidebar_threads` row; the column order must match the
    /// SELECT in `ThreadMetadataDb::list`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
        let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (title, next): (String, i32) = Column::column(statement, next)?;
        let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
        let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (archived, next): (bool, i32) = Column::column(statement, next)?;
        let (main_worktree_paths_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (main_worktree_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;

        // A NULL agent_id column means the built-in Zed agent (see `save`).
        let agent_id = agent_id
            .map(|id| AgentId::new(id))
            .unwrap_or(ZED_AGENT_ID.clone());

        // Timestamps are stored as RFC 3339 text and normalized back to UTC.
        let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
        let created_at = created_at_str
            .as_deref()
            .map(DateTime::parse_from_rfc3339)
            .transpose()?
            .map(|dt| dt.with_timezone(&Utc));

        // NULL path columns decode to an empty PathList.
        let folder_paths = folder_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: folder_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let main_worktree_paths = main_worktree_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: main_worktree_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        Ok((
            ThreadMetadata {
                session_id: acp::SessionId::new(id),
                agent_id,
                title: title.into(),
                updated_at,
                created_at,
                folder_paths,
                main_worktree_paths,
                archived,
            },
            next,
        ))
    }
}

impl Column for ArchivedGitWorktree {
    /// Decodes one joined `archived_git_worktrees` row; the column order must
    /// match the SELECT in `get_archived_worktrees_for_thread`.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (i64, i32) = Column::column(statement, start_index)?;
        let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
        let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
        let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;

        Ok((
            ArchivedGitWorktree {
                id,
                worktree_path: PathBuf::from(worktree_path_str),
                main_repo_path: PathBuf::from(main_repo_path_str),
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            },
            next,
        ))
    }
}
1078
1079#[cfg(test)]
1080mod tests {
1081 use super::*;
1082 use acp_thread::{AgentConnection, StubAgentConnection};
1083 use action_log::ActionLog;
1084 use agent::DbThread;
1085 use agent_client_protocol as acp;
1086
1087 use gpui::TestAppContext;
1088 use project::FakeFs;
1089 use project::Project;
1090 use std::path::Path;
1091 use std::rc::Rc;
1092
1093 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1094 DbThread {
1095 title: title.to_string().into(),
1096 messages: Vec::new(),
1097 updated_at,
1098 detailed_summary: None,
1099 initial_project_snapshot: None,
1100 cumulative_token_usage: Default::default(),
1101 request_token_usage: Default::default(),
1102 model: None,
1103 profile: None,
1104 imported: false,
1105 subagent_context: None,
1106 speed: None,
1107 thinking_enabled: false,
1108 thinking_effort: None,
1109 draft_prompt: None,
1110 ui_scroll_position: None,
1111 }
1112 }
1113
1114 fn make_metadata(
1115 session_id: &str,
1116 title: &str,
1117 updated_at: DateTime<Utc>,
1118 folder_paths: PathList,
1119 ) -> ThreadMetadata {
1120 ThreadMetadata {
1121 archived: false,
1122 session_id: acp::SessionId::new(session_id),
1123 agent_id: agent::ZED_AGENT_ID.clone(),
1124 title: title.to_string().into(),
1125 updated_at,
1126 created_at: Some(updated_at),
1127 folder_paths,
1128 main_worktree_paths: PathList::default(),
1129 }
1130 }
1131
    /// Installs the globals these tests need: a test `SettingsStore`, the
    /// metadata store, and the native thread store.
    fn init_test(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
        });
        // Let the stores finish any async initialization before the test body runs.
        cx.run_until_parked();
    }
1141
    // Seeds the database directly (before the global store exists) and
    // verifies that `ThreadMetadataStore::init_global` hydrates its in-memory
    // cache from the database, indexed both by session id and by folder path.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test DB after the current thread so each test gets its own
        // database.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Initialize the global store *after* seeding, so its cache must be
        // populated from the database rather than from in-process saves.
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // The per-path index should attribute each session to its own
            // folder path.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1206
    // Verifies the cache's per-path index tracks saves that move a thread
    // between folder paths, and that deletes remove the entry everywhere.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        // After the initial saves, each session is indexed under its own path.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under the second project's path to simulate a
        // thread moving between projects.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // session-1 must vanish from its old path index...
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            // ...and both sessions are now indexed under the second path.
            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        // Deleting session-2 removes it from both the id and path indices.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1326
    // Verifies migration from the native thread store copies only threads
    // that do not already have metadata, marks migrated entries as archived
    // (this is not the first migration since the DB is non-empty), and leaves
    // pre-existing metadata untouched.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-seed metadata for "a-session-0" so migration should skip it.
        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            folder_paths: project_a_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Native-store threads: one colliding with the seeded entry, two new
        // project-scoped ones, and one with no project association.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // 1 pre-existing entry + 3 newly migrated threads.
        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The colliding session keeps its pre-seeded title and archived flag;
        // migration must not overwrite it with the native store's data.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // Because the DB already had an entry, this is not a first migration,
        // so all migrated threads come in archived.
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1435
    // When every native-store thread already has metadata, migration should
    // change nothing — in particular it must not overwrite existing entries
    // with newer native-store data.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            folder_paths: project_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // The native store holds the same session with a newer title and
        // timestamp; migration must still skip it.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1492
    // On a *first* migration (empty metadata DB), only the 5 most recently
    // updated threads per project stay unarchived; anything older is archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the archive cutoff is at index 5.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1582
    // A thread with no content (e.g. only a draft title change) must not get
    // a sidebar metadata entry; the first real user content creates one.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Title updates alone do not make the thread non-empty.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // Pushing user content makes the thread non-empty, which should
        // create the metadata entry.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1634
    // Dropping the live thread entity must not delete its metadata: threads
    // with real content stay listed in the sidebar after release.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Release the last handle; the empty update flushes entity drops.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1680
    // Threads belonging to a project with no worktree get archived metadata
    // by default; threads tied to a real worktree stay unarchived and record
    // the worktree's folder path.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Content + title makes the thread non-empty so metadata is written.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths.is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                with_worktree.folder_paths,
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
1760
    // Subagent threads (AcpThreads constructed with a parent session id) must
    // never produce sidebar metadata entries of their own.
    #[gpui::test]
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread; passing the regular session id as the
        // first argument marks it as a child of that session.
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                acp_thread::AcpThread::new(
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
             but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
1837
1838 #[test]
1839 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1840 let now = Utc::now();
1841
1842 let operations = vec![
1843 DbOperation::Upsert(make_metadata(
1844 "session-1",
1845 "First Thread",
1846 now,
1847 PathList::default(),
1848 )),
1849 DbOperation::Delete(acp::SessionId::new("session-1")),
1850 ];
1851
1852 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1853
1854 assert_eq!(deduped.len(), 1);
1855 assert_eq!(
1856 deduped[0],
1857 DbOperation::Delete(acp::SessionId::new("session-1"))
1858 );
1859 }
1860
1861 #[test]
1862 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1863 let now = Utc::now();
1864 let later = now + chrono::Duration::seconds(1);
1865
1866 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1867 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1868
1869 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1870 DbOperation::Upsert(old_metadata),
1871 DbOperation::Upsert(new_metadata.clone()),
1872 ]);
1873
1874 assert_eq!(deduped.len(), 1);
1875 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1876 }
1877
1878 #[test]
1879 fn test_dedup_db_operations_preserves_distinct_sessions() {
1880 let now = Utc::now();
1881
1882 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1883 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1884 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1885 DbOperation::Upsert(metadata1.clone()),
1886 DbOperation::Upsert(metadata2.clone()),
1887 ]);
1888
1889 assert_eq!(deduped.len(), 2);
1890 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1891 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1892 }
1893
    // Round-trips a thread through archive/unarchive and checks it moves
    // between the per-path index and the archived list accordingly.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: visible under its path, not in the archived list.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Archived entries are hidden from the per-path index...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            // ...and show up only via archived_entries().
            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // Unarchiving restores the original visibility.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
1980
    // `entries_for_path` must skip archived threads, while `entries` returns
    // everything and `archived_entries` returns only the archived ones.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Only the active thread remains visible for the path.
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // But both threads still exist in the full listing.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2040
    // `save_all` should persist a batch of metadata entries in one call and
    // make all of them visible through the cache.
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
2088
    // The archived flag must survive a cache reload from the database, not
    // just live in the in-memory cache.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        cx.run_until_parked();

        // Drop the cache and rebuild it from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // The reloaded entry keeps its archived visibility rules too.
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2147
2148 #[gpui::test]
2149 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2150 init_test(cx);
2151
2152 cx.run_until_parked();
2153
2154 cx.update(|cx| {
2155 let store = ThreadMetadataStore::global(cx);
2156 store.update(cx, |store, cx| {
2157 store.archive(&acp::SessionId::new("nonexistent"), None, cx);
2158 });
2159 });
2160
2161 cx.run_until_parked();
2162
2163 cx.update(|cx| {
2164 let store = ThreadMetadataStore::global(cx);
2165 let store = store.read(cx);
2166
2167 assert!(store.is_empty());
2168 assert_eq!(store.entries().count(), 0);
2169 assert_eq!(store.archived_entries().count(), 0);
2170 });
2171 }
2172
2173 #[gpui::test]
2174 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2175 init_test(cx);
2176
2177 let paths = PathList::new(&[Path::new("/project-a")]);
2178 let now = Utc::now();
2179 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2180 let session_id = metadata.session_id.clone();
2181
2182 cx.update(|cx| {
2183 let store = ThreadMetadataStore::global(cx);
2184 store.update(cx, |store, cx| {
2185 store.save(metadata.clone(), cx);
2186 store.archive(&session_id, None, cx);
2187 });
2188 });
2189
2190 cx.run_until_parked();
2191
2192 cx.update(|cx| {
2193 let store = ThreadMetadataStore::global(cx);
2194 let store = store.read(cx);
2195
2196 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2197 pretty_assertions::assert_eq!(
2198 entries,
2199 vec![ThreadMetadata {
2200 archived: true,
2201 ..metadata
2202 }]
2203 );
2204 });
2205 }
2206
2207 #[gpui::test]
2208 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2209 init_test(cx);
2210 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2211
2212 let id = store
2213 .read_with(cx, |store, cx| {
2214 store.create_archived_worktree(
2215 "/tmp/worktree".to_string(),
2216 "/home/user/repo".to_string(),
2217 Some("feature-branch".to_string()),
2218 "staged_aaa".to_string(),
2219 "unstaged_bbb".to_string(),
2220 "original_000".to_string(),
2221 cx,
2222 )
2223 })
2224 .await
2225 .unwrap();
2226
2227 store
2228 .read_with(cx, |store, cx| {
2229 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2230 })
2231 .await
2232 .unwrap();
2233
2234 let worktrees = store
2235 .read_with(cx, |store, cx| {
2236 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2237 })
2238 .await
2239 .unwrap();
2240
2241 assert_eq!(worktrees.len(), 1);
2242 let wt = &worktrees[0];
2243 assert_eq!(wt.id, id);
2244 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2245 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2246 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2247 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2248 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2249 assert_eq!(wt.original_commit_hash, "original_000");
2250 }
2251
2252 #[gpui::test]
2253 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2254 init_test(cx);
2255 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2256
2257 let id = store
2258 .read_with(cx, |store, cx| {
2259 store.create_archived_worktree(
2260 "/tmp/worktree".to_string(),
2261 "/home/user/repo".to_string(),
2262 Some("main".to_string()),
2263 "deadbeef".to_string(),
2264 "deadbeef".to_string(),
2265 "original_000".to_string(),
2266 cx,
2267 )
2268 })
2269 .await
2270 .unwrap();
2271
2272 store
2273 .read_with(cx, |store, cx| {
2274 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2275 })
2276 .await
2277 .unwrap();
2278
2279 store
2280 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2281 .await
2282 .unwrap();
2283
2284 let worktrees = store
2285 .read_with(cx, |store, cx| {
2286 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2287 })
2288 .await
2289 .unwrap();
2290 assert!(worktrees.is_empty());
2291 }
2292
2293 #[gpui::test]
2294 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2295 init_test(cx);
2296 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2297
2298 let id = store
2299 .read_with(cx, |store, cx| {
2300 store.create_archived_worktree(
2301 "/tmp/worktree".to_string(),
2302 "/home/user/repo".to_string(),
2303 None,
2304 "abc123".to_string(),
2305 "abc123".to_string(),
2306 "original_000".to_string(),
2307 cx,
2308 )
2309 })
2310 .await
2311 .unwrap();
2312
2313 store
2314 .read_with(cx, |store, cx| {
2315 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2316 })
2317 .await
2318 .unwrap();
2319
2320 store
2321 .read_with(cx, |store, cx| {
2322 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2323 })
2324 .await
2325 .unwrap();
2326
2327 let wt1 = store
2328 .read_with(cx, |store, cx| {
2329 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2330 })
2331 .await
2332 .unwrap();
2333
2334 let wt2 = store
2335 .read_with(cx, |store, cx| {
2336 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2337 })
2338 .await
2339 .unwrap();
2340
2341 assert_eq!(wt1.len(), 1);
2342 assert_eq!(wt2.len(), 1);
2343 assert_eq!(wt1[0].id, wt2[0].id);
2344 }
2345
2346 #[gpui::test]
2347 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
2348 init_test(cx);
2349 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2350
2351 let original_paths = PathList::new(&[
2352 Path::new("/projects/worktree-a"),
2353 Path::new("/projects/worktree-b"),
2354 Path::new("/other/unrelated"),
2355 ]);
2356 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2357
2358 store.update(cx, |store, cx| {
2359 store.save_manually(meta, cx);
2360 });
2361
2362 let replacements = vec![
2363 (
2364 PathBuf::from("/projects/worktree-a"),
2365 PathBuf::from("/restored/worktree-a"),
2366 ),
2367 (
2368 PathBuf::from("/projects/worktree-b"),
2369 PathBuf::from("/restored/worktree-b"),
2370 ),
2371 ];
2372
2373 store.update(cx, |store, cx| {
2374 store.complete_worktree_restore(
2375 &acp::SessionId::new("session-multi"),
2376 &replacements,
2377 cx,
2378 );
2379 });
2380
2381 let entry = store.read_with(cx, |store, _cx| {
2382 store.entry(&acp::SessionId::new("session-multi")).cloned()
2383 });
2384 let entry = entry.unwrap();
2385 let paths = entry.folder_paths.paths();
2386 assert_eq!(paths.len(), 3);
2387 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2388 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2389 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2390 }
2391
2392 #[gpui::test]
2393 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
2394 init_test(cx);
2395 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2396
2397 let original_paths =
2398 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2399 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2400
2401 store.update(cx, |store, cx| {
2402 store.save_manually(meta, cx);
2403 });
2404
2405 let replacements = vec![
2406 (
2407 PathBuf::from("/projects/worktree-a"),
2408 PathBuf::from("/new/worktree-a"),
2409 ),
2410 (
2411 PathBuf::from("/nonexistent/path"),
2412 PathBuf::from("/should/not/appear"),
2413 ),
2414 ];
2415
2416 store.update(cx, |store, cx| {
2417 store.complete_worktree_restore(
2418 &acp::SessionId::new("session-partial"),
2419 &replacements,
2420 cx,
2421 );
2422 });
2423
2424 let entry = store.read_with(cx, |store, _cx| {
2425 store
2426 .entry(&acp::SessionId::new("session-partial"))
2427 .cloned()
2428 });
2429 let entry = entry.unwrap();
2430 let paths = entry.folder_paths.paths();
2431 assert_eq!(paths.len(), 2);
2432 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2433 assert!(paths.contains(&PathBuf::from("/other/path")));
2434 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2435 }
2436
2437 #[gpui::test]
2438 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
2439 init_test(cx);
2440 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2441
2442 let original_paths = PathList::new(&[
2443 Path::new("/projects/worktree-a"),
2444 Path::new("/projects/worktree-b"),
2445 Path::new("/other/unrelated"),
2446 ]);
2447 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2448
2449 store.update(cx, |store, cx| {
2450 store.save_manually(meta, cx);
2451 });
2452
2453 let replacements = vec![
2454 (
2455 PathBuf::from("/projects/worktree-a"),
2456 PathBuf::from("/restored/worktree-a"),
2457 ),
2458 (
2459 PathBuf::from("/projects/worktree-b"),
2460 PathBuf::from("/restored/worktree-b"),
2461 ),
2462 ];
2463
2464 store.update(cx, |store, cx| {
2465 store.update_restored_worktree_paths(
2466 &acp::SessionId::new("session-multi"),
2467 &replacements,
2468 cx,
2469 );
2470 });
2471
2472 let entry = store.read_with(cx, |store, _cx| {
2473 store.entry(&acp::SessionId::new("session-multi")).cloned()
2474 });
2475 let entry = entry.unwrap();
2476 let paths = entry.folder_paths.paths();
2477 assert_eq!(paths.len(), 3);
2478 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2479 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2480 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2481 }
2482
2483 #[gpui::test]
2484 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
2485 init_test(cx);
2486 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2487
2488 let original_paths =
2489 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2490 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2491
2492 store.update(cx, |store, cx| {
2493 store.save_manually(meta, cx);
2494 });
2495
2496 let replacements = vec![
2497 (
2498 PathBuf::from("/projects/worktree-a"),
2499 PathBuf::from("/new/worktree-a"),
2500 ),
2501 (
2502 PathBuf::from("/nonexistent/path"),
2503 PathBuf::from("/should/not/appear"),
2504 ),
2505 ];
2506
2507 store.update(cx, |store, cx| {
2508 store.update_restored_worktree_paths(
2509 &acp::SessionId::new("session-partial"),
2510 &replacements,
2511 cx,
2512 );
2513 });
2514
2515 let entry = store.read_with(cx, |store, _cx| {
2516 store
2517 .entry(&acp::SessionId::new("session-partial"))
2518 .cloned()
2519 });
2520 let entry = entry.unwrap();
2521 let paths = entry.folder_paths.paths();
2522 assert_eq!(paths.len(), 2);
2523 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2524 assert!(paths.contains(&PathBuf::from("/other/path")));
2525 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2526 }
2527
2528 #[gpui::test]
2529 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2530 init_test(cx);
2531 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2532
2533 let id1 = store
2534 .read_with(cx, |store, cx| {
2535 store.create_archived_worktree(
2536 "/projects/worktree-a".to_string(),
2537 "/home/user/repo".to_string(),
2538 Some("branch-a".to_string()),
2539 "staged_a".to_string(),
2540 "unstaged_a".to_string(),
2541 "original_000".to_string(),
2542 cx,
2543 )
2544 })
2545 .await
2546 .unwrap();
2547
2548 let id2 = store
2549 .read_with(cx, |store, cx| {
2550 store.create_archived_worktree(
2551 "/projects/worktree-b".to_string(),
2552 "/home/user/repo".to_string(),
2553 Some("branch-b".to_string()),
2554 "staged_b".to_string(),
2555 "unstaged_b".to_string(),
2556 "original_000".to_string(),
2557 cx,
2558 )
2559 })
2560 .await
2561 .unwrap();
2562
2563 store
2564 .read_with(cx, |store, cx| {
2565 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2566 })
2567 .await
2568 .unwrap();
2569
2570 store
2571 .read_with(cx, |store, cx| {
2572 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2573 })
2574 .await
2575 .unwrap();
2576
2577 let worktrees = store
2578 .read_with(cx, |store, cx| {
2579 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2580 })
2581 .await
2582 .unwrap();
2583
2584 assert_eq!(worktrees.len(), 2);
2585
2586 let paths: Vec<&Path> = worktrees
2587 .iter()
2588 .map(|w| w.worktree_path.as_path())
2589 .collect();
2590 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2591 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2592 }
2593}