1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use feature_flags::{AgentV2FeatureFlag, FeatureFlagAppExt};
20use futures::{FutureExt as _, future::Shared};
21use gpui::{AppContext as _, Entity, Global, Subscription, Task};
22use project::AgentId;
23use ui::{App, Context, SharedString};
24use util::ResultExt as _;
25use workspace::PathList;
26
27use crate::DEFAULT_THREAD_TITLE;
28
29pub fn init(cx: &mut App) {
30 ThreadMetadataStore::init_global(cx);
31
32 if cx.has_flag::<AgentV2FeatureFlag>() {
33 migrate_thread_metadata(cx);
34 }
35 cx.observe_flag::<AgentV2FeatureFlag, _>(|has_flag, cx| {
36 if has_flag {
37 migrate_thread_metadata(cx);
38 }
39 })
40 .detach();
41}
42
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // IDs already present in the metadata DB are skipped below, making
        // the migration idempotent across restarts.
        let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();

        let is_first_migration = existing_entries.is_empty();

        // NOTE(review): whether `read_with` here yields the Vec directly or a
        // Result that needs `?` depends on the gpui async API — confirm.
        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_entries.contains(&entry.id.0) {
                        return None;
                    }

                    // Migrated threads start archived; the first-migration
                    // pass below unarchives the most recent ones per project.
                    Some(ThreadMetadata {
                        session_id: entry.id,
                        agent_id: ZED_AGENT_ID.clone(),
                        title: entry.title,
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        folder_paths: entry.folder_paths,
                        main_worktree_paths: PathList::default(),
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                if entry.folder_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.folder_paths.clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
    .detach_and_log_err(cx);
}
119
/// Newtype wrapper that lets the [`ThreadMetadataStore`] entity be installed
/// and looked up as a gpui global.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
122
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// ACP session ID; primary key of the `sidebar_threads` table.
    pub session_id: acp::SessionId,
    /// Agent that owns the thread (stored as NULL for the built-in Zed agent).
    pub agent_id: AgentId,
    /// Display title shown in the sidebar.
    pub title: SharedString,
    /// Last time the thread saw user-visible activity.
    pub updated_at: DateTime<Utc>,
    /// Creation time; `None` for rows that never recorded one.
    pub created_at: Option<DateTime<Utc>>,
    /// Absolute paths of the project's visible worktrees.
    pub folder_paths: PathList,
    /// Main-repository paths when the thread was opened in linked git
    /// worktrees; empty otherwise.
    pub main_worktree_paths: PathList,
    /// Archived threads are hidden from the sidebar and shown in the archive.
    pub archived: bool,
}
136
137impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
138 fn from(meta: &ThreadMetadata) -> Self {
139 Self {
140 session_id: meta.session_id.clone(),
141 work_dirs: Some(meta.folder_paths.clone()),
142 title: Some(meta.title.clone()),
143 updated_at: Some(meta.updated_at),
144 created_at: meta.created_at,
145 meta: None,
146 }
147 }
148}
149
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// Rows live in the `archived_git_worktrees` table and are linked to threads
/// through `thread_archived_worktrees`.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
191
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Automatically listens to AcpThread events and updates metadata if it has changed.
pub struct ThreadMetadataStore {
    /// Backing SQLite connection.
    db: ThreadMetadataDb,
    /// All known threads, keyed by session ID.
    threads: HashMap<acp::SessionId, ThreadMetadata>,
    /// Index: folder paths -> session IDs (includes archived threads).
    threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// Index: main-worktree paths -> session IDs, for linked git worktrees.
    threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// In-flight reload, shared so concurrent callers await the same task.
    reload_task: Option<Shared<Task<()>>>,
    /// Per-session subscriptions to live AcpThread events.
    session_subscriptions: HashMap<acp::SessionId, Subscription>,
    /// Queue feeding the background DB-write batching task.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Drains the queue and applies deduplicated operations to the DB.
    _db_operations_task: Task<()>,
}
205
/// A pending database write, queued on the store's channel and applied
/// asynchronously (with per-session deduplication) by the background task.
#[derive(Debug, PartialEq)]
enum DbOperation {
    Upsert(ThreadMetadata),
    Delete(acp::SessionId),
}
211
212impl DbOperation {
213 fn id(&self) -> &acp::SessionId {
214 match self {
215 DbOperation::Upsert(thread) => &thread.session_id,
216 DbOperation::Delete(session_id) => session_id,
217 }
218 }
219}
220
221impl ThreadMetadataStore {
222 #[cfg(not(any(test, feature = "test-support")))]
223 pub fn init_global(cx: &mut App) {
224 if cx.has_global::<Self>() {
225 return;
226 }
227
228 let db = ThreadMetadataDb::global(cx);
229 let thread_store = cx.new(|cx| Self::new(db, cx));
230 cx.set_global(GlobalThreadMetadataStore(thread_store));
231 }
232
    /// Test variant: always installs a fresh store backed by a per-test DB.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        // Derive the DB name from the current thread's name so tests running
        // in parallel don't share database state.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
242
243 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
244 cx.try_global::<GlobalThreadMetadataStore>()
245 .map(|store| store.0.clone())
246 }
247
248 pub fn global(cx: &App) -> Entity<Self> {
249 cx.global::<GlobalThreadMetadataStore>().0.clone()
250 }
251
    /// Whether the store currently tracks no threads at all (archived or not).
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }
255
    /// Returns all thread IDs (cloned; iteration order is unspecified).
    pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
        self.threads.keys().cloned()
    }
260
    /// Returns the metadata for a specific thread, if it exists (archived or
    /// not).
    pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        self.threads.get(session_id)
    }
265
    /// Returns all threads, including archived ones; order is unspecified.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
270
271 /// Returns all archived threads.
272 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
273 self.entries().filter(|t| t.archived)
274 }
275
276 /// Returns all threads for the given path list, excluding archived threads.
277 pub fn entries_for_path(
278 &self,
279 path_list: &PathList,
280 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
281 self.threads_by_paths
282 .get(path_list)
283 .into_iter()
284 .flatten()
285 .filter_map(|s| self.threads.get(s))
286 .filter(|s| !s.archived)
287 }
288
289 /// Returns threads whose `main_worktree_paths` matches the given path list,
290 /// excluding archived threads. This finds threads that were opened in a
291 /// linked worktree but are associated with the given main worktree.
292 pub fn entries_for_main_worktree_path(
293 &self,
294 path_list: &PathList,
295 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
296 self.threads_by_main_paths
297 .get(path_list)
298 .into_iter()
299 .flatten()
300 .filter_map(|s| self.threads.get(s))
301 .filter(|s| !s.archived)
302 }
303
    /// Replace the in-memory caches with the current database contents.
    /// Returns a shared task so concurrent callers can await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any in-flight reload; its result would be stale anyway.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild all three indexes from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();

                    for row in rows {
                        this.threads_by_paths
                            .entry(row.folder_paths.clone())
                            .or_default()
                            .insert(row.session_id.clone());
                        // Only linked-worktree threads populate the
                        // main-paths index.
                        if !row.main_worktree_paths.is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths.clone())
                                .or_default()
                                .insert(row.session_id.clone());
                        }
                        this.threads.insert(row.session_id.clone(), row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
344
345 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
346 if !cx.has_flag::<AgentV2FeatureFlag>() {
347 return;
348 }
349
350 for metadata in metadata {
351 self.save_internal(metadata);
352 }
353 cx.notify();
354 }
355
    /// Test-only wrapper around [`Self::save`] so tests can persist metadata
    /// without going through thread events.
    #[cfg(any(test, feature = "test-support"))]
    pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save(metadata, cx)
    }
360
361 fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
362 if !cx.has_flag::<AgentV2FeatureFlag>() {
363 return;
364 }
365
366 self.save_internal(metadata);
367 cx.notify();
368 }
369
    /// Update the in-memory caches for `metadata` and queue an upsert for the
    /// background DB writer. Does not notify observers; callers do that.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the thread already exists under different paths, detach it from
        // the stale index buckets before re-inserting below.
        if let Some(thread) = self.threads.get(&metadata.session_id) {
            if thread.folder_paths != metadata.folder_paths {
                if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                    session_ids.remove(&metadata.session_id);
                }
            }
            if thread.main_worktree_paths != metadata.main_worktree_paths
                && !thread.main_worktree_paths.is_empty()
            {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&metadata.session_id);
                }
            }
        }

        self.threads
            .insert(metadata.session_id.clone(), metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths.clone())
            .or_default()
            .insert(metadata.session_id.clone());

        // Only linked-worktree threads populate the main-paths index; an
        // empty path list would otherwise create a catch-all bucket.
        if !metadata.main_worktree_paths.is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths.clone())
                .or_default()
                .insert(metadata.session_id.clone());
        }

        // The actual DB write happens asynchronously on the batching task.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
408
    /// Overwrite a known thread's folder paths with `work_dirs` and persist
    /// the change. No-op if the thread is unknown or Agent V2 is disabled.
    pub fn update_working_directories(
        &mut self,
        session_id: &acp::SessionId,
        work_dirs: PathList,
        cx: &mut Context<Self>,
    ) {
        if !cx.has_flag::<AgentV2FeatureFlag>() {
            return;
        }

        if let Some(thread) = self.threads.get(session_id) {
            self.save_internal(ThreadMetadata {
                folder_paths: work_dirs,
                ..thread.clone()
            });
            cx.notify();
        }
    }
427
    /// Mark a thread as archived (hidden from the sidebar).
    pub fn archive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, true, cx);
    }
431
    /// Mark a thread as unarchived (visible in the sidebar again).
    pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, false, cx);
    }
435
    /// After archived git worktrees have been restored to disk (possibly at
    /// new locations), rewrite the thread's folder paths accordingly. Each
    /// `(old, new)` pair replaces the first matching path.
    ///
    /// NOTE(review): unlike the other mutators in this store, this does not
    /// check the AgentV2 feature flag — confirm that's intentional.
    pub fn complete_worktree_restore(
        &mut self,
        session_id: &acp::SessionId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
            for (old_path, new_path) in path_replacements {
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            self.save_internal(ThreadMetadata {
                folder_paths: new_folder_paths,
                ..thread
            });
            cx.notify();
        }
    }
457
    /// Persist a record of a git worktree that is being archived (deleted
    /// from disk). Resolves to the new row's auto-generated ID, which callers
    /// can link to a thread via [`Self::link_thread_to_archived_worktree`].
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }
481
    /// Associate a thread with an archived-worktree row (by the ID returned
    /// from [`Self::create_archived_worktree`]).
    pub fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
                .await
        })
    }
494
    /// Fetch all archived-worktree records linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
    }
503
    /// Delete an archived-worktree record (and its thread links) by row ID.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }
508
509 pub fn all_session_ids_for_path<'a>(
510 &'a self,
511 path_list: &PathList,
512 ) -> impl Iterator<Item = &'a acp::SessionId> {
513 self.threads_by_paths
514 .get(path_list)
515 .into_iter()
516 .flat_map(|session_ids| session_ids.iter())
517 }
    /// Shared implementation for [`Self::archive`] / [`Self::unarchive`]:
    /// rewrites the thread's `archived` flag and persists it. No-op if the
    /// thread is unknown or Agent V2 is disabled.
    fn update_archived(
        &mut self,
        session_id: &acp::SessionId,
        archived: bool,
        cx: &mut Context<Self>,
    ) {
        if !cx.has_flag::<AgentV2FeatureFlag>() {
            return;
        }

        if let Some(thread) = self.threads.get(session_id) {
            self.save_internal(ThreadMetadata {
                archived,
                ..thread.clone()
            });
            cx.notify();
        }
    }
536
    /// Remove a thread from the in-memory caches and queue its deletion from
    /// the database. Safe to call for unknown IDs: the delete op is still
    /// queued and observers are still notified.
    pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
        if !cx.has_flag::<AgentV2FeatureFlag>() {
            return;
        }

        // Detach the thread from both path indexes before dropping it.
        if let Some(thread) = self.threads.get(&session_id) {
            if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                session_ids.remove(&session_id);
            }
            if !thread.main_worktree_paths.is_empty() {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&session_id);
                }
            }
        }
        self.threads.remove(&session_id);
        // The actual DB delete happens asynchronously on the batching task.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(session_id))
            .log_err();
        cx.notify();
    }
561
    /// Construct the store: observe every newly-created
    /// [`acp_thread::AcpThread`] so metadata stays in sync, start the
    /// background DB-write batcher, and trigger an initial load from the DB.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
            // Don't track subagent threads in the sidebar.
            if thread.parent_session_id().is_some() {
                return;
            }

            let thread_entity = cx.entity();

            // Drop our event subscription when the thread entity is released.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |thread, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            let session_id = thread.session_id().clone();
                            store.session_subscriptions.remove(&session_id);
                        })
                        .ok();
                }
            })
            .detach();

            // Forward this thread's events into `handle_thread_event` for as
            // long as the subscription is retained.
            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
                    this.session_subscriptions
                        .insert(thread.session_id().clone(), subscription);
                })
                .ok();
        })
        .detach();

        // Background task that drains queued DB operations in batches,
        // deduplicating so only the latest op per session hits the database.
        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    // Greedily collect everything already queued so one pass
                    // can dedup and apply the whole batch.
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(session_id) => {
                                db.delete(session_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            reload_task: None,
            session_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            _db_operations_task,
        };
        let _ = this.reload(cx);
        this
    }
633
634 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
635 let mut ops = HashMap::default();
636 for operation in operations.into_iter().rev() {
637 if ops.contains_key(operation.id()) {
638 continue;
639 }
640 ops.insert(operation.id().clone(), operation);
641 }
642 ops.into_values().collect()
643 }
644
645 fn handle_thread_event(
646 &mut self,
647 thread: Entity<acp_thread::AcpThread>,
648 event: &AcpThreadEvent,
649 cx: &mut Context<Self>,
650 ) {
651 // Don't track subagent threads in the sidebar.
652 if thread.read(cx).parent_session_id().is_some() {
653 return;
654 }
655
656 match event {
657 AcpThreadEvent::NewEntry
658 | AcpThreadEvent::TitleUpdated
659 | AcpThreadEvent::EntryUpdated(_)
660 | AcpThreadEvent::EntriesRemoved(_)
661 | AcpThreadEvent::ToolAuthorizationRequested(_)
662 | AcpThreadEvent::ToolAuthorizationReceived(_)
663 | AcpThreadEvent::Retry(_)
664 | AcpThreadEvent::Stopped(_)
665 | AcpThreadEvent::Error
666 | AcpThreadEvent::LoadError(_)
667 | AcpThreadEvent::Refusal
668 | AcpThreadEvent::WorkingDirectoriesUpdated => {
669 let thread_ref = thread.read(cx);
670 if thread_ref.entries().is_empty() {
671 return;
672 }
673
674 let existing_thread = self.threads.get(thread_ref.session_id());
675 let session_id = thread_ref.session_id().clone();
676 let title = thread_ref
677 .title()
678 .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());
679
680 let updated_at = Utc::now();
681
682 let created_at = existing_thread
683 .and_then(|t| t.created_at)
684 .unwrap_or_else(|| updated_at);
685
686 let agent_id = thread_ref.connection().agent_id();
687
688 let folder_paths = {
689 let project = thread_ref.project().read(cx);
690 let paths: Vec<Arc<Path>> = project
691 .visible_worktrees(cx)
692 .map(|worktree| worktree.read(cx).abs_path())
693 .collect();
694 PathList::new(&paths)
695 };
696
697 let main_worktree_paths = {
698 let project = thread_ref.project().read(cx);
699 let mut main_paths: Vec<Arc<Path>> = Vec::new();
700 for repo in project.repositories(cx).values() {
701 let snapshot = repo.read(cx).snapshot();
702 if snapshot.is_linked_worktree() {
703 main_paths.push(snapshot.original_repo_abs_path.clone());
704 }
705 }
706 main_paths.sort();
707 main_paths.dedup();
708 PathList::new(&main_paths)
709 };
710
711 // Threads without a folder path (e.g. started in an empty
712 // window) are archived by default so they don't get lost,
713 // because they won't show up in the sidebar. Users can reload
714 // them from the archive.
715 let archived = existing_thread
716 .map(|t| t.archived)
717 .unwrap_or(folder_paths.is_empty());
718
719 let metadata = ThreadMetadata {
720 session_id,
721 agent_id,
722 title,
723 created_at: Some(created_at),
724 updated_at,
725 folder_paths,
726 main_worktree_paths,
727 archived,
728 };
729
730 self.save(metadata, cx);
731 }
732 AcpThreadEvent::TokenUsageUpdated
733 | AcpThreadEvent::SubagentSpawned(_)
734 | AcpThreadEvent::PromptCapabilitiesUpdated
735 | AcpThreadEvent::AvailableCommandsUpdated(_)
736 | AcpThreadEvent::ModeUpdated(_)
737 | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
738 }
739 }
740}
741
// NOTE(review): `ThreadMetadataStore` itself is never installed as a global —
// only the `GlobalThreadMetadataStore` wrapper is. This impl appears to exist
// solely so the `cx.has_global::<Self>()` check in `init_global` compiles;
// confirm whether both are intentional.
impl Global for ThreadMetadataStore {}
743
/// Thin wrapper around the shared SQLite connection that stores sidebar
/// thread metadata and archived-git-worktree records.
struct ThreadMetadataDb(ThreadSafeConnection);
745
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Append-only migration list; each entry runs once, in order.
    const MIGRATIONS: &[&str] = &[
        // Base sidebar metadata table.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Archived git worktrees plus the thread <-> worktree link table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                commit_hash TEXT NOT NULL,
                restored INTEGER NOT NULL DEFAULT 0
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
        // Split the single WIP commit into staged/unstaged commits; backfill
        // existing rows from the legacy `commit_hash` value.
        sql!(
            ALTER TABLE archived_git_worktrees ADD COLUMN staged_commit_hash TEXT;
            ALTER TABLE archived_git_worktrees ADD COLUMN unstaged_commit_hash TEXT;
            UPDATE archived_git_worktrees SET staged_commit_hash = commit_hash, unstaged_commit_hash = commit_hash WHERE staged_commit_hash IS NULL;
        ),
        // Record the pre-archive HEAD commit; backfill from the legacy value.
        sql!(
            ALTER TABLE archived_git_worktrees ADD COLUMN original_commit_hash TEXT;
            UPDATE archived_git_worktrees SET original_commit_hash = commit_hash WHERE original_commit_hash IS NULL;
        ),
    ];
}
791
// Registers `ThreadMetadataDb` as a shared global connection with no parent
// domains.
db::static_connection!(ThreadMetadataDb, []);
793
794impl ThreadMetadataDb {
    /// IDs of all sidebar threads, most recently updated first.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }
801
    /// List all sidebar thread metadata, ordered by updated_at descending.
    /// The column order here must match `impl Column for ThreadMetadata`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }
810
    /// Upsert metadata for a thread.
    ///
    /// Normalization on the way in: the built-in Zed agent is stored as a
    /// NULL `agent_id`, timestamps as RFC 3339 strings, and empty path lists
    /// as NULL path/order columns.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths.serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths.serialize();
        let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
        {
            (None, None)
        } else {
            (Some(main_serialized.paths), Some(main_serialized.order))
        };
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
                ON CONFLICT(session_id) DO UPDATE SET \
                agent_id = excluded.agent_id, \
                title = excluded.title, \
                updated_at = excluded.updated_at, \
                created_at = excluded.created_at, \
                folder_paths = excluded.folder_paths, \
                folder_paths_order = excluded.folder_paths_order, \
                archived = excluded.archived, \
                main_worktree_paths = excluded.main_worktree_paths, \
                main_worktree_paths_order = excluded.main_worktree_paths_order";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the VALUES placeholder order above.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            stmt.bind(&main_worktree_paths_order, i)?;
            stmt.exec()
        })
        .await
    }
865
    /// Delete metadata for a single thread.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        // Clone the inner id so only it (not the whole SessionId) moves into
        // the write closure.
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
877
    /// Insert a new archived-worktree row and return its auto-generated ID.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, commit_hash, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            // The legacy NOT NULL `commit_hash` column is filled with the
            // unstaged WIP commit here. NOTE(review): confirm this is meant
            // to be `unstaged_commit_hash` rather than `staged_commit_hash`.
            i = stmt.bind(&unstaged_commit_hash, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }
905
    /// Associate a thread with an archived-worktree row in the link table.
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }
923
    /// All archived worktrees linked to the given thread. The selected column
    /// order must match `impl Column for ArchivedGitWorktree`.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.session_id = ?1",
        )?(session_id)
    }
935
    /// Delete an archived-worktree row and its thread links.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Remove link rows first so the REFERENCES constraint on
            // `archived_worktree_id` can't be violated.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
952}
953
954impl Column for ThreadMetadata {
955 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
956 let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
957 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
958 let (title, next): (String, i32) = Column::column(statement, next)?;
959 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
960 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
961 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
962 let (folder_paths_order_str, next): (Option<String>, i32) =
963 Column::column(statement, next)?;
964 let (archived, next): (bool, i32) = Column::column(statement, next)?;
965 let (main_worktree_paths_str, next): (Option<String>, i32) =
966 Column::column(statement, next)?;
967 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
968 Column::column(statement, next)?;
969
970 let agent_id = agent_id
971 .map(|id| AgentId::new(id))
972 .unwrap_or(ZED_AGENT_ID.clone());
973
974 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
975 let created_at = created_at_str
976 .as_deref()
977 .map(DateTime::parse_from_rfc3339)
978 .transpose()?
979 .map(|dt| dt.with_timezone(&Utc));
980
981 let folder_paths = folder_paths_str
982 .map(|paths| {
983 PathList::deserialize(&util::path_list::SerializedPathList {
984 paths,
985 order: folder_paths_order_str.unwrap_or_default(),
986 })
987 })
988 .unwrap_or_default();
989
990 let main_worktree_paths = main_worktree_paths_str
991 .map(|paths| {
992 PathList::deserialize(&util::path_list::SerializedPathList {
993 paths,
994 order: main_worktree_paths_order_str.unwrap_or_default(),
995 })
996 })
997 .unwrap_or_default();
998
999 Ok((
1000 ThreadMetadata {
1001 session_id: acp::SessionId::new(id),
1002 agent_id,
1003 title: title.into(),
1004 updated_at,
1005 created_at,
1006 folder_paths,
1007 main_worktree_paths,
1008 archived,
1009 },
1010 next,
1011 ))
1012 }
1013}
1014
1015impl Column for ArchivedGitWorktree {
1016 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1017 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1018 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1019 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1020 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1021 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1022 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1023 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1024
1025 Ok((
1026 ArchivedGitWorktree {
1027 id,
1028 worktree_path: PathBuf::from(worktree_path_str),
1029 main_repo_path: PathBuf::from(main_repo_path_str),
1030 branch_name,
1031 staged_commit_hash,
1032 unstaged_commit_hash,
1033 original_commit_hash,
1034 },
1035 next,
1036 ))
1037 }
1038}
1039
1040#[cfg(test)]
1041mod tests {
1042 use super::*;
1043 use acp_thread::{AgentConnection, StubAgentConnection};
1044 use action_log::ActionLog;
1045 use agent::DbThread;
1046 use agent_client_protocol as acp;
1047 use feature_flags::FeatureFlagAppExt;
1048 use gpui::TestAppContext;
1049 use project::FakeFs;
1050 use project::Project;
1051 use std::path::Path;
1052 use std::rc::Rc;
1053
1054 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1055 DbThread {
1056 title: title.to_string().into(),
1057 messages: Vec::new(),
1058 updated_at,
1059 detailed_summary: None,
1060 initial_project_snapshot: None,
1061 cumulative_token_usage: Default::default(),
1062 request_token_usage: Default::default(),
1063 model: None,
1064 profile: None,
1065 imported: false,
1066 subagent_context: None,
1067 speed: None,
1068 thinking_enabled: false,
1069 thinking_effort: None,
1070 draft_prompt: None,
1071 ui_scroll_position: None,
1072 }
1073 }
1074
1075 fn make_metadata(
1076 session_id: &str,
1077 title: &str,
1078 updated_at: DateTime<Utc>,
1079 folder_paths: PathList,
1080 ) -> ThreadMetadata {
1081 ThreadMetadata {
1082 archived: false,
1083 session_id: acp::SessionId::new(session_id),
1084 agent_id: agent::ZED_AGENT_ID.clone(),
1085 title: title.to_string().into(),
1086 updated_at,
1087 created_at: Some(updated_at),
1088 folder_paths,
1089 main_worktree_paths: PathList::default(),
1090 }
1091 }
1092
    // Shared setup for the gpui tests below: installs test settings, turns on
    // the `agent-v2` feature flag, initializes both global stores, then waits
    // for any initialization tasks they spawned to finish.
    fn init_test(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
            ThreadMetadataStore::init_global(cx);
            ThreadStore::init_global(cx);
        });
        cx.run_until_parked();
    }
1103
    // Seeds the database directly (before the global store exists) and checks
    // that `ThreadMetadataStore::init_global` loads those rows into its
    // in-memory cache, including the per-path index.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Derive a unique db name from the test thread so concurrently
        // running tests don't share database state.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Initialize the global store *after* the rows exist so its cache is
        // populated purely from the database (init_test is intentionally not
        // used here).
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // Each session should be indexed under exactly its own path.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1169
    // Exercises cache maintenance: saving two threads indexes them by path,
    // re-saving one under a different path moves it in the index, and
    // deleting removes it from both the id and path views.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 with project-b's paths: it should move from the
        // project-a index bucket to project-b's.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // The old path bucket must no longer list the moved session.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Deletion removes the session from both views.
            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1289
    // Migration must skip sessions that already have metadata (their existing
    // entry wins over the native-store copy) and import the rest, marking
    // newly migrated entries archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for a-session-0; the native store will also
        // hold a thread with this id but a different title.
        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            folder_paths: project_a_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // (session id, title, folder paths, updated_at) tuples to seed the
        // native thread store with.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing entry keeps its original title — migration must
        // not overwrite it with the native store's copy.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // Every entry created by the migration (i.e. all but a-session-0)
        // should arrive archived.
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1398
    // When every native-store thread already has metadata, running the
    // migration must not add entries or clobber the existing ones.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            folder_paths: project_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Seed the native store with the same session id but a newer title
        // and timestamp; the migration must still skip it.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1455
    // On first migration, only the five most recently updated threads per
    // project stay unarchived; anything older is archived. Projects under the
    // limit keep all threads unarchived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the first five are the "keep" set.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1545
    // A thread with no content (only a title change) must not produce a
    // metadata entry; the first real user content block does.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Title-only update on an otherwise empty thread.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // First actual content should create the metadata entry.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1597
    // Dropping a thread entity that already has content must not remove its
    // metadata from the store.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Release the entity; the empty update flushes entity drops so any
        // release observers run before we re-check the store.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1643
    // A thread created in a project with no worktrees gets empty folder paths
    // and is archived by default; a thread with a worktree records its paths
    // and stays unarchived.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Push content + title so the store records metadata for the thread.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths.is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                with_worktree.folder_paths,
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
1723
    // Subagent threads (AcpThreads constructed with a parent session id) must
    // not get sidebar metadata entries; only top-level threads do.
    #[gpui::test]
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                // The first argument (parent session id) is what marks this
                // thread as a subagent.
                acp_thread::AcpThread::new(
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
            but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
1800
1801 #[test]
1802 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1803 let now = Utc::now();
1804
1805 let operations = vec![
1806 DbOperation::Upsert(make_metadata(
1807 "session-1",
1808 "First Thread",
1809 now,
1810 PathList::default(),
1811 )),
1812 DbOperation::Delete(acp::SessionId::new("session-1")),
1813 ];
1814
1815 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1816
1817 assert_eq!(deduped.len(), 1);
1818 assert_eq!(
1819 deduped[0],
1820 DbOperation::Delete(acp::SessionId::new("session-1"))
1821 );
1822 }
1823
1824 #[test]
1825 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1826 let now = Utc::now();
1827 let later = now + chrono::Duration::seconds(1);
1828
1829 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1830 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1831
1832 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1833 DbOperation::Upsert(old_metadata),
1834 DbOperation::Upsert(new_metadata.clone()),
1835 ]);
1836
1837 assert_eq!(deduped.len(), 1);
1838 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1839 }
1840
1841 #[test]
1842 fn test_dedup_db_operations_preserves_distinct_sessions() {
1843 let now = Utc::now();
1844
1845 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1846 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1847 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1848 DbOperation::Upsert(metadata1.clone()),
1849 DbOperation::Upsert(metadata2.clone()),
1850 ]);
1851
1852 assert_eq!(deduped.len(), 2);
1853 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1854 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1855 }
1856
    // Round-trips a thread through archive/unarchive, checking that the
    // per-path index and the archived-entries view stay consistent at each
    // step.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially: visible under its path, not archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // After archive: hidden from the path view, present in archived view.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // After unarchive: back to the initial state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
1942
    // `entries_for_path` must omit archived threads while `entries` still
    // lists everything and `archived_entries` lists only the archived ones.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Path view: only the unarchived thread.
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // Full view: both threads, regardless of archive state.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2002
    // A single `save_all` call must land all provided threads in the cache.
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
2050
    // The archived flag must survive a reload from the database — i.e. it is
    // persisted, not just a cache-side attribute.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        // Drop the cache and repopulate it from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2109
    // Archiving an unknown session id must neither create an entry nor panic.
    #[gpui::test]
    async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
        init_test(cx);

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("nonexistent"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // The store must remain completely empty.
            assert!(store.is_empty());
            assert_eq!(store.entries().count(), 0);
            assert_eq!(store.archived_entries().count(), 0);
        });
    }
2134
2135 #[gpui::test]
2136 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2137 init_test(cx);
2138
2139 let paths = PathList::new(&[Path::new("/project-a")]);
2140 let now = Utc::now();
2141 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2142 let session_id = metadata.session_id.clone();
2143
2144 cx.update(|cx| {
2145 let store = ThreadMetadataStore::global(cx);
2146 store.update(cx, |store, cx| {
2147 store.save(metadata.clone(), cx);
2148 store.archive(&session_id, cx);
2149 });
2150 });
2151
2152 cx.run_until_parked();
2153
2154 cx.update(|cx| {
2155 let store = ThreadMetadataStore::global(cx);
2156 let store = store.read(cx);
2157
2158 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2159 pretty_assertions::assert_eq!(
2160 entries,
2161 vec![ThreadMetadata {
2162 archived: true,
2163 ..metadata
2164 }]
2165 );
2166 });
2167 }
2168
2169 #[gpui::test]
2170 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2171 init_test(cx);
2172 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2173
2174 let id = store
2175 .read_with(cx, |store, cx| {
2176 store.create_archived_worktree(
2177 "/tmp/worktree".to_string(),
2178 "/home/user/repo".to_string(),
2179 Some("feature-branch".to_string()),
2180 "staged_aaa".to_string(),
2181 "unstaged_bbb".to_string(),
2182 "original_000".to_string(),
2183 cx,
2184 )
2185 })
2186 .await
2187 .unwrap();
2188
2189 store
2190 .read_with(cx, |store, cx| {
2191 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2192 })
2193 .await
2194 .unwrap();
2195
2196 let worktrees = store
2197 .read_with(cx, |store, cx| {
2198 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2199 })
2200 .await
2201 .unwrap();
2202
2203 assert_eq!(worktrees.len(), 1);
2204 let wt = &worktrees[0];
2205 assert_eq!(wt.id, id);
2206 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2207 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2208 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2209 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2210 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2211 assert_eq!(wt.original_commit_hash, "original_000");
2212 }
2213
2214 #[gpui::test]
2215 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2216 init_test(cx);
2217 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2218
2219 let id = store
2220 .read_with(cx, |store, cx| {
2221 store.create_archived_worktree(
2222 "/tmp/worktree".to_string(),
2223 "/home/user/repo".to_string(),
2224 Some("main".to_string()),
2225 "deadbeef".to_string(),
2226 "deadbeef".to_string(),
2227 "original_000".to_string(),
2228 cx,
2229 )
2230 })
2231 .await
2232 .unwrap();
2233
2234 store
2235 .read_with(cx, |store, cx| {
2236 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2237 })
2238 .await
2239 .unwrap();
2240
2241 store
2242 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2243 .await
2244 .unwrap();
2245
2246 let worktrees = store
2247 .read_with(cx, |store, cx| {
2248 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2249 })
2250 .await
2251 .unwrap();
2252 assert!(worktrees.is_empty());
2253 }
2254
2255 #[gpui::test]
2256 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2257 init_test(cx);
2258 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2259
2260 let id = store
2261 .read_with(cx, |store, cx| {
2262 store.create_archived_worktree(
2263 "/tmp/worktree".to_string(),
2264 "/home/user/repo".to_string(),
2265 None,
2266 "abc123".to_string(),
2267 "abc123".to_string(),
2268 "original_000".to_string(),
2269 cx,
2270 )
2271 })
2272 .await
2273 .unwrap();
2274
2275 store
2276 .read_with(cx, |store, cx| {
2277 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2278 })
2279 .await
2280 .unwrap();
2281
2282 store
2283 .read_with(cx, |store, cx| {
2284 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2285 })
2286 .await
2287 .unwrap();
2288
2289 let wt1 = store
2290 .read_with(cx, |store, cx| {
2291 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2292 })
2293 .await
2294 .unwrap();
2295
2296 let wt2 = store
2297 .read_with(cx, |store, cx| {
2298 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2299 })
2300 .await
2301 .unwrap();
2302
2303 assert_eq!(wt1.len(), 1);
2304 assert_eq!(wt2.len(), 1);
2305 assert_eq!(wt1[0].id, wt2[0].id);
2306 }
2307
2308 // Verifies that all_session_ids_for_path returns both archived and
2309 // unarchived threads. This is intentional: the method is used during
2310 // archival to find every thread referencing a worktree so they can
2311 // all be linked to the archived worktree record.
2312 #[gpui::test]
2313 async fn test_all_session_ids_for_path(cx: &mut TestAppContext) {
2314 init_test(cx);
2315 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2316 let paths = PathList::new(&[Path::new("/project-x")]);
2317
2318 let meta1 = ThreadMetadata {
2319 session_id: acp::SessionId::new("session-1"),
2320 agent_id: agent::ZED_AGENT_ID.clone(),
2321 title: "Thread 1".into(),
2322 updated_at: Utc::now(),
2323 created_at: Some(Utc::now()),
2324 folder_paths: paths.clone(),
2325 main_worktree_paths: PathList::default(),
2326 archived: false,
2327 };
2328 let meta2 = ThreadMetadata {
2329 session_id: acp::SessionId::new("session-2"),
2330 agent_id: agent::ZED_AGENT_ID.clone(),
2331 title: "Thread 2".into(),
2332 updated_at: Utc::now(),
2333 created_at: Some(Utc::now()),
2334 folder_paths: paths.clone(),
2335 main_worktree_paths: PathList::default(),
2336 archived: true,
2337 };
2338
2339 store.update(cx, |store, _cx| {
2340 store.save_internal(meta1);
2341 store.save_internal(meta2);
2342 });
2343
2344 let ids: HashSet<acp::SessionId> = store.read_with(cx, |store, _cx| {
2345 store.all_session_ids_for_path(&paths).cloned().collect()
2346 });
2347
2348 assert!(ids.contains(&acp::SessionId::new("session-1")));
2349 assert!(ids.contains(&acp::SessionId::new("session-2")));
2350 assert_eq!(ids.len(), 2);
2351 }
2352
2353 #[gpui::test]
2354 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
2355 init_test(cx);
2356 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2357
2358 let original_paths = PathList::new(&[
2359 Path::new("/projects/worktree-a"),
2360 Path::new("/projects/worktree-b"),
2361 Path::new("/other/unrelated"),
2362 ]);
2363 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2364
2365 store.update(cx, |store, cx| {
2366 store.save_manually(meta, cx);
2367 });
2368
2369 let replacements = vec![
2370 (
2371 PathBuf::from("/projects/worktree-a"),
2372 PathBuf::from("/restored/worktree-a"),
2373 ),
2374 (
2375 PathBuf::from("/projects/worktree-b"),
2376 PathBuf::from("/restored/worktree-b"),
2377 ),
2378 ];
2379
2380 store.update(cx, |store, cx| {
2381 store.complete_worktree_restore(
2382 &acp::SessionId::new("session-multi"),
2383 &replacements,
2384 cx,
2385 );
2386 });
2387
2388 let entry = store.read_with(cx, |store, _cx| {
2389 store.entry(&acp::SessionId::new("session-multi")).cloned()
2390 });
2391 let entry = entry.unwrap();
2392 let paths = entry.folder_paths.paths();
2393 assert_eq!(paths.len(), 3);
2394 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2395 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2396 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2397 }
2398
2399 #[gpui::test]
2400 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
2401 init_test(cx);
2402 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2403
2404 let original_paths =
2405 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2406 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2407
2408 store.update(cx, |store, cx| {
2409 store.save_manually(meta, cx);
2410 });
2411
2412 let replacements = vec![
2413 (
2414 PathBuf::from("/projects/worktree-a"),
2415 PathBuf::from("/new/worktree-a"),
2416 ),
2417 (
2418 PathBuf::from("/nonexistent/path"),
2419 PathBuf::from("/should/not/appear"),
2420 ),
2421 ];
2422
2423 store.update(cx, |store, cx| {
2424 store.complete_worktree_restore(
2425 &acp::SessionId::new("session-partial"),
2426 &replacements,
2427 cx,
2428 );
2429 });
2430
2431 let entry = store.read_with(cx, |store, _cx| {
2432 store
2433 .entry(&acp::SessionId::new("session-partial"))
2434 .cloned()
2435 });
2436 let entry = entry.unwrap();
2437 let paths = entry.folder_paths.paths();
2438 assert_eq!(paths.len(), 2);
2439 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2440 assert!(paths.contains(&PathBuf::from("/other/path")));
2441 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2442 }
2443
2444 #[gpui::test]
2445 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2446 init_test(cx);
2447 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2448
2449 let id1 = store
2450 .read_with(cx, |store, cx| {
2451 store.create_archived_worktree(
2452 "/projects/worktree-a".to_string(),
2453 "/home/user/repo".to_string(),
2454 Some("branch-a".to_string()),
2455 "staged_a".to_string(),
2456 "unstaged_a".to_string(),
2457 "original_000".to_string(),
2458 cx,
2459 )
2460 })
2461 .await
2462 .unwrap();
2463
2464 let id2 = store
2465 .read_with(cx, |store, cx| {
2466 store.create_archived_worktree(
2467 "/projects/worktree-b".to_string(),
2468 "/home/user/repo".to_string(),
2469 Some("branch-b".to_string()),
2470 "staged_b".to_string(),
2471 "unstaged_b".to_string(),
2472 "original_000".to_string(),
2473 cx,
2474 )
2475 })
2476 .await
2477 .unwrap();
2478
2479 store
2480 .read_with(cx, |store, cx| {
2481 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2482 })
2483 .await
2484 .unwrap();
2485
2486 store
2487 .read_with(cx, |store, cx| {
2488 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2489 })
2490 .await
2491 .unwrap();
2492
2493 let worktrees = store
2494 .read_with(cx, |store, cx| {
2495 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2496 })
2497 .await
2498 .unwrap();
2499
2500 assert_eq!(worktrees.len(), 2);
2501
2502 let paths: Vec<&Path> = worktrees
2503 .iter()
2504 .map(|w| w.worktree_path.as_path())
2505 .collect();
2506 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2507 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2508 }
2509}