1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use futures::{FutureExt as _, future::Shared};
20use gpui::{AppContext as _, Entity, Global, Subscription, Task};
21use project::AgentId;
22use ui::{App, Context, SharedString};
23use util::ResultExt as _;
24use workspace::PathList;
25
26use crate::DEFAULT_THREAD_TITLE;
27
/// Initializes the global [`ThreadMetadataStore`] and kicks off the one-time
/// migration of legacy native-agent thread metadata into it.
pub fn init(cx: &mut App) {
    ThreadMetadataStore::init_global(cx);
    migrate_thread_metadata(cx);
}
32
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // Session ids already present in the metadata DB; those are skipped below
        // so re-running the migration never duplicates or overwrites entries.
        let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();

        let is_first_migration = existing_entries.is_empty();

        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_entries.contains(&entry.id.0) {
                        return None;
                    }

                    // Migrated threads start archived; the first migration
                    // selectively unarchives the most recent ones below.
                    Some(ThreadMetadata {
                        session_id: entry.id,
                        agent_id: ZED_AGENT_ID.clone(),
                        title: entry.title,
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        folder_paths: entry.folder_paths,
                        main_worktree_paths: PathList::default(),
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without folder paths stay archived; they wouldn't
                // appear under any project in the sidebar anyway.
                if entry.folder_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.folder_paths.clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Most recently updated first.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
    .detach_and_log_err(cx);
}
109
/// Newtype wrapper so the [`ThreadMetadataStore`] entity can be stored as a
/// GPUI global (see `init_global` / `global`).
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
112
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Unique id of the session this metadata describes.
    pub session_id: acp::SessionId,
    /// The agent the thread belongs to (the built-in Zed agent or an external one).
    pub agent_id: AgentId,
    /// Human-readable title shown in the sidebar.
    pub title: SharedString,
    /// Last time the thread's metadata was updated.
    pub updated_at: DateTime<Utc>,
    /// Creation time; `None` when unknown (nullable in the DB).
    pub created_at: Option<DateTime<Utc>>,
    /// Paths of the folders/worktrees the thread was opened with.
    pub folder_paths: PathList,
    /// Paths of the owning main git worktree when the thread was opened in a
    /// linked worktree; empty otherwise (empty lists are not indexed).
    pub main_worktree_paths: PathList,
    /// Whether the thread is hidden from the sidebar and shown in the archive.
    pub archived: bool,
}
126
127impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
128 fn from(meta: &ThreadMetadata) -> Self {
129 Self {
130 session_id: meta.session_id.clone(),
131 work_dirs: Some(meta.folder_paths.clone()),
132 title: Some(meta.title.clone()),
133 updated_at: Some(meta.updated_at),
134 created_at: meta.created_at,
135 meta: None,
136 }
137 }
138}
139
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// Persisted in the `archived_git_worktrees` table and linked to threads via
/// `thread_archived_worktrees`.
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
181
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Automatically listens to AcpThread events and updates metadata if it has changed.
pub struct ThreadMetadataStore {
    /// Handle to the SQLite-backed metadata database.
    db: ThreadMetadataDb,
    /// All known threads, keyed by session id.
    threads: HashMap<acp::SessionId, ThreadMetadata>,
    /// Index from folder paths to the sessions opened with those paths.
    threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// Index from main-worktree paths to sessions opened in linked worktrees.
    /// Sessions with empty `main_worktree_paths` are not indexed here.
    threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    /// In-progress reload from the database, shared so callers can await it.
    reload_task: Option<Shared<Task<()>>>,
    /// Event subscriptions for live `AcpThread` entities, keyed by session;
    /// removed when the thread entity is released.
    session_subscriptions: HashMap<acp::SessionId, Subscription>,
    /// Queue of pending DB writes, drained in batches by `_db_operations_task`.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    /// Background task that batches, dedups, and applies queued DB operations.
    _db_operations_task: Task<()>,
    /// Archive work still running per session; dropping the sender cancels it.
    in_flight_archives: HashMap<acp::SessionId, (Task<()>, smol::channel::Sender<()>)>,
}
196
/// A pending database write, queued so writes can be batched and deduplicated
/// before hitting SQLite.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert or update the metadata row for a session.
    Upsert(ThreadMetadata),
    /// Delete the metadata row for a session.
    Delete(acp::SessionId),
}
202
203impl DbOperation {
204 fn id(&self) -> &acp::SessionId {
205 match self {
206 DbOperation::Upsert(thread) => &thread.session_id,
207 DbOperation::Delete(session_id) => session_id,
208 }
209 }
210}
211
212impl ThreadMetadataStore {
213 #[cfg(not(any(test, feature = "test-support")))]
214 pub fn init_global(cx: &mut App) {
215 if cx.has_global::<Self>() {
216 return;
217 }
218
219 let db = ThreadMetadataDb::global(cx);
220 let thread_store = cx.new(|cx| Self::new(db, cx));
221 cx.set_global(GlobalThreadMetadataStore(thread_store));
222 }
223
    /// Test variant: opens a fresh database named after the current thread so
    /// parallel tests don't share state. Unlike the release variant, this does
    /// not guard against double initialization.
    #[cfg(any(test, feature = "test-support"))]
    pub fn init_global(cx: &mut App) {
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
233
234 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
235 cx.try_global::<GlobalThreadMetadataStore>()
236 .map(|store| store.0.clone())
237 }
238
239 pub fn global(cx: &App) -> Entity<Self> {
240 cx.global::<GlobalThreadMetadataStore>().0.clone()
241 }
242
243 pub fn is_empty(&self) -> bool {
244 self.threads.is_empty()
245 }
246
247 /// Returns all thread IDs.
248 pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
249 self.threads.keys().cloned()
250 }
251
252 /// Returns the metadata for a specific thread, if it exists.
253 pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
254 self.threads.get(session_id)
255 }
256
257 /// Returns all threads.
258 pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
259 self.threads.values()
260 }
261
262 /// Returns all archived threads.
263 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
264 self.entries().filter(|t| t.archived)
265 }
266
267 /// Returns all threads for the given path list, excluding archived threads.
268 pub fn entries_for_path(
269 &self,
270 path_list: &PathList,
271 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
272 self.threads_by_paths
273 .get(path_list)
274 .into_iter()
275 .flatten()
276 .filter_map(|s| self.threads.get(s))
277 .filter(|s| !s.archived)
278 }
279
280 /// Returns threads whose `main_worktree_paths` matches the given path list,
281 /// excluding archived threads. This finds threads that were opened in a
282 /// linked worktree but are associated with the given main worktree.
283 pub fn entries_for_main_worktree_path(
284 &self,
285 path_list: &PathList,
286 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
287 self.threads_by_main_paths
288 .get(path_list)
289 .into_iter()
290 .flatten()
291 .filter_map(|s| self.threads.get(s))
292 .filter(|s| !s.archived)
293 }
294
    /// Replaces the in-memory caches with the current database contents.
    ///
    /// Returns a `Shared` task so multiple callers can await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any previous reload task; its result would be stale.
        self.reload_task.take();

        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the map and both path indices from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();

                    for row in rows {
                        this.threads_by_paths
                            .entry(row.folder_paths.clone())
                            .or_default()
                            .insert(row.session_id.clone());
                        // Only linked-worktree threads carry main worktree paths.
                        if !row.main_worktree_paths.is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths.clone())
                                .or_default()
                                .insert(row.session_id.clone());
                        }
                        this.threads.insert(row.session_id.clone(), row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
335
    /// Saves a batch of metadata entries, notifying observers only once.
    pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
        for metadata in metadata {
            self.save_internal(metadata);
        }
        cx.notify();
    }

    /// Test-only entry point for saving a single metadata entry directly.
    #[cfg(any(test, feature = "test-support"))]
    pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save(metadata, cx)
    }

    /// Saves a single metadata entry and notifies observers.
    fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
352
    /// Updates the in-memory caches for one thread and queues a DB upsert.
    ///
    /// Does not notify observers; callers are responsible for `cx.notify()`.
    fn save_internal(&mut self, metadata: ThreadMetadata) {
        // If the thread already exists and its paths changed, remove it from
        // the old index buckets before inserting it under the new keys.
        if let Some(thread) = self.threads.get(&metadata.session_id) {
            if thread.folder_paths != metadata.folder_paths {
                if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                    session_ids.remove(&metadata.session_id);
                }
            }
            if thread.main_worktree_paths != metadata.main_worktree_paths
                && !thread.main_worktree_paths.is_empty()
            {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&metadata.session_id);
                }
            }
        }

        self.threads
            .insert(metadata.session_id.clone(), metadata.clone());

        self.threads_by_paths
            .entry(metadata.folder_paths.clone())
            .or_default()
            .insert(metadata.session_id.clone());

        // Empty main worktree paths are deliberately not indexed.
        if !metadata.main_worktree_paths.is_empty() {
            self.threads_by_main_paths
                .entry(metadata.main_worktree_paths.clone())
                .or_default()
                .insert(metadata.session_id.clone());
        }

        // Persist asynchronously; the background task batches these writes.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Upsert(metadata))
            .log_err();
    }
391
392 pub fn update_working_directories(
393 &mut self,
394 session_id: &acp::SessionId,
395 work_dirs: PathList,
396 cx: &mut Context<Self>,
397 ) {
398 if let Some(thread) = self.threads.get(session_id) {
399 self.save_internal(ThreadMetadata {
400 folder_paths: work_dirs,
401 ..thread.clone()
402 });
403 cx.notify();
404 }
405 }
406
    /// Marks a thread as archived.
    ///
    /// `in_flight` optionally carries the background task performing the
    /// archive work plus a sender whose drop cancels that task; it is kept
    /// until the work completes or the thread is unarchived.
    pub fn archive(
        &mut self,
        session_id: &acp::SessionId,
        in_flight: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(session_id, true, cx);

        if let Some(in_flight) = in_flight {
            self.in_flight_archives
                .insert(session_id.clone(), in_flight);
        }
    }

    /// Marks a thread as not archived and cancels any in-flight archive work.
    pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(session_id);
    }

    /// Drops the bookkeeping for an archive task that finished on its own.
    pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) {
        self.in_flight_archives.remove(session_id);
    }
430
431 pub fn complete_worktree_restore(
432 &mut self,
433 session_id: &acp::SessionId,
434 path_replacements: &[(PathBuf, PathBuf)],
435 cx: &mut Context<Self>,
436 ) {
437 if let Some(thread) = self.threads.get(session_id).cloned() {
438 let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
439 for (old_path, new_path) in path_replacements {
440 if let Some(pos) = paths.iter().position(|p| p == old_path) {
441 paths[pos] = new_path.clone();
442 }
443 }
444 let new_folder_paths = PathList::new(&paths);
445 self.save_internal(ThreadMetadata {
446 folder_paths: new_folder_paths,
447 ..thread
448 });
449 cx.notify();
450 }
451 }
452
    /// Records a newly archived git worktree in the database, returning its row id.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Associates a thread with an archived worktree record (many-to-many).
    pub fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
                .await
        })
    }

    /// Fetches all archived worktrees linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
    }

    /// Deletes an archived worktree record along with its thread links.
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Removes every archived-worktree link for the given thread.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(session_id)
                .await
        })
    }

    /// Whether any thread still links to the given archived worktree.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }
527
528 pub fn all_session_ids_for_path<'a>(
529 &'a self,
530 path_list: &PathList,
531 ) -> impl Iterator<Item = &'a acp::SessionId> {
532 self.threads_by_paths
533 .get(path_list)
534 .into_iter()
535 .flat_map(|session_ids| session_ids.iter())
536 }
537 fn update_archived(
538 &mut self,
539 session_id: &acp::SessionId,
540 archived: bool,
541 cx: &mut Context<Self>,
542 ) {
543 if let Some(thread) = self.threads.get(session_id) {
544 self.save_internal(ThreadMetadata {
545 archived,
546 ..thread.clone()
547 });
548 cx.notify();
549 }
550 }
551
552 pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
553 if let Some(thread) = self.threads.get(&session_id) {
554 if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
555 session_ids.remove(&session_id);
556 }
557 if !thread.main_worktree_paths.is_empty() {
558 if let Some(session_ids) = self
559 .threads_by_main_paths
560 .get_mut(&thread.main_worktree_paths)
561 {
562 session_ids.remove(&session_id);
563 }
564 }
565 }
566 self.threads.remove(&session_id);
567 self.pending_thread_ops_tx
568 .try_send(DbOperation::Delete(session_id))
569 .log_err();
570 cx.notify();
571 }
572
    /// Builds the store: subscribes to newly created `AcpThread` entities,
    /// starts the background task that flushes queued DB writes, and kicks
    /// off an initial load from the database.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Watch every AcpThread created anywhere in the app.
        cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
            // Don't track subagent threads in the sidebar.
            if thread.parent_session_id().is_some() {
                return;
            }

            let thread_entity = cx.entity();

            // Drop our event subscription once the thread entity is released.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |thread, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            let session_id = thread.session_id().clone();
                            store.session_subscriptions.remove(&session_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
                    this.session_subscriptions
                        .insert(thread.session_id().clone(), subscription);
                })
                .ok();
        })
        .detach();

        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                // Block for the first queued operation, then drain whatever
                // else is already pending so a burst becomes one batch.
                while let Ok(first_update) = rx.recv().await {
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    // Only the newest operation per session needs to hit the DB.
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(session_id) => {
                                db.delete(session_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            reload_task: None,
            session_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            _db_operations_task,
            in_flight_archives: HashMap::default(),
        };
        let _ = this.reload(cx);
        this
    }
645
646 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
647 let mut ops = HashMap::default();
648 for operation in operations.into_iter().rev() {
649 if ops.contains_key(operation.id()) {
650 continue;
651 }
652 ops.insert(operation.id().clone(), operation);
653 }
654 ops.into_values().collect()
655 }
656
657 fn handle_thread_event(
658 &mut self,
659 thread: Entity<acp_thread::AcpThread>,
660 event: &AcpThreadEvent,
661 cx: &mut Context<Self>,
662 ) {
663 // Don't track subagent threads in the sidebar.
664 if thread.read(cx).parent_session_id().is_some() {
665 return;
666 }
667
668 match event {
669 AcpThreadEvent::NewEntry
670 | AcpThreadEvent::TitleUpdated
671 | AcpThreadEvent::EntryUpdated(_)
672 | AcpThreadEvent::EntriesRemoved(_)
673 | AcpThreadEvent::ToolAuthorizationRequested(_)
674 | AcpThreadEvent::ToolAuthorizationReceived(_)
675 | AcpThreadEvent::Retry(_)
676 | AcpThreadEvent::Stopped(_)
677 | AcpThreadEvent::Error
678 | AcpThreadEvent::LoadError(_)
679 | AcpThreadEvent::Refusal
680 | AcpThreadEvent::WorkingDirectoriesUpdated => {
681 let thread_ref = thread.read(cx);
682 if thread_ref.entries().is_empty() {
683 return;
684 }
685
686 let existing_thread = self.threads.get(thread_ref.session_id());
687 let session_id = thread_ref.session_id().clone();
688 let title = thread_ref
689 .title()
690 .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());
691
692 let updated_at = Utc::now();
693
694 let created_at = existing_thread
695 .and_then(|t| t.created_at)
696 .unwrap_or_else(|| updated_at);
697
698 let agent_id = thread_ref.connection().agent_id();
699
700 let folder_paths = {
701 let project = thread_ref.project().read(cx);
702 let paths: Vec<Arc<Path>> = project
703 .visible_worktrees(cx)
704 .map(|worktree| worktree.read(cx).abs_path())
705 .collect();
706 PathList::new(&paths)
707 };
708
709 let main_worktree_paths = thread_ref
710 .project()
711 .read(cx)
712 .project_group_key(cx)
713 .path_list()
714 .clone();
715
716 // Threads without a folder path (e.g. started in an empty
717 // window) are archived by default so they don't get lost,
718 // because they won't show up in the sidebar. Users can reload
719 // them from the archive.
720 let archived = existing_thread
721 .map(|t| t.archived)
722 .unwrap_or(folder_paths.is_empty());
723
724 let metadata = ThreadMetadata {
725 session_id,
726 agent_id,
727 title,
728 created_at: Some(created_at),
729 updated_at,
730 folder_paths,
731 main_worktree_paths,
732 archived,
733 };
734
735 self.save(metadata, cx);
736 }
737 AcpThreadEvent::TokenUsageUpdated
738 | AcpThreadEvent::SubagentSpawned(_)
739 | AcpThreadEvent::PromptCapabilitiesUpdated
740 | AcpThreadEvent::AvailableCommandsUpdated(_)
741 | AcpThreadEvent::ModeUpdated(_)
742 | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
743 }
744 }
745}
746
// NOTE(review): `ThreadMetadataStore` is marked `Global` directly in addition
// to the `GlobalThreadMetadataStore` wrapper, yet nothing in this file ever
// sets it via `set_global` — only the wrapper is. Confirm whether this impl
// (and the `has_global::<Self>()` check in `init_global`) is intentional.
impl Global for ThreadMetadataStore {}
748
/// Newtype over the shared SQLite connection used for thread metadata.
struct ThreadMetadataDb(ThreadSafeConnection);
750
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // NOTE: treat this list as append-only — existing entries should not be
    // edited once shipped; add new migrations at the end instead.
    const MIGRATIONS: &[&str] = &[
        // Initial sidebar thread metadata table.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // Archived git worktrees plus their many-to-many links to threads.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
    ];
}
788
// Registers `ThreadMetadataDb` as the file's global database connection.
db::static_connection!(ThreadMetadataDb, []);
790
791impl ThreadMetadataDb {
    /// Returns just the session ids of all stored threads, newest first.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    ///
    /// The column order must match `ThreadMetadata`'s `Column` impl exactly.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }
807
    /// Upsert metadata for a thread.
    ///
    /// The built-in Zed agent id and empty path lists are stored as NULL.
    /// Timestamps are stored as RFC 3339 text (always UTC, so lexicographic
    /// `ORDER BY updated_at` sorts chronologically).
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        let serialized = row.folder_paths.serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths.serialize();
        let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
        {
            (None, None)
        } else {
            (Some(main_serialized.paths), Some(main_serialized.order))
        };
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
                ON CONFLICT(session_id) DO UPDATE SET \
                agent_id = excluded.agent_id, \
                title = excluded.title, \
                updated_at = excluded.updated_at, \
                created_at = excluded.created_at, \
                folder_paths = excluded.folder_paths, \
                folder_paths_order = excluded.folder_paths_order, \
                archived = excluded.archived, \
                main_worktree_paths = excluded.main_worktree_paths, \
                main_worktree_paths_order = excluded.main_worktree_paths_order";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the column list in the SQL above.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            stmt.bind(&main_worktree_paths_order, i)?;
            stmt.exec()
        })
        .await
    }
862
    /// Delete metadata for a single thread.
    ///
    /// Succeeds even if no matching row exists.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
874
    /// Inserts a new archived-worktree record and returns its row id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                RETURNING id",
            )?;
            // Bind order must match the column list in the SQL above.
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Links a thread to an archived worktree (many-to-many join row).
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// Fetches all archived worktrees linked to the given thread.
    ///
    /// The SELECT column order must match `ArchivedGitWorktree`'s `Column` impl.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
            FROM archived_git_worktrees a \
            JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
            WHERE t.session_id = ?1",
        )?(session_id)
    }
931
    /// Deletes an archived worktree record along with any thread links.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Delete the link rows first so the REFERENCES constraint on
            // `thread_archived_worktrees` can't be violated.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Removes every archived-worktree link for the given thread. The
    /// archived worktree rows themselves are left in place.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE session_id = ?",
            )?;
            stmt.bind(&session_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Whether at least one thread still links to the given archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        .map(|count| count.unwrap_or(0) > 0)
    }
973}
974
impl Column for ThreadMetadata {
    /// Decodes one row produced by `ThreadMetadataDb::list`.
    ///
    /// The read order here must match that query's SELECT column list exactly.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
        let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (title, next): (String, i32) = Column::column(statement, next)?;
        let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
        let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
        let (folder_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (archived, next): (bool, i32) = Column::column(statement, next)?;
        let (main_worktree_paths_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;
        let (main_worktree_paths_order_str, next): (Option<String>, i32) =
            Column::column(statement, next)?;

        // A NULL agent_id denotes the built-in Zed agent (see `save`).
        let agent_id = agent_id
            .map(|id| AgentId::new(id))
            .unwrap_or(ZED_AGENT_ID.clone());

        // Timestamps were written with `to_rfc3339`.
        let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
        let created_at = created_at_str
            .as_deref()
            .map(DateTime::parse_from_rfc3339)
            .transpose()?
            .map(|dt| dt.with_timezone(&Utc));

        // NULL path columns decode to an empty path list.
        let folder_paths = folder_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: folder_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        let main_worktree_paths = main_worktree_paths_str
            .map(|paths| {
                PathList::deserialize(&util::path_list::SerializedPathList {
                    paths,
                    order: main_worktree_paths_order_str.unwrap_or_default(),
                })
            })
            .unwrap_or_default();

        Ok((
            ThreadMetadata {
                session_id: acp::SessionId::new(id),
                agent_id,
                title: title.into(),
                updated_at,
                created_at,
                folder_paths,
                main_worktree_paths,
                archived,
            },
            next,
        ))
    }
}
1035
1036impl Column for ArchivedGitWorktree {
1037 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1038 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1039 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1040 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1041 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1042 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1043 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1044 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1045
1046 Ok((
1047 ArchivedGitWorktree {
1048 id,
1049 worktree_path: PathBuf::from(worktree_path_str),
1050 main_repo_path: PathBuf::from(main_repo_path_str),
1051 branch_name,
1052 staged_commit_hash,
1053 unstaged_commit_hash,
1054 original_commit_hash,
1055 },
1056 next,
1057 ))
1058 }
1059}
1060
1061#[cfg(test)]
1062mod tests {
1063 use super::*;
1064 use acp_thread::{AgentConnection, StubAgentConnection};
1065 use action_log::ActionLog;
1066 use agent::DbThread;
1067 use agent_client_protocol as acp;
1068
1069 use gpui::TestAppContext;
1070 use project::FakeFs;
1071 use project::Project;
1072 use std::path::Path;
1073 use std::rc::Rc;
1074
1075 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1076 DbThread {
1077 title: title.to_string().into(),
1078 messages: Vec::new(),
1079 updated_at,
1080 detailed_summary: None,
1081 initial_project_snapshot: None,
1082 cumulative_token_usage: Default::default(),
1083 request_token_usage: Default::default(),
1084 model: None,
1085 profile: None,
1086 imported: false,
1087 subagent_context: None,
1088 speed: None,
1089 thinking_enabled: false,
1090 thinking_effort: None,
1091 draft_prompt: None,
1092 ui_scroll_position: None,
1093 }
1094 }
1095
1096 fn make_metadata(
1097 session_id: &str,
1098 title: &str,
1099 updated_at: DateTime<Utc>,
1100 folder_paths: PathList,
1101 ) -> ThreadMetadata {
1102 ThreadMetadata {
1103 archived: false,
1104 session_id: acp::SessionId::new(session_id),
1105 agent_id: agent::ZED_AGENT_ID.clone(),
1106 title: title.to_string().into(),
1107 updated_at,
1108 created_at: Some(updated_at),
1109 folder_paths,
1110 main_worktree_paths: PathList::default(),
1111 }
1112 }
1113
1114 fn init_test(cx: &mut TestAppContext) {
1115 cx.update(|cx| {
1116 let settings_store = settings::SettingsStore::test(cx);
1117 cx.set_global(settings_store);
1118 ThreadMetadataStore::init_global(cx);
1119 ThreadStore::init_global(cx);
1120 });
1121 cx.run_until_parked();
1122 }
1123
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        // Seeds the database *before* the global store exists, then checks that
        // `init_global` hydrates the in-memory cache (ids + per-path index) from disk.
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test DB after the current thread so parallel tests don't collide.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Deliberately not using `init_test`: the store must be initialized only
        // after the rows above have been persisted.
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Both rows were loaded into the cache...
            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // ...and the per-path index maps each project to its own thread.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1188
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        // Verifies the in-memory per-path index tracks saves (including a thread
        // "moving" between projects) and deletes without requiring a reload.
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under project B's paths: it should leave project A's
        // index and join project B's.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Deleting session-2 should drop it from both the id set and the path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1308
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        // With one metadata row already in the DB, migration must leave that row
        // untouched and import only the native-store threads that are missing.
        // Imported rows arrive archived, because the DB was non-empty (not a
        // "first migration").
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            folder_paths: project_a_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // `a-session-0` collides with the pre-existing row and must be skipped;
        // the rest — including the projectless thread — should be imported.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The colliding session kept its original title and unarchived state.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // Everything that was migrated (i.e. not pre-existing) arrives archived.
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1417
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        // When every native-store thread already has a metadata row, migration
        // does nothing — the existing row (and its title) is preserved.
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            folder_paths: project_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a *newer* native-store version of the same session; migration
        // must still skip it because a metadata row already exists.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1474
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        // First-run migration (empty metadata DB): only the 5 most recently
        // updated threads per project stay unarchived; older ones are archived.
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        // Sort newest-first so the first five are the "keep unarchived" set.
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1564
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        // Title updates on a still-empty thread must not persist metadata; the
        // first piece of user content does.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Only a title change: the thread has no content yet.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // Adding user content makes the thread non-empty, so metadata is saved.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1616
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        // Once a thread has user content, its metadata entry must survive the
        // AcpThread entity being dropped.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Drop the thread, then flush effects and park so any release observers
        // get a chance to run before we re-check the store.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1662
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        // Threads created in a project with no worktrees get empty folder paths
        // and are archived immediately; threads whose project has a worktree
        // inherit its path and stay unarchived.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Pushing content (plus a title) makes the thread non-empty so its
        // metadata actually gets persisted.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths.is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                with_worktree.folder_paths,
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
1742
    #[gpui::test]
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        // Only top-level threads should get sidebar metadata entries; subagent
        // threads must be filtered out.
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread. NOTE(review): `Some(regular_session_id)`
        // appears to be the parent session id that marks this thread as a
        // subagent — confirm against `AcpThread::new`'s signature.
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                acp_thread::AcpThread::new(
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
            but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
1819
1820 #[test]
1821 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1822 let now = Utc::now();
1823
1824 let operations = vec![
1825 DbOperation::Upsert(make_metadata(
1826 "session-1",
1827 "First Thread",
1828 now,
1829 PathList::default(),
1830 )),
1831 DbOperation::Delete(acp::SessionId::new("session-1")),
1832 ];
1833
1834 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1835
1836 assert_eq!(deduped.len(), 1);
1837 assert_eq!(
1838 deduped[0],
1839 DbOperation::Delete(acp::SessionId::new("session-1"))
1840 );
1841 }
1842
1843 #[test]
1844 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1845 let now = Utc::now();
1846 let later = now + chrono::Duration::seconds(1);
1847
1848 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1849 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1850
1851 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1852 DbOperation::Upsert(old_metadata),
1853 DbOperation::Upsert(new_metadata.clone()),
1854 ]);
1855
1856 assert_eq!(deduped.len(), 1);
1857 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1858 }
1859
1860 #[test]
1861 fn test_dedup_db_operations_preserves_distinct_sessions() {
1862 let now = Utc::now();
1863
1864 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1865 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1866 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1867 DbOperation::Upsert(metadata1.clone()),
1868 DbOperation::Upsert(metadata2.clone()),
1869 ]);
1870
1871 assert_eq!(deduped.len(), 2);
1872 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1873 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1874 }
1875
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        // Round-trips one thread through archive/unarchive, checking that the
        // per-path index and the archived set stay mutually exclusive.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Archiving removes the thread from the path index...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            // ...and makes it visible through archived_entries instead.
            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        // Unarchiving restores the thread to the per-path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
1962
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        // `entries_for_path` filters out archived threads, while `entries` still
        // returns everything and `archived_entries` returns only the archived set.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Path lookup only sees the active thread...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // ...but the full entry list still contains both.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2022
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        // `save_all` should persist a whole batch of metadata in one call and
        // make every entry visible in the cache.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
2070
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        // The archived bit must round-trip through the database: it should still
        // be set after `reload` rebuilds the cache from disk.
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        cx.run_until_parked();

        // Re-read everything from the database, discarding cached state.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            // Archived threads stay out of the per-path index after reload, too.
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2129
2130 #[gpui::test]
2131 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2132 init_test(cx);
2133
2134 cx.run_until_parked();
2135
2136 cx.update(|cx| {
2137 let store = ThreadMetadataStore::global(cx);
2138 store.update(cx, |store, cx| {
2139 store.archive(&acp::SessionId::new("nonexistent"), None, cx);
2140 });
2141 });
2142
2143 cx.run_until_parked();
2144
2145 cx.update(|cx| {
2146 let store = ThreadMetadataStore::global(cx);
2147 let store = store.read(cx);
2148
2149 assert!(store.is_empty());
2150 assert_eq!(store.entries().count(), 0);
2151 assert_eq!(store.archived_entries().count(), 0);
2152 });
2153 }
2154
2155 #[gpui::test]
2156 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2157 init_test(cx);
2158
2159 let paths = PathList::new(&[Path::new("/project-a")]);
2160 let now = Utc::now();
2161 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2162 let session_id = metadata.session_id.clone();
2163
2164 cx.update(|cx| {
2165 let store = ThreadMetadataStore::global(cx);
2166 store.update(cx, |store, cx| {
2167 store.save(metadata.clone(), cx);
2168 store.archive(&session_id, None, cx);
2169 });
2170 });
2171
2172 cx.run_until_parked();
2173
2174 cx.update(|cx| {
2175 let store = ThreadMetadataStore::global(cx);
2176 let store = store.read(cx);
2177
2178 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2179 pretty_assertions::assert_eq!(
2180 entries,
2181 vec![ThreadMetadata {
2182 archived: true,
2183 ..metadata
2184 }]
2185 );
2186 });
2187 }
2188
2189 #[gpui::test]
2190 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2191 init_test(cx);
2192 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2193
2194 let id = store
2195 .read_with(cx, |store, cx| {
2196 store.create_archived_worktree(
2197 "/tmp/worktree".to_string(),
2198 "/home/user/repo".to_string(),
2199 Some("feature-branch".to_string()),
2200 "staged_aaa".to_string(),
2201 "unstaged_bbb".to_string(),
2202 "original_000".to_string(),
2203 cx,
2204 )
2205 })
2206 .await
2207 .unwrap();
2208
2209 store
2210 .read_with(cx, |store, cx| {
2211 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2212 })
2213 .await
2214 .unwrap();
2215
2216 let worktrees = store
2217 .read_with(cx, |store, cx| {
2218 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2219 })
2220 .await
2221 .unwrap();
2222
2223 assert_eq!(worktrees.len(), 1);
2224 let wt = &worktrees[0];
2225 assert_eq!(wt.id, id);
2226 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2227 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2228 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2229 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2230 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2231 assert_eq!(wt.original_commit_hash, "original_000");
2232 }
2233
2234 #[gpui::test]
2235 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2236 init_test(cx);
2237 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2238
2239 let id = store
2240 .read_with(cx, |store, cx| {
2241 store.create_archived_worktree(
2242 "/tmp/worktree".to_string(),
2243 "/home/user/repo".to_string(),
2244 Some("main".to_string()),
2245 "deadbeef".to_string(),
2246 "deadbeef".to_string(),
2247 "original_000".to_string(),
2248 cx,
2249 )
2250 })
2251 .await
2252 .unwrap();
2253
2254 store
2255 .read_with(cx, |store, cx| {
2256 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2257 })
2258 .await
2259 .unwrap();
2260
2261 store
2262 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2263 .await
2264 .unwrap();
2265
2266 let worktrees = store
2267 .read_with(cx, |store, cx| {
2268 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2269 })
2270 .await
2271 .unwrap();
2272 assert!(worktrees.is_empty());
2273 }
2274
2275 #[gpui::test]
2276 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2277 init_test(cx);
2278 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2279
2280 let id = store
2281 .read_with(cx, |store, cx| {
2282 store.create_archived_worktree(
2283 "/tmp/worktree".to_string(),
2284 "/home/user/repo".to_string(),
2285 None,
2286 "abc123".to_string(),
2287 "abc123".to_string(),
2288 "original_000".to_string(),
2289 cx,
2290 )
2291 })
2292 .await
2293 .unwrap();
2294
2295 store
2296 .read_with(cx, |store, cx| {
2297 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2298 })
2299 .await
2300 .unwrap();
2301
2302 store
2303 .read_with(cx, |store, cx| {
2304 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2305 })
2306 .await
2307 .unwrap();
2308
2309 let wt1 = store
2310 .read_with(cx, |store, cx| {
2311 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2312 })
2313 .await
2314 .unwrap();
2315
2316 let wt2 = store
2317 .read_with(cx, |store, cx| {
2318 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2319 })
2320 .await
2321 .unwrap();
2322
2323 assert_eq!(wt1.len(), 1);
2324 assert_eq!(wt2.len(), 1);
2325 assert_eq!(wt1[0].id, wt2[0].id);
2326 }
2327
2328 // Verifies that all_session_ids_for_path returns both archived and
2329 // unarchived threads. This is intentional: the method is used during
2330 // archival to find every thread referencing a worktree so they can
2331 // all be linked to the archived worktree record.
2332 #[gpui::test]
2333 async fn test_all_session_ids_for_path(cx: &mut TestAppContext) {
2334 init_test(cx);
2335 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2336 let paths = PathList::new(&[Path::new("/project-x")]);
2337
2338 let meta1 = ThreadMetadata {
2339 session_id: acp::SessionId::new("session-1"),
2340 agent_id: agent::ZED_AGENT_ID.clone(),
2341 title: "Thread 1".into(),
2342 updated_at: Utc::now(),
2343 created_at: Some(Utc::now()),
2344 folder_paths: paths.clone(),
2345 main_worktree_paths: PathList::default(),
2346 archived: false,
2347 };
2348 let meta2 = ThreadMetadata {
2349 session_id: acp::SessionId::new("session-2"),
2350 agent_id: agent::ZED_AGENT_ID.clone(),
2351 title: "Thread 2".into(),
2352 updated_at: Utc::now(),
2353 created_at: Some(Utc::now()),
2354 folder_paths: paths.clone(),
2355 main_worktree_paths: PathList::default(),
2356 archived: true,
2357 };
2358
2359 store.update(cx, |store, _cx| {
2360 store.save_internal(meta1);
2361 store.save_internal(meta2);
2362 });
2363
2364 let ids: HashSet<acp::SessionId> = store.read_with(cx, |store, _cx| {
2365 store.all_session_ids_for_path(&paths).cloned().collect()
2366 });
2367
2368 assert!(ids.contains(&acp::SessionId::new("session-1")));
2369 assert!(ids.contains(&acp::SessionId::new("session-2")));
2370 assert_eq!(ids.len(), 2);
2371 }
2372
2373 #[gpui::test]
2374 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
2375 init_test(cx);
2376 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2377
2378 let original_paths = PathList::new(&[
2379 Path::new("/projects/worktree-a"),
2380 Path::new("/projects/worktree-b"),
2381 Path::new("/other/unrelated"),
2382 ]);
2383 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2384
2385 store.update(cx, |store, cx| {
2386 store.save_manually(meta, cx);
2387 });
2388
2389 let replacements = vec![
2390 (
2391 PathBuf::from("/projects/worktree-a"),
2392 PathBuf::from("/restored/worktree-a"),
2393 ),
2394 (
2395 PathBuf::from("/projects/worktree-b"),
2396 PathBuf::from("/restored/worktree-b"),
2397 ),
2398 ];
2399
2400 store.update(cx, |store, cx| {
2401 store.complete_worktree_restore(
2402 &acp::SessionId::new("session-multi"),
2403 &replacements,
2404 cx,
2405 );
2406 });
2407
2408 let entry = store.read_with(cx, |store, _cx| {
2409 store.entry(&acp::SessionId::new("session-multi")).cloned()
2410 });
2411 let entry = entry.unwrap();
2412 let paths = entry.folder_paths.paths();
2413 assert_eq!(paths.len(), 3);
2414 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2415 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2416 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2417 }
2418
2419 #[gpui::test]
2420 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
2421 init_test(cx);
2422 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2423
2424 let original_paths =
2425 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2426 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2427
2428 store.update(cx, |store, cx| {
2429 store.save_manually(meta, cx);
2430 });
2431
2432 let replacements = vec![
2433 (
2434 PathBuf::from("/projects/worktree-a"),
2435 PathBuf::from("/new/worktree-a"),
2436 ),
2437 (
2438 PathBuf::from("/nonexistent/path"),
2439 PathBuf::from("/should/not/appear"),
2440 ),
2441 ];
2442
2443 store.update(cx, |store, cx| {
2444 store.complete_worktree_restore(
2445 &acp::SessionId::new("session-partial"),
2446 &replacements,
2447 cx,
2448 );
2449 });
2450
2451 let entry = store.read_with(cx, |store, _cx| {
2452 store
2453 .entry(&acp::SessionId::new("session-partial"))
2454 .cloned()
2455 });
2456 let entry = entry.unwrap();
2457 let paths = entry.folder_paths.paths();
2458 assert_eq!(paths.len(), 2);
2459 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2460 assert!(paths.contains(&PathBuf::from("/other/path")));
2461 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2462 }
2463
2464 #[gpui::test]
2465 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2466 init_test(cx);
2467 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2468
2469 let id1 = store
2470 .read_with(cx, |store, cx| {
2471 store.create_archived_worktree(
2472 "/projects/worktree-a".to_string(),
2473 "/home/user/repo".to_string(),
2474 Some("branch-a".to_string()),
2475 "staged_a".to_string(),
2476 "unstaged_a".to_string(),
2477 "original_000".to_string(),
2478 cx,
2479 )
2480 })
2481 .await
2482 .unwrap();
2483
2484 let id2 = store
2485 .read_with(cx, |store, cx| {
2486 store.create_archived_worktree(
2487 "/projects/worktree-b".to_string(),
2488 "/home/user/repo".to_string(),
2489 Some("branch-b".to_string()),
2490 "staged_b".to_string(),
2491 "unstaged_b".to_string(),
2492 "original_000".to_string(),
2493 cx,
2494 )
2495 })
2496 .await
2497 .unwrap();
2498
2499 store
2500 .read_with(cx, |store, cx| {
2501 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2502 })
2503 .await
2504 .unwrap();
2505
2506 store
2507 .read_with(cx, |store, cx| {
2508 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2509 })
2510 .await
2511 .unwrap();
2512
2513 let worktrees = store
2514 .read_with(cx, |store, cx| {
2515 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2516 })
2517 .await
2518 .unwrap();
2519
2520 assert_eq!(worktrees.len(), 2);
2521
2522 let paths: Vec<&Path> = worktrees
2523 .iter()
2524 .map(|w| w.worktree_path.as_path())
2525 .collect();
2526 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2527 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2528 }
2529}