1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use futures::{FutureExt as _, future::Shared};
20use gpui::{AppContext as _, Entity, Global, Subscription, Task};
21use project::AgentId;
22use ui::{App, Context, SharedString};
23use util::ResultExt as _;
24use workspace::PathList;
25
26use crate::DEFAULT_THREAD_TITLE;
27
/// Initialize the sidebar thread-metadata subsystem: install the global
/// store, then kick off the one-time migration of legacy native-agent
/// thread metadata into it.
pub fn init(cx: &mut App) {
    // Order matters: the migration reads the global store created here.
    ThreadMetadataStore::init_global(cx);
    migrate_thread_metadata(cx);
}
32
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // IDs already present in the new DB; anything listed here has been
        // migrated (or natively created) and must not be migrated again.
        let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();

        let is_first_migration = existing_entries.is_empty();

        // Snapshot every legacy thread-store entry that isn't in the new DB
        // yet. Migrated entries default to `archived: true`; see below for
        // the first-migration exception.
        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_entries.contains(&entry.id.0) {
                        return None;
                    }

                    Some(ThreadMetadata {
                        session_id: entry.id,
                        agent_id: ZED_AGENT_ID.clone(),
                        title: entry.title,
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        folder_paths: entry.folder_paths,
                        // The legacy store has no notion of a main worktree.
                        main_worktree_paths: PathList::default(),
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            // Group mutable references by project paths; threads with no
            // folder path stay archived so they don't clutter the sidebar.
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                if entry.folder_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.folder_paths.clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Newest first, then unarchive the top five.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        // Single reload at the end repopulates the in-memory indices.
        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
    .detach_and_log_err(cx);
}
109
/// Newtype wrapper so the store entity can be stashed in GPUI global state
/// (see `ThreadMetadataStore::init_global` / `global`).
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
112
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Stable identifier of the session; primary key in the DB.
    pub session_id: acp::SessionId,
    /// Which agent owns the thread (persisted as NULL for the Zed agent).
    pub agent_id: AgentId,
    pub title: SharedString,
    pub updated_at: DateTime<Utc>,
    /// `None` for rows written before created-at tracking existed.
    pub created_at: Option<DateTime<Utc>>,
    /// Worktree folders the thread was opened with; key for sidebar grouping.
    pub folder_paths: PathList,
    /// Main-repo paths when the thread ran in a linked git worktree;
    /// empty when not applicable.
    pub main_worktree_paths: PathList,
    /// Archived threads are hidden from the sidebar but kept in the archive.
    pub archived: bool,
}
126
127impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
128 fn from(meta: &ThreadMetadata) -> Self {
129 Self {
130 session_id: meta.session_id.clone(),
131 work_dirs: Some(meta.folder_paths.clone()),
132 title: Some(meta.title.clone()),
133 updated_at: Some(meta.updated_at),
134 created_at: meta.created_at,
135 meta: None,
136 }
137 }
138}
139
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
// Derives match the sibling `ThreadMetadata` record type; all fields are
// plain data (`i64`, `PathBuf`, `String`), so `Eq` is sound as well.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
181
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Automatically listens to AcpThread events and updates metadata if it has changed.
pub struct ThreadMetadataStore {
    // Persistent backing store; writes are funneled through
    // `pending_thread_ops_tx` so they can be batched and deduped.
    db: ThreadMetadataDb,
    // Source of truth for the in-memory view, keyed by session.
    threads: HashMap<acp::SessionId, ThreadMetadata>,
    // Secondary index: `folder_paths` -> sessions, for per-project listing.
    threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    // Secondary index: `main_worktree_paths` -> sessions, for threads opened
    // in a linked git worktree.
    threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    // Shared so concurrent callers can await the same in-flight reload.
    reload_task: Option<Shared<Task<()>>>,
    // Keeps per-thread event subscriptions alive; removed on thread release.
    session_subscriptions: HashMap<acp::SessionId, Subscription>,
    // Queue feeding the background `_db_operations_task`.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    // In-progress archive jobs; dropping the Sender cancels the Task.
    in_flight_archives: HashMap<acp::SessionId, (Task<()>, smol::channel::Sender<()>)>,
    // Drains `pending_thread_ops_tx`, dedupes, and writes to the DB.
    _db_operations_task: Task<()>,
}
196
/// A queued write against the metadata DB, batched by the background task.
#[derive(Debug, PartialEq)]
enum DbOperation {
    /// Insert-or-update the row for `ThreadMetadata::session_id`.
    Upsert(ThreadMetadata),
    /// Remove the row for the given session.
    Delete(acp::SessionId),
}
202
203impl DbOperation {
204 fn id(&self) -> &acp::SessionId {
205 match self {
206 DbOperation::Upsert(thread) => &thread.session_id,
207 DbOperation::Delete(session_id) => session_id,
208 }
209 }
210}
211
212impl ThreadMetadataStore {
213 #[cfg(not(any(test, feature = "test-support")))]
214 pub fn init_global(cx: &mut App) {
215 if cx.has_global::<Self>() {
216 return;
217 }
218
219 let db = ThreadMetadataDb::global(cx);
220 let thread_store = cx.new(|cx| Self::new(db, cx));
221 cx.set_global(GlobalThreadMetadataStore(thread_store));
222 }
223
224 #[cfg(any(test, feature = "test-support"))]
225 pub fn init_global(cx: &mut App) {
226 let thread = std::thread::current();
227 let test_name = thread.name().unwrap_or("unknown_test");
228 let db_name = format!("THREAD_METADATA_DB_{}", test_name);
229 let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
230 let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
231 cx.set_global(GlobalThreadMetadataStore(thread_store));
232 }
233
234 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
235 cx.try_global::<GlobalThreadMetadataStore>()
236 .map(|store| store.0.clone())
237 }
238
239 pub fn global(cx: &App) -> Entity<Self> {
240 cx.global::<GlobalThreadMetadataStore>().0.clone()
241 }
242
243 pub fn is_empty(&self) -> bool {
244 self.threads.is_empty()
245 }
246
247 /// Returns all thread IDs.
248 pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
249 self.threads.keys().cloned()
250 }
251
252 /// Returns the metadata for a specific thread, if it exists.
253 pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
254 self.threads.get(session_id)
255 }
256
257 /// Returns all threads.
258 pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
259 self.threads.values()
260 }
261
262 /// Returns all archived threads.
263 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
264 self.entries().filter(|t| t.archived)
265 }
266
267 /// Returns all threads for the given path list, excluding archived threads.
268 pub fn entries_for_path(
269 &self,
270 path_list: &PathList,
271 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
272 self.threads_by_paths
273 .get(path_list)
274 .into_iter()
275 .flatten()
276 .filter_map(|s| self.threads.get(s))
277 .filter(|s| !s.archived)
278 }
279
280 /// Returns threads whose `main_worktree_paths` matches the given path list,
281 /// excluding archived threads. This finds threads that were opened in a
282 /// linked worktree but are associated with the given main worktree.
283 pub fn entries_for_main_worktree_path(
284 &self,
285 path_list: &PathList,
286 ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
287 self.threads_by_main_paths
288 .get(path_list)
289 .into_iter()
290 .flatten()
291 .filter_map(|s| self.threads.get(s))
292 .filter(|s| !s.archived)
293 }
294
295 fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
296 let db = self.db.clone();
297 self.reload_task.take();
298
299 let list_task = cx
300 .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });
301
302 let reload_task = cx
303 .spawn(async move |this, cx| {
304 let Some(rows) = list_task.await.log_err() else {
305 return;
306 };
307
308 this.update(cx, |this, cx| {
309 this.threads.clear();
310 this.threads_by_paths.clear();
311 this.threads_by_main_paths.clear();
312
313 for row in rows {
314 this.threads_by_paths
315 .entry(row.folder_paths.clone())
316 .or_default()
317 .insert(row.session_id.clone());
318 if !row.main_worktree_paths.is_empty() {
319 this.threads_by_main_paths
320 .entry(row.main_worktree_paths.clone())
321 .or_default()
322 .insert(row.session_id.clone());
323 }
324 this.threads.insert(row.session_id.clone(), row);
325 }
326
327 cx.notify();
328 })
329 .ok();
330 })
331 .shared();
332 self.reload_task = Some(reload_task.clone());
333 reload_task
334 }
335
336 pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
337 for metadata in metadata {
338 self.save_internal(metadata);
339 }
340 cx.notify();
341 }
342
343 #[cfg(any(test, feature = "test-support"))]
344 pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
345 self.save(metadata, cx)
346 }
347
348 fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
349 self.save_internal(metadata);
350 cx.notify();
351 }
352
353 fn save_internal(&mut self, metadata: ThreadMetadata) {
354 if let Some(thread) = self.threads.get(&metadata.session_id) {
355 if thread.folder_paths != metadata.folder_paths {
356 if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
357 session_ids.remove(&metadata.session_id);
358 }
359 }
360 if thread.main_worktree_paths != metadata.main_worktree_paths
361 && !thread.main_worktree_paths.is_empty()
362 {
363 if let Some(session_ids) = self
364 .threads_by_main_paths
365 .get_mut(&thread.main_worktree_paths)
366 {
367 session_ids.remove(&metadata.session_id);
368 }
369 }
370 }
371
372 self.threads
373 .insert(metadata.session_id.clone(), metadata.clone());
374
375 self.threads_by_paths
376 .entry(metadata.folder_paths.clone())
377 .or_default()
378 .insert(metadata.session_id.clone());
379
380 if !metadata.main_worktree_paths.is_empty() {
381 self.threads_by_main_paths
382 .entry(metadata.main_worktree_paths.clone())
383 .or_default()
384 .insert(metadata.session_id.clone());
385 }
386
387 self.pending_thread_ops_tx
388 .try_send(DbOperation::Upsert(metadata))
389 .log_err();
390 }
391
392 pub fn update_working_directories(
393 &mut self,
394 session_id: &acp::SessionId,
395 work_dirs: PathList,
396 cx: &mut Context<Self>,
397 ) {
398 if let Some(thread) = self.threads.get(session_id) {
399 self.save_internal(ThreadMetadata {
400 folder_paths: work_dirs,
401 ..thread.clone()
402 });
403 cx.notify();
404 }
405 }
406
407 pub fn archive(
408 &mut self,
409 session_id: &acp::SessionId,
410 archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
411 cx: &mut Context<Self>,
412 ) {
413 self.update_archived(session_id, true, cx);
414
415 if let Some(job) = archive_job {
416 self.in_flight_archives.insert(session_id.clone(), job);
417 }
418 }
419
420 pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
421 self.update_archived(session_id, false, cx);
422 // Dropping the Sender triggers cancellation in the background task.
423 self.in_flight_archives.remove(session_id);
424 }
425
426 pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) {
427 self.in_flight_archives.remove(session_id);
428 }
429
430 /// Updates a thread's `folder_paths` after an archived worktree has been
431 /// restored to disk. The restored worktree may land at a different path
432 /// than it had before archival, so each `(old_path, new_path)` pair in
433 /// `path_replacements` is applied to the thread's stored folder paths.
434 pub fn update_restored_worktree_paths(
435 &mut self,
436 session_id: &acp::SessionId,
437 path_replacements: &[(PathBuf, PathBuf)],
438 cx: &mut Context<Self>,
439 ) {
440 if let Some(thread) = self.threads.get(session_id).cloned() {
441 let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
442 for (old_path, new_path) in path_replacements {
443 if let Some(pos) = paths.iter().position(|p| p == old_path) {
444 paths[pos] = new_path.clone();
445 }
446 }
447 let new_folder_paths = PathList::new(&paths);
448 self.save_internal(ThreadMetadata {
449 folder_paths: new_folder_paths,
450 ..thread
451 });
452 cx.notify();
453 }
454 }
455
456 pub fn complete_worktree_restore(
457 &mut self,
458 session_id: &acp::SessionId,
459 path_replacements: &[(PathBuf, PathBuf)],
460 cx: &mut Context<Self>,
461 ) {
462 if let Some(thread) = self.threads.get(session_id).cloned() {
463 let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
464 for (old_path, new_path) in path_replacements {
465 for path in &mut paths {
466 if path == old_path {
467 *path = new_path.clone();
468 }
469 }
470 }
471 let new_folder_paths = PathList::new(&paths);
472 self.save_internal(ThreadMetadata {
473 folder_paths: new_folder_paths,
474 ..thread
475 });
476 cx.notify();
477 }
478 }
479
480 pub fn update_main_worktree_paths(
481 &mut self,
482 old_paths: &PathList,
483 new_paths: PathList,
484 cx: &mut Context<Self>,
485 ) {
486 let session_ids = match self.threads_by_main_paths.remove(old_paths) {
487 Some(ids) if !ids.is_empty() => ids,
488 _ => return,
489 };
490
491 let new_index = self
492 .threads_by_main_paths
493 .entry(new_paths.clone())
494 .or_default();
495
496 for session_id in &session_ids {
497 new_index.insert(session_id.clone());
498
499 if let Some(thread) = self.threads.get_mut(session_id) {
500 thread.main_worktree_paths = new_paths.clone();
501 self.pending_thread_ops_tx
502 .try_send(DbOperation::Upsert(thread.clone()))
503 .log_err();
504 }
505 }
506
507 cx.notify();
508 }
509
510 pub fn create_archived_worktree(
511 &self,
512 worktree_path: String,
513 main_repo_path: String,
514 branch_name: Option<String>,
515 staged_commit_hash: String,
516 unstaged_commit_hash: String,
517 original_commit_hash: String,
518 cx: &App,
519 ) -> Task<anyhow::Result<i64>> {
520 let db = self.db.clone();
521 cx.background_spawn(async move {
522 db.create_archived_worktree(
523 worktree_path,
524 main_repo_path,
525 branch_name,
526 staged_commit_hash,
527 unstaged_commit_hash,
528 original_commit_hash,
529 )
530 .await
531 })
532 }
533
534 pub fn link_thread_to_archived_worktree(
535 &self,
536 session_id: String,
537 archived_worktree_id: i64,
538 cx: &App,
539 ) -> Task<anyhow::Result<()>> {
540 let db = self.db.clone();
541 cx.background_spawn(async move {
542 db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
543 .await
544 })
545 }
546
547 pub fn get_archived_worktrees_for_thread(
548 &self,
549 session_id: String,
550 cx: &App,
551 ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
552 let db = self.db.clone();
553 cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
554 }
555
556 pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
557 let db = self.db.clone();
558 cx.background_spawn(async move { db.delete_archived_worktree(id).await })
559 }
560
561 pub fn unlink_thread_from_all_archived_worktrees(
562 &self,
563 session_id: String,
564 cx: &App,
565 ) -> Task<anyhow::Result<()>> {
566 let db = self.db.clone();
567 cx.background_spawn(async move {
568 db.unlink_thread_from_all_archived_worktrees(session_id)
569 .await
570 })
571 }
572
573 pub fn is_archived_worktree_referenced(
574 &self,
575 archived_worktree_id: i64,
576 cx: &App,
577 ) -> Task<anyhow::Result<bool>> {
578 let db = self.db.clone();
579 cx.background_spawn(async move {
580 db.is_archived_worktree_referenced(archived_worktree_id)
581 .await
582 })
583 }
584
585 fn update_archived(
586 &mut self,
587 session_id: &acp::SessionId,
588 archived: bool,
589 cx: &mut Context<Self>,
590 ) {
591 if let Some(thread) = self.threads.get(session_id) {
592 self.save_internal(ThreadMetadata {
593 archived,
594 ..thread.clone()
595 });
596 cx.notify();
597 }
598 }
599
600 pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
601 if let Some(thread) = self.threads.get(&session_id) {
602 if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
603 session_ids.remove(&session_id);
604 }
605 if !thread.main_worktree_paths.is_empty() {
606 if let Some(session_ids) = self
607 .threads_by_main_paths
608 .get_mut(&thread.main_worktree_paths)
609 {
610 session_ids.remove(&session_id);
611 }
612 }
613 }
614 self.threads.remove(&session_id);
615 self.pending_thread_ops_tx
616 .try_send(DbOperation::Delete(session_id))
617 .log_err();
618 cx.notify();
619 }
620
621 fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
622 let weak_store = cx.weak_entity();
623
624 cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
625 // Don't track subagent threads in the sidebar.
626 if thread.parent_session_id().is_some() {
627 return;
628 }
629
630 let thread_entity = cx.entity();
631
632 cx.on_release({
633 let weak_store = weak_store.clone();
634 move |thread, cx| {
635 weak_store
636 .update(cx, |store, _cx| {
637 let session_id = thread.session_id().clone();
638 store.session_subscriptions.remove(&session_id);
639 })
640 .ok();
641 }
642 })
643 .detach();
644
645 weak_store
646 .update(cx, |this, cx| {
647 let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
648 this.session_subscriptions
649 .insert(thread.session_id().clone(), subscription);
650 })
651 .ok();
652 })
653 .detach();
654
655 let (tx, rx) = smol::channel::unbounded();
656 let _db_operations_task = cx.background_spawn({
657 let db = db.clone();
658 async move {
659 while let Ok(first_update) = rx.recv().await {
660 let mut updates = vec![first_update];
661 while let Ok(update) = rx.try_recv() {
662 updates.push(update);
663 }
664 let updates = Self::dedup_db_operations(updates);
665 for operation in updates {
666 match operation {
667 DbOperation::Upsert(metadata) => {
668 db.save(metadata).await.log_err();
669 }
670 DbOperation::Delete(session_id) => {
671 db.delete(session_id).await.log_err();
672 }
673 }
674 }
675 }
676 }
677 });
678
679 let mut this = Self {
680 db,
681 threads: HashMap::default(),
682 threads_by_paths: HashMap::default(),
683 threads_by_main_paths: HashMap::default(),
684 reload_task: None,
685 session_subscriptions: HashMap::default(),
686 pending_thread_ops_tx: tx,
687 in_flight_archives: HashMap::default(),
688 _db_operations_task,
689 };
690 let _ = this.reload(cx);
691 this
692 }
693
694 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
695 let mut ops = HashMap::default();
696 for operation in operations.into_iter().rev() {
697 if ops.contains_key(operation.id()) {
698 continue;
699 }
700 ops.insert(operation.id().clone(), operation);
701 }
702 ops.into_values().collect()
703 }
704
705 fn handle_thread_event(
706 &mut self,
707 thread: Entity<acp_thread::AcpThread>,
708 event: &AcpThreadEvent,
709 cx: &mut Context<Self>,
710 ) {
711 // Don't track subagent threads in the sidebar.
712 if thread.read(cx).parent_session_id().is_some() {
713 return;
714 }
715
716 match event {
717 AcpThreadEvent::NewEntry
718 | AcpThreadEvent::TitleUpdated
719 | AcpThreadEvent::EntryUpdated(_)
720 | AcpThreadEvent::EntriesRemoved(_)
721 | AcpThreadEvent::ToolAuthorizationRequested(_)
722 | AcpThreadEvent::ToolAuthorizationReceived(_)
723 | AcpThreadEvent::Retry(_)
724 | AcpThreadEvent::Stopped(_)
725 | AcpThreadEvent::Error
726 | AcpThreadEvent::LoadError(_)
727 | AcpThreadEvent::Refusal
728 | AcpThreadEvent::WorkingDirectoriesUpdated => {
729 let thread_ref = thread.read(cx);
730 if thread_ref.entries().is_empty() {
731 return;
732 }
733
734 let existing_thread = self.threads.get(thread_ref.session_id());
735 let session_id = thread_ref.session_id().clone();
736 let title = thread_ref
737 .title()
738 .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());
739
740 let updated_at = Utc::now();
741
742 let created_at = existing_thread
743 .and_then(|t| t.created_at)
744 .unwrap_or_else(|| updated_at);
745
746 let agent_id = thread_ref.connection().agent_id();
747
748 let folder_paths = {
749 let project = thread_ref.project().read(cx);
750 let paths: Vec<Arc<Path>> = project
751 .visible_worktrees(cx)
752 .map(|worktree| worktree.read(cx).abs_path())
753 .collect();
754 PathList::new(&paths)
755 };
756
757 let main_worktree_paths = thread_ref
758 .project()
759 .read(cx)
760 .project_group_key(cx)
761 .path_list()
762 .clone();
763
764 // Threads without a folder path (e.g. started in an empty
765 // window) are archived by default so they don't get lost,
766 // because they won't show up in the sidebar. Users can reload
767 // them from the archive.
768 let archived = existing_thread
769 .map(|t| t.archived)
770 .unwrap_or(folder_paths.is_empty());
771
772 let metadata = ThreadMetadata {
773 session_id,
774 agent_id,
775 title,
776 created_at: Some(created_at),
777 updated_at,
778 folder_paths,
779 main_worktree_paths,
780 archived,
781 };
782
783 self.save(metadata, cx);
784 }
785 AcpThreadEvent::TokenUsageUpdated
786 | AcpThreadEvent::SubagentSpawned(_)
787 | AcpThreadEvent::PromptCapabilitiesUpdated
788 | AcpThreadEvent::AvailableCommandsUpdated(_)
789 | AcpThreadEvent::ModeUpdated(_)
790 | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
791 }
792 }
793}
794
// NOTE(review): likely vestigial — the store is installed as a global only
// via the `GlobalThreadMetadataStore` wrapper (which has its own `Global`
// impl); this impl appears to exist solely so `has_global::<Self>()` in
// `init_global` compiles. Confirm whether it is still needed.
impl Global for ThreadMetadataStore {}
796
/// Thin newtype over the shared sqlez connection; all sidebar/archive
/// persistence queries live on this type.
struct ThreadMetadataDb(ThreadSafeConnection);
798
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Migrations are applied in order and are append-only: never edit an
    // entry that has shipped — add a new one (like the ALTER TABLEs below).
    const MIGRATIONS: &[&str] = &[
        // v1: base table for sidebar thread metadata.
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        // v2-v4: added columns for archiving and main-worktree association.
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        // v5: archived git worktrees plus the thread<->worktree link table.
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
    ];
}
836
// Registers a lazily-opened global connection for this domain (no
// cross-domain dependencies, hence the empty list).
db::static_connection!(ThreadMetadataDb, []);
838
impl ThreadMetadataDb {
    /// List only the session ids, most recently updated first. Used by the
    /// migration to skip threads that already have rows.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        // Column order here must stay in sync with `Column for ThreadMetadata`.
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }

    /// Upsert metadata for a thread.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        // The default Zed agent is stored as NULL to keep old rows compatible.
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        // Timestamps are persisted as RFC 3339 strings (TEXT columns).
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        // Path lists serialize into a paths string plus an order string;
        // empty lists are stored as NULL in both columns.
        let serialized = row.folder_paths.serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths.serialize();
        let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
        {
            (None, None)
        } else {
            (Some(main_serialized.paths), Some(main_serialized.order))
        };
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
                ON CONFLICT(session_id) DO UPDATE SET \
                    agent_id = excluded.agent_id, \
                    title = excluded.title, \
                    updated_at = excluded.updated_at, \
                    created_at = excluded.created_at, \
                    folder_paths = excluded.folder_paths, \
                    folder_paths_order = excluded.folder_paths_order, \
                    archived = excluded.archived, \
                    main_worktree_paths = excluded.main_worktree_paths, \
                    main_worktree_paths_order = excluded.main_worktree_paths_order";
            let mut stmt = Statement::prepare(conn, sql)?;
            // `bind` returns the next placeholder index, so these must run
            // in exactly the VALUES order above.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            stmt.bind(&main_worktree_paths_order, i)?;
            stmt.exec()
        })
        .await
    }

    /// Delete metadata for a single thread.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Insert a new archived-worktree row and return its auto-assigned id.
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // RETURNING yields exactly one row on success.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }

    /// Associate a thread with an archived worktree in the link table.
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }

    /// All archived worktrees linked to the given thread.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        // Column order must stay in sync with `Column for ArchivedGitWorktree`.
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.session_id = ?1",
        )?(session_id)
    }

    /// Remove an archived worktree and any links to it, in one write.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            // Delete link rows first so the REFERENCES constraint on
            // `thread_archived_worktrees` can't dangle.
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Drop every archived-worktree link held by the given thread.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE session_id = ?",
            )?;
            stmt.bind(&session_id, 1)?;
            stmt.exec()
        })
        .await
    }

    /// Whether at least one thread still links to the archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        // COUNT(*) always returns a row; `unwrap_or(0)` is a defensive default.
        .map(|count| count.unwrap_or(0) > 0)
    }
}
1022
1023impl Column for ThreadMetadata {
1024 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1025 let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
1026 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
1027 let (title, next): (String, i32) = Column::column(statement, next)?;
1028 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
1029 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1030 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
1031 let (folder_paths_order_str, next): (Option<String>, i32) =
1032 Column::column(statement, next)?;
1033 let (archived, next): (bool, i32) = Column::column(statement, next)?;
1034 let (main_worktree_paths_str, next): (Option<String>, i32) =
1035 Column::column(statement, next)?;
1036 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
1037 Column::column(statement, next)?;
1038
1039 let agent_id = agent_id
1040 .map(|id| AgentId::new(id))
1041 .unwrap_or(ZED_AGENT_ID.clone());
1042
1043 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
1044 let created_at = created_at_str
1045 .as_deref()
1046 .map(DateTime::parse_from_rfc3339)
1047 .transpose()?
1048 .map(|dt| dt.with_timezone(&Utc));
1049
1050 let folder_paths = folder_paths_str
1051 .map(|paths| {
1052 PathList::deserialize(&util::path_list::SerializedPathList {
1053 paths,
1054 order: folder_paths_order_str.unwrap_or_default(),
1055 })
1056 })
1057 .unwrap_or_default();
1058
1059 let main_worktree_paths = main_worktree_paths_str
1060 .map(|paths| {
1061 PathList::deserialize(&util::path_list::SerializedPathList {
1062 paths,
1063 order: main_worktree_paths_order_str.unwrap_or_default(),
1064 })
1065 })
1066 .unwrap_or_default();
1067
1068 Ok((
1069 ThreadMetadata {
1070 session_id: acp::SessionId::new(id),
1071 agent_id,
1072 title: title.into(),
1073 updated_at,
1074 created_at,
1075 folder_paths,
1076 main_worktree_paths,
1077 archived,
1078 },
1079 next,
1080 ))
1081 }
1082}
1083
1084impl Column for ArchivedGitWorktree {
1085 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1086 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1087 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1088 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1089 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1090 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1091 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1092 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1093
1094 Ok((
1095 ArchivedGitWorktree {
1096 id,
1097 worktree_path: PathBuf::from(worktree_path_str),
1098 main_repo_path: PathBuf::from(main_repo_path_str),
1099 branch_name,
1100 staged_commit_hash,
1101 unstaged_commit_hash,
1102 original_commit_hash,
1103 },
1104 next,
1105 ))
1106 }
1107}
1108
1109#[cfg(test)]
1110mod tests {
1111 use super::*;
1112 use acp_thread::{AgentConnection, StubAgentConnection};
1113 use action_log::ActionLog;
1114 use agent::DbThread;
1115 use agent_client_protocol as acp;
1116
1117 use gpui::TestAppContext;
1118 use project::FakeFs;
1119 use project::Project;
1120 use std::path::Path;
1121 use std::rc::Rc;
1122
1123 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1124 DbThread {
1125 title: title.to_string().into(),
1126 messages: Vec::new(),
1127 updated_at,
1128 detailed_summary: None,
1129 initial_project_snapshot: None,
1130 cumulative_token_usage: Default::default(),
1131 request_token_usage: Default::default(),
1132 model: None,
1133 profile: None,
1134 imported: false,
1135 subagent_context: None,
1136 speed: None,
1137 thinking_enabled: false,
1138 thinking_effort: None,
1139 draft_prompt: None,
1140 ui_scroll_position: None,
1141 }
1142 }
1143
1144 fn make_metadata(
1145 session_id: &str,
1146 title: &str,
1147 updated_at: DateTime<Utc>,
1148 folder_paths: PathList,
1149 ) -> ThreadMetadata {
1150 ThreadMetadata {
1151 archived: false,
1152 session_id: acp::SessionId::new(session_id),
1153 agent_id: agent::ZED_AGENT_ID.clone(),
1154 title: title.to_string().into(),
1155 updated_at,
1156 created_at: Some(updated_at),
1157 folder_paths,
1158 main_worktree_paths: PathList::default(),
1159 }
1160 }
1161
1162 fn init_test(cx: &mut TestAppContext) {
1163 cx.update(|cx| {
1164 let settings_store = settings::SettingsStore::test(cx);
1165 cx.set_global(settings_store);
1166 ThreadMetadataStore::init_global(cx);
1167 ThreadStore::init_global(cx);
1168 });
1169 cx.run_until_parked();
1170 }
1171
    // Seeds the metadata database directly, then verifies that initializing
    // the global store loads those rows into its in-memory cache and indexes
    // them by folder path.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Name the test database after the current test thread.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        // Persist two entries before the store is initialized.
        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        // Let the store's async load from the database finish.
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // Each entry should be retrievable through its own path index.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1236
    // Exercises the cache through a full save/move/delete lifecycle: entries
    // are indexed by path on save, re-indexed when a thread's paths change,
    // and dropped from every index on delete.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Re-save session-1 under project B's paths; it should move between
        // path indexes rather than appear in both.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Deleting session-2 must remove it from both the id and path
        // indexes.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1356
    // Migration should copy only native threads missing from the metadata DB,
    // leave preexisting metadata untouched, and mark every migrated thread
    // archived (this is not a first migration since one entry already exists).
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Seed one metadata entry whose id collides with a native thread; the
        // migration must not overwrite it.
        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            folder_paths: project_a_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Native threads: one overlapping id, two new project threads, and
        // one with no project paths.
        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The preexisting entry keeps its original title and archived state.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // Every freshly migrated thread arrives archived.
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1465
    // When the only native thread already has a metadata row, migration must
    // not overwrite the row or add duplicates.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            folder_paths: project_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save a native thread with the same session id but a newer title and
        // timestamp; migration should still skip it.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1522
    // On a first migration (empty metadata DB), only the 5 most recently
    // updated threads per project stay unarchived; older ones are archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1612
    // A title update on a thread with no user content must not create a
    // metadata row; pushing the first user content block must.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Setting a title on a still-empty thread should not persist anything.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // The first user content block makes the thread non-empty and
        // triggers metadata creation.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1664
    // Dropping a thread entity that already has user content must not delete
    // its persisted metadata.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Release the entity and flush effects so any release observers run.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1710
    // Threads created in a project with no worktrees should be archived by
    // default; threads with a worktree stay unarchived and record their
    // folder paths.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Content plus a title makes the thread eligible for metadata.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths.is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                with_worktree.folder_paths,
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
1790
    // Subagent threads (those constructed with a parent session id) must not
    // produce sidebar metadata entries; only top-level threads appear.
    #[gpui::test]
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                acp_thread::AcpThread::new(
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
             but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
1867
1868 #[test]
1869 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1870 let now = Utc::now();
1871
1872 let operations = vec![
1873 DbOperation::Upsert(make_metadata(
1874 "session-1",
1875 "First Thread",
1876 now,
1877 PathList::default(),
1878 )),
1879 DbOperation::Delete(acp::SessionId::new("session-1")),
1880 ];
1881
1882 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1883
1884 assert_eq!(deduped.len(), 1);
1885 assert_eq!(
1886 deduped[0],
1887 DbOperation::Delete(acp::SessionId::new("session-1"))
1888 );
1889 }
1890
1891 #[test]
1892 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1893 let now = Utc::now();
1894 let later = now + chrono::Duration::seconds(1);
1895
1896 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1897 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1898
1899 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1900 DbOperation::Upsert(old_metadata),
1901 DbOperation::Upsert(new_metadata.clone()),
1902 ]);
1903
1904 assert_eq!(deduped.len(), 1);
1905 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1906 }
1907
1908 #[test]
1909 fn test_dedup_db_operations_preserves_distinct_sessions() {
1910 let now = Utc::now();
1911
1912 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1913 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1914 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1915 DbOperation::Upsert(metadata1.clone()),
1916 DbOperation::Upsert(metadata2.clone()),
1917 ]);
1918
1919 assert_eq!(deduped.len(), 2);
1920 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1921 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1922 }
1923
    // Round-trips a thread through archive and unarchive, checking that the
    // path index and the archived index stay mutually exclusive at each step.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Freshly saved: indexed by path, not listed as archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        // Unarchiving restores the thread to the path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
2010
    // `entries_for_path` must skip archived threads while `entries` still
    // lists everything and `archived_entries` lists only the archived ones.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Path lookup excludes the archived thread...
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // ...but the full listing still contains both.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2070
    // `save_all` persists a whole batch of metadata entries in one call and
    // the cache reflects every one of them.
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
2118
    // The archived flag must survive a round-trip through the database: after
    // `reload`, the cache is rebuilt from persisted rows and the thread must
    // still be archived.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        cx.run_until_parked();

        // Force a reload so the cache is rebuilt from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2177
2178 #[gpui::test]
2179 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2180 init_test(cx);
2181
2182 cx.run_until_parked();
2183
2184 cx.update(|cx| {
2185 let store = ThreadMetadataStore::global(cx);
2186 store.update(cx, |store, cx| {
2187 store.archive(&acp::SessionId::new("nonexistent"), None, cx);
2188 });
2189 });
2190
2191 cx.run_until_parked();
2192
2193 cx.update(|cx| {
2194 let store = ThreadMetadataStore::global(cx);
2195 let store = store.read(cx);
2196
2197 assert!(store.is_empty());
2198 assert_eq!(store.entries().count(), 0);
2199 assert_eq!(store.archived_entries().count(), 0);
2200 });
2201 }
2202
2203 #[gpui::test]
2204 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2205 init_test(cx);
2206
2207 let paths = PathList::new(&[Path::new("/project-a")]);
2208 let now = Utc::now();
2209 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2210 let session_id = metadata.session_id.clone();
2211
2212 cx.update(|cx| {
2213 let store = ThreadMetadataStore::global(cx);
2214 store.update(cx, |store, cx| {
2215 store.save(metadata.clone(), cx);
2216 store.archive(&session_id, None, cx);
2217 });
2218 });
2219
2220 cx.run_until_parked();
2221
2222 cx.update(|cx| {
2223 let store = ThreadMetadataStore::global(cx);
2224 let store = store.read(cx);
2225
2226 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2227 pretty_assertions::assert_eq!(
2228 entries,
2229 vec![ThreadMetadata {
2230 archived: true,
2231 ..metadata
2232 }]
2233 );
2234 });
2235 }
2236
2237 #[gpui::test]
2238 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2239 init_test(cx);
2240 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2241
2242 let id = store
2243 .read_with(cx, |store, cx| {
2244 store.create_archived_worktree(
2245 "/tmp/worktree".to_string(),
2246 "/home/user/repo".to_string(),
2247 Some("feature-branch".to_string()),
2248 "staged_aaa".to_string(),
2249 "unstaged_bbb".to_string(),
2250 "original_000".to_string(),
2251 cx,
2252 )
2253 })
2254 .await
2255 .unwrap();
2256
2257 store
2258 .read_with(cx, |store, cx| {
2259 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2260 })
2261 .await
2262 .unwrap();
2263
2264 let worktrees = store
2265 .read_with(cx, |store, cx| {
2266 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2267 })
2268 .await
2269 .unwrap();
2270
2271 assert_eq!(worktrees.len(), 1);
2272 let wt = &worktrees[0];
2273 assert_eq!(wt.id, id);
2274 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2275 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2276 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2277 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2278 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2279 assert_eq!(wt.original_commit_hash, "original_000");
2280 }
2281
2282 #[gpui::test]
2283 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2284 init_test(cx);
2285 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2286
2287 let id = store
2288 .read_with(cx, |store, cx| {
2289 store.create_archived_worktree(
2290 "/tmp/worktree".to_string(),
2291 "/home/user/repo".to_string(),
2292 Some("main".to_string()),
2293 "deadbeef".to_string(),
2294 "deadbeef".to_string(),
2295 "original_000".to_string(),
2296 cx,
2297 )
2298 })
2299 .await
2300 .unwrap();
2301
2302 store
2303 .read_with(cx, |store, cx| {
2304 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2305 })
2306 .await
2307 .unwrap();
2308
2309 store
2310 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2311 .await
2312 .unwrap();
2313
2314 let worktrees = store
2315 .read_with(cx, |store, cx| {
2316 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2317 })
2318 .await
2319 .unwrap();
2320 assert!(worktrees.is_empty());
2321 }
2322
2323 #[gpui::test]
2324 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2325 init_test(cx);
2326 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2327
2328 let id = store
2329 .read_with(cx, |store, cx| {
2330 store.create_archived_worktree(
2331 "/tmp/worktree".to_string(),
2332 "/home/user/repo".to_string(),
2333 None,
2334 "abc123".to_string(),
2335 "abc123".to_string(),
2336 "original_000".to_string(),
2337 cx,
2338 )
2339 })
2340 .await
2341 .unwrap();
2342
2343 store
2344 .read_with(cx, |store, cx| {
2345 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2346 })
2347 .await
2348 .unwrap();
2349
2350 store
2351 .read_with(cx, |store, cx| {
2352 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2353 })
2354 .await
2355 .unwrap();
2356
2357 let wt1 = store
2358 .read_with(cx, |store, cx| {
2359 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2360 })
2361 .await
2362 .unwrap();
2363
2364 let wt2 = store
2365 .read_with(cx, |store, cx| {
2366 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2367 })
2368 .await
2369 .unwrap();
2370
2371 assert_eq!(wt1.len(), 1);
2372 assert_eq!(wt2.len(), 1);
2373 assert_eq!(wt1[0].id, wt2[0].id);
2374 }
2375
2376 #[gpui::test]
2377 async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) {
2378 init_test(cx);
2379 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2380
2381 let original_paths = PathList::new(&[
2382 Path::new("/projects/worktree-a"),
2383 Path::new("/projects/worktree-b"),
2384 Path::new("/other/unrelated"),
2385 ]);
2386 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2387
2388 store.update(cx, |store, cx| {
2389 store.save_manually(meta, cx);
2390 });
2391
2392 let replacements = vec![
2393 (
2394 PathBuf::from("/projects/worktree-a"),
2395 PathBuf::from("/restored/worktree-a"),
2396 ),
2397 (
2398 PathBuf::from("/projects/worktree-b"),
2399 PathBuf::from("/restored/worktree-b"),
2400 ),
2401 ];
2402
2403 store.update(cx, |store, cx| {
2404 store.complete_worktree_restore(
2405 &acp::SessionId::new("session-multi"),
2406 &replacements,
2407 cx,
2408 );
2409 });
2410
2411 let entry = store.read_with(cx, |store, _cx| {
2412 store.entry(&acp::SessionId::new("session-multi")).cloned()
2413 });
2414 let entry = entry.unwrap();
2415 let paths = entry.folder_paths.paths();
2416 assert_eq!(paths.len(), 3);
2417 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2418 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2419 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2420 }
2421
2422 #[gpui::test]
2423 async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) {
2424 init_test(cx);
2425 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2426
2427 let original_paths =
2428 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2429 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2430
2431 store.update(cx, |store, cx| {
2432 store.save_manually(meta, cx);
2433 });
2434
2435 let replacements = vec![
2436 (
2437 PathBuf::from("/projects/worktree-a"),
2438 PathBuf::from("/new/worktree-a"),
2439 ),
2440 (
2441 PathBuf::from("/nonexistent/path"),
2442 PathBuf::from("/should/not/appear"),
2443 ),
2444 ];
2445
2446 store.update(cx, |store, cx| {
2447 store.complete_worktree_restore(
2448 &acp::SessionId::new("session-partial"),
2449 &replacements,
2450 cx,
2451 );
2452 });
2453
2454 let entry = store.read_with(cx, |store, _cx| {
2455 store
2456 .entry(&acp::SessionId::new("session-partial"))
2457 .cloned()
2458 });
2459 let entry = entry.unwrap();
2460 let paths = entry.folder_paths.paths();
2461 assert_eq!(paths.len(), 2);
2462 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2463 assert!(paths.contains(&PathBuf::from("/other/path")));
2464 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2465 }
2466
2467 #[gpui::test]
2468 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
2469 init_test(cx);
2470 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2471
2472 let original_paths = PathList::new(&[
2473 Path::new("/projects/worktree-a"),
2474 Path::new("/projects/worktree-b"),
2475 Path::new("/other/unrelated"),
2476 ]);
2477 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2478
2479 store.update(cx, |store, cx| {
2480 store.save_manually(meta, cx);
2481 });
2482
2483 let replacements = vec![
2484 (
2485 PathBuf::from("/projects/worktree-a"),
2486 PathBuf::from("/restored/worktree-a"),
2487 ),
2488 (
2489 PathBuf::from("/projects/worktree-b"),
2490 PathBuf::from("/restored/worktree-b"),
2491 ),
2492 ];
2493
2494 store.update(cx, |store, cx| {
2495 store.update_restored_worktree_paths(
2496 &acp::SessionId::new("session-multi"),
2497 &replacements,
2498 cx,
2499 );
2500 });
2501
2502 let entry = store.read_with(cx, |store, _cx| {
2503 store.entry(&acp::SessionId::new("session-multi")).cloned()
2504 });
2505 let entry = entry.unwrap();
2506 let paths = entry.folder_paths.paths();
2507 assert_eq!(paths.len(), 3);
2508 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2509 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2510 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2511 }
2512
2513 #[gpui::test]
2514 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
2515 init_test(cx);
2516 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2517
2518 let original_paths =
2519 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2520 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2521
2522 store.update(cx, |store, cx| {
2523 store.save_manually(meta, cx);
2524 });
2525
2526 let replacements = vec![
2527 (
2528 PathBuf::from("/projects/worktree-a"),
2529 PathBuf::from("/new/worktree-a"),
2530 ),
2531 (
2532 PathBuf::from("/nonexistent/path"),
2533 PathBuf::from("/should/not/appear"),
2534 ),
2535 ];
2536
2537 store.update(cx, |store, cx| {
2538 store.update_restored_worktree_paths(
2539 &acp::SessionId::new("session-partial"),
2540 &replacements,
2541 cx,
2542 );
2543 });
2544
2545 let entry = store.read_with(cx, |store, _cx| {
2546 store
2547 .entry(&acp::SessionId::new("session-partial"))
2548 .cloned()
2549 });
2550 let entry = entry.unwrap();
2551 let paths = entry.folder_paths.paths();
2552 assert_eq!(paths.len(), 2);
2553 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2554 assert!(paths.contains(&PathBuf::from("/other/path")));
2555 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2556 }
2557
2558 #[gpui::test]
2559 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2560 init_test(cx);
2561 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2562
2563 let id1 = store
2564 .read_with(cx, |store, cx| {
2565 store.create_archived_worktree(
2566 "/projects/worktree-a".to_string(),
2567 "/home/user/repo".to_string(),
2568 Some("branch-a".to_string()),
2569 "staged_a".to_string(),
2570 "unstaged_a".to_string(),
2571 "original_000".to_string(),
2572 cx,
2573 )
2574 })
2575 .await
2576 .unwrap();
2577
2578 let id2 = store
2579 .read_with(cx, |store, cx| {
2580 store.create_archived_worktree(
2581 "/projects/worktree-b".to_string(),
2582 "/home/user/repo".to_string(),
2583 Some("branch-b".to_string()),
2584 "staged_b".to_string(),
2585 "unstaged_b".to_string(),
2586 "original_000".to_string(),
2587 cx,
2588 )
2589 })
2590 .await
2591 .unwrap();
2592
2593 store
2594 .read_with(cx, |store, cx| {
2595 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2596 })
2597 .await
2598 .unwrap();
2599
2600 store
2601 .read_with(cx, |store, cx| {
2602 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2603 })
2604 .await
2605 .unwrap();
2606
2607 let worktrees = store
2608 .read_with(cx, |store, cx| {
2609 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2610 })
2611 .await
2612 .unwrap();
2613
2614 assert_eq!(worktrees.len(), 2);
2615
2616 let paths: Vec<&Path> = worktrees
2617 .iter()
2618 .map(|w| w.worktree_path.as_path())
2619 .collect();
2620 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2621 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2622 }
2623}