1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use acp_thread::AcpThreadEvent;
7use agent::{ThreadStore, ZED_AGENT_ID};
8use agent_client_protocol as acp;
9use anyhow::Context as _;
10use chrono::{DateTime, Utc};
11use collections::{HashMap, HashSet};
12use db::{
13 sqlez::{
14 bindable::Column, domain::Domain, statement::Statement,
15 thread_safe_connection::ThreadSafeConnection,
16 },
17 sqlez_macros::sql,
18};
19use futures::{FutureExt as _, future::Shared};
20use gpui::{AppContext as _, Entity, Global, Subscription, Task};
21use project::AgentId;
22use ui::{App, Context, SharedString};
23use util::ResultExt as _;
24use workspace::PathList;
25
26use crate::DEFAULT_THREAD_TITLE;
27
/// Entry point for thread-metadata storage: installs the global
/// [`ThreadMetadataStore`] and kicks off the one-time migration of legacy
/// native-agent thread metadata into it.
pub fn init(cx: &mut App) {
    // Order matters: the migration below reads the global store set up here.
    ThreadMetadataStore::init_global(cx);
    migrate_thread_metadata(cx);
}
32
/// Migrate existing thread metadata from native agent thread store to the new metadata storage.
/// We skip migrating threads that do not have a project.
///
/// TODO: Remove this after N weeks of shipping the sidebar
fn migrate_thread_metadata(cx: &mut App) {
    let store = ThreadMetadataStore::global(cx);
    let db = store.read(cx).db.clone();

    cx.spawn(async move |cx| {
        // Session IDs already present in the metadata DB; those were migrated
        // (or created) previously and must not be overwritten.
        let existing_entries = db.list_ids()?.into_iter().collect::<HashSet<_>>();

        let is_first_migration = existing_entries.is_empty();

        // NOTE(review): in async contexts `Entity::read_with` usually returns a
        // `Result` — confirm whether a `?` is missing here.
        let mut to_migrate = store.read_with(cx, |_store, cx| {
            ThreadStore::global(cx)
                .read(cx)
                .entries()
                .filter_map(|entry| {
                    if existing_entries.contains(&entry.id.0) {
                        return None;
                    }

                    // Migrated threads default to archived; the first-migration
                    // pass below selectively unarchives the most recent ones.
                    Some(ThreadMetadata {
                        session_id: entry.id,
                        agent_id: ZED_AGENT_ID.clone(),
                        title: entry.title,
                        updated_at: entry.updated_at,
                        created_at: entry.created_at,
                        folder_paths: entry.folder_paths,
                        main_worktree_paths: PathList::default(),
                        archived: true,
                    })
                })
                .collect::<Vec<_>>()
        });

        if to_migrate.is_empty() {
            return anyhow::Ok(());
        }

        // On the first migration (no entries in DB yet), keep the 5 most
        // recent threads per project unarchived.
        if is_first_migration {
            let mut per_project: HashMap<PathList, Vec<&mut ThreadMetadata>> = HashMap::default();
            for entry in &mut to_migrate {
                // Threads without a project have no sidebar location; leave archived.
                if entry.folder_paths.is_empty() {
                    continue;
                }
                per_project
                    .entry(entry.folder_paths.clone())
                    .or_default()
                    .push(entry);
            }
            for entries in per_project.values_mut() {
                // Newest first, then unarchive the top five per project.
                entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
                for entry in entries.iter_mut().take(5) {
                    entry.archived = false;
                }
            }
        }

        log::info!("Migrating {} thread store entries", to_migrate.len());

        // Manually save each entry to the database and call reload, otherwise
        // we'll end up triggering lots of reloads after each save
        for entry in to_migrate {
            db.save(entry).await?;
        }

        log::info!("Finished migrating thread store entries");

        // Single reload after the whole batch; errors here are non-fatal.
        let _ = store.update(cx, |store, cx| store.reload(cx));
        anyhow::Ok(())
    })
    .detach_and_log_err(cx);
}
109
/// Newtype wrapper that registers the [`ThreadMetadataStore`] entity as the
/// process-wide gpui global.
struct GlobalThreadMetadataStore(Entity<ThreadMetadataStore>);
impl Global for GlobalThreadMetadataStore {}
112
/// Lightweight metadata for any thread (native or ACP), enough to populate
/// the sidebar list and route to the correct load path when clicked.
#[derive(Debug, Clone, PartialEq)]
pub struct ThreadMetadata {
    /// Unique session identifier; primary key in the metadata database.
    pub session_id: acp::SessionId,
    /// Agent that owns the thread (the built-in Zed agent or an external ACP agent).
    pub agent_id: AgentId,
    /// Human-readable title shown in the sidebar.
    pub title: SharedString,
    /// Last modification time; listings are ordered by this, newest first.
    pub updated_at: DateTime<Utc>,
    /// Creation time, if known. `None` for rows that predate this field.
    pub created_at: Option<DateTime<Utc>>,
    /// Paths of the worktrees the thread was opened in; used to group threads per project.
    pub folder_paths: PathList,
    /// Paths of the main ("primary") worktrees when the thread lives in a
    /// linked git worktree; empty when there is no such association.
    pub main_worktree_paths: PathList,
    /// Whether the thread is hidden from the sidebar and shown in the archive instead.
    pub archived: bool,
}
126
127impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo {
128 fn from(meta: &ThreadMetadata) -> Self {
129 Self {
130 session_id: meta.session_id.clone(),
131 work_dirs: Some(meta.folder_paths.clone()),
132 title: Some(meta.title.clone()),
133 updated_at: Some(meta.updated_at),
134 created_at: meta.created_at,
135 meta: None,
136 }
137 }
138}
139
/// Record of a git worktree that was archived (deleted from disk) when its
/// last thread was archived.
///
/// All fields are plain owned data, so the type derives `Debug`/`Clone`/
/// `PartialEq` for diagnostics and test assertions.
#[derive(Debug, Clone, PartialEq)]
pub struct ArchivedGitWorktree {
    /// Auto-incrementing primary key.
    pub id: i64,
    /// Absolute path to the directory of the worktree before it was deleted.
    /// Used when restoring, to put the recreated worktree back where it was.
    /// If the path already exists on disk, the worktree is assumed to be
    /// already restored and is used as-is.
    pub worktree_path: PathBuf,
    /// Absolute path of the main repository ("main worktree") that owned this worktree.
    /// Used when restoring, to reattach the recreated worktree to the correct main repo.
    /// If the main repo isn't found on disk, unarchiving fails because we only store
    /// commit hashes, and without the actual git repo being available, we can't restore
    /// the files.
    pub main_repo_path: PathBuf,
    /// Branch that was checked out in the worktree at archive time. `None` if
    /// the worktree was in detached HEAD state, which isn't supported in Zed, but
    /// could happen if the user made a detached one outside of Zed.
    /// On restore, we try to switch to this branch. If that fails (e.g. it's
    /// checked out elsewhere), we auto-generate a new one.
    pub branch_name: Option<String>,
    /// SHA of the WIP commit that captures files that were staged (but not yet
    /// committed) at the time of archiving. This commit can be empty if the
    /// user had no staged files at the time. It sits directly on top of whatever
    /// the user's last actual commit was.
    pub staged_commit_hash: String,
    /// SHA of the WIP commit that captures files that were unstaged (including
    /// untracked) at the time of archiving. This commit can be empty if the user
    /// had no unstaged files at the time. It sits on top of `staged_commit_hash`.
    /// After doing `git reset` past both of these commits, we're back in the state
    /// we had before archiving, including what was staged, what was unstaged, and
    /// what was committed.
    pub unstaged_commit_hash: String,
    /// SHA of the commit that HEAD pointed at before we created the two WIP
    /// commits during archival. After resetting past the WIP commits during
    /// restore, HEAD should land back on this commit. It also serves as a
    /// pre-restore sanity check (abort if this commit no longer exists in the
    /// repo) and as a fallback target if the WIP resets fail.
    pub original_commit_hash: String,
}
181
/// The store holds all metadata needed to show threads in the sidebar/the archive.
///
/// Automatically listens to AcpThread events and updates metadata if it has changed.
pub struct ThreadMetadataStore {
    // Persistence layer; writes go through `pending_thread_ops_tx`, reads via `reload`.
    db: ThreadMetadataDb,
    // Primary map: session id -> metadata.
    threads: HashMap<acp::SessionId, ThreadMetadata>,
    // Reverse index: folder paths -> session ids opened under those paths.
    threads_by_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    // Reverse index: main-worktree paths -> session ids in linked worktrees.
    threads_by_main_paths: HashMap<PathList, HashSet<acp::SessionId>>,
    // In-flight reload, shared so concurrent callers await the same one.
    reload_task: Option<Shared<Task<()>>>,
    // Event subscriptions per live AcpThread; removed when the thread is released.
    session_subscriptions: HashMap<acp::SessionId, Subscription>,
    // Queue feeding the background DB writer task.
    pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
    // Running archival jobs; dropping the Sender cancels the background task.
    in_flight_archives: HashMap<acp::SessionId, (Task<()>, smol::channel::Sender<()>)>,
    // Background task draining `pending_thread_ops_tx`; held to keep it alive.
    _db_operations_task: Task<()>,
}
196
/// A queued database write, coalesced per session id before execution.
#[derive(Debug, PartialEq)]
enum DbOperation {
    // Insert or update a thread's metadata row.
    Upsert(ThreadMetadata),
    // Remove a thread's metadata row.
    Delete(acp::SessionId),
}
202
203impl DbOperation {
204 fn id(&self) -> &acp::SessionId {
205 match self {
206 DbOperation::Upsert(thread) => &thread.session_id,
207 DbOperation::Delete(session_id) => session_id,
208 }
209 }
210}
211
212impl ThreadMetadataStore {
213 #[cfg(not(any(test, feature = "test-support")))]
214 pub fn init_global(cx: &mut App) {
215 if cx.has_global::<Self>() {
216 return;
217 }
218
219 let db = ThreadMetadataDb::global(cx);
220 let thread_store = cx.new(|cx| Self::new(db, cx));
221 cx.set_global(GlobalThreadMetadataStore(thread_store));
222 }
223
    #[cfg(any(test, feature = "test-support"))]
    /// Test variant: always (re)installs the global, backed by a fresh test
    /// database named after the current test thread so tests don't collide.
    pub fn init_global(cx: &mut App) {
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        // Blocking here is fine: tests run on a dedicated thread.
        let db = smol::block_on(db::open_test_db::<ThreadMetadataDb>(&db_name));
        let thread_store = cx.new(|cx| Self::new(ThreadMetadataDb(db), cx));
        cx.set_global(GlobalThreadMetadataStore(thread_store));
    }
233
234 pub fn try_global(cx: &App) -> Option<Entity<Self>> {
235 cx.try_global::<GlobalThreadMetadataStore>()
236 .map(|store| store.0.clone())
237 }
238
    /// Returns the global store. Panics if `init_global` has not run yet.
    pub fn global(cx: &App) -> Entity<Self> {
        cx.global::<GlobalThreadMetadataStore>().0.clone()
    }
242
    /// Whether the store currently holds no thread metadata at all.
    pub fn is_empty(&self) -> bool {
        self.threads.is_empty()
    }

    /// Returns all thread IDs.
    pub fn entry_ids(&self) -> impl Iterator<Item = acp::SessionId> + '_ {
        self.threads.keys().cloned()
    }

    /// Returns the metadata for a specific thread, if it exists.
    pub fn entry(&self, session_id: &acp::SessionId) -> Option<&ThreadMetadata> {
        self.threads.get(session_id)
    }

    /// Returns all threads.
    pub fn entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads.values()
    }
261
262 /// Returns all archived threads.
263 pub fn archived_entries(&self) -> impl Iterator<Item = &ThreadMetadata> + '_ {
264 self.entries().filter(|t| t.archived)
265 }
266
    /// Returns all threads for the given path list, excluding archived threads.
    pub fn entries_for_path(
        &self,
        path_list: &PathList,
    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads_by_paths
            .get(path_list)
            .into_iter()
            .flatten()
            // The index stores IDs; resolve each against the primary map.
            .filter_map(|s| self.threads.get(s))
            .filter(|s| !s.archived)
    }

    /// Returns threads whose `main_worktree_paths` matches the given path list,
    /// excluding archived threads. This finds threads that were opened in a
    /// linked worktree but are associated with the given main worktree.
    pub fn entries_for_main_worktree_path(
        &self,
        path_list: &PathList,
    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
        self.threads_by_main_paths
            .get(path_list)
            .into_iter()
            .flatten()
            .filter_map(|s| self.threads.get(s))
            .filter(|s| !s.archived)
    }
294
    /// Reloads all thread metadata from the database, replacing the in-memory
    /// state. Returns a shared task so concurrent callers can await the same reload.
    fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
        let db = self.db.clone();
        // Drop any in-flight reload; its result would be stale anyway.
        self.reload_task.take();

        // Run the DB read off the main thread.
        let list_task = cx
            .background_spawn(async move { db.list().context("Failed to fetch sidebar metadata") });

        let reload_task = cx
            .spawn(async move |this, cx| {
                let Some(rows) = list_task.await.log_err() else {
                    return;
                };

                this.update(cx, |this, cx| {
                    // Rebuild the primary map and both reverse indexes from scratch.
                    this.threads.clear();
                    this.threads_by_paths.clear();
                    this.threads_by_main_paths.clear();

                    for row in rows {
                        this.threads_by_paths
                            .entry(row.folder_paths.clone())
                            .or_default()
                            .insert(row.session_id.clone());
                        // Only index by main worktree paths when present, so
                        // lookups don't match threads without that association.
                        if !row.main_worktree_paths.is_empty() {
                            this.threads_by_main_paths
                                .entry(row.main_worktree_paths.clone())
                                .or_default()
                                .insert(row.session_id.clone());
                        }
                        this.threads.insert(row.session_id.clone(), row);
                    }

                    cx.notify();
                })
                .ok();
            })
            .shared();
        self.reload_task = Some(reload_task.clone());
        reload_task
    }
335
    /// Saves a batch of metadata entries, notifying observers once at the end
    /// instead of once per entry.
    pub fn save_all(&mut self, metadata: Vec<ThreadMetadata>, cx: &mut Context<Self>) {
        for metadata in metadata {
            self.save_internal(metadata);
        }
        cx.notify();
    }

    /// Test-only alias for `save`, exposed so tests can seed the store directly.
    #[cfg(any(test, feature = "test-support"))]
    pub fn save_manually(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save(metadata, cx)
    }

    /// Saves a single entry and notifies observers.
    fn save(&mut self, metadata: ThreadMetadata, cx: &mut Context<Self>) {
        self.save_internal(metadata);
        cx.notify();
    }
352
353 fn save_internal(&mut self, metadata: ThreadMetadata) {
354 if let Some(thread) = self.threads.get(&metadata.session_id) {
355 if thread.folder_paths != metadata.folder_paths {
356 if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
357 session_ids.remove(&metadata.session_id);
358 }
359 }
360 if thread.main_worktree_paths != metadata.main_worktree_paths
361 && !thread.main_worktree_paths.is_empty()
362 {
363 if let Some(session_ids) = self
364 .threads_by_main_paths
365 .get_mut(&thread.main_worktree_paths)
366 {
367 session_ids.remove(&metadata.session_id);
368 }
369 }
370 }
371
372 self.threads
373 .insert(metadata.session_id.clone(), metadata.clone());
374
375 self.threads_by_paths
376 .entry(metadata.folder_paths.clone())
377 .or_default()
378 .insert(metadata.session_id.clone());
379
380 if !metadata.main_worktree_paths.is_empty() {
381 self.threads_by_main_paths
382 .entry(metadata.main_worktree_paths.clone())
383 .or_default()
384 .insert(metadata.session_id.clone());
385 }
386
387 self.pending_thread_ops_tx
388 .try_send(DbOperation::Upsert(metadata))
389 .log_err();
390 }
391
392 pub fn update_working_directories(
393 &mut self,
394 session_id: &acp::SessionId,
395 work_dirs: PathList,
396 cx: &mut Context<Self>,
397 ) {
398 if let Some(thread) = self.threads.get(session_id) {
399 self.save_internal(ThreadMetadata {
400 folder_paths: work_dirs,
401 ..thread.clone()
402 });
403 cx.notify();
404 }
405 }
406
    /// Marks the thread archived and, when given, tracks the in-flight
    /// background archival job so it can be cancelled by `unarchive`.
    pub fn archive(
        &mut self,
        session_id: &acp::SessionId,
        archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
        cx: &mut Context<Self>,
    ) {
        self.update_archived(session_id, true, cx);

        if let Some(job) = archive_job {
            self.in_flight_archives.insert(session_id.clone(), job);
        }
    }

    /// Marks the thread unarchived and cancels any in-flight archival job.
    pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
        self.update_archived(session_id, false, cx);
        // Dropping the Sender triggers cancellation in the background task.
        self.in_flight_archives.remove(session_id);
    }

    /// Forgets a finished archival job without changing the archived flag.
    pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) {
        self.in_flight_archives.remove(session_id);
    }
429
    /// Updates a thread's `folder_paths` after an archived worktree has been
    /// restored to disk. The restored worktree may land at a different path
    /// than it had before archival, so each `(old_path, new_path)` pair in
    /// `path_replacements` is applied to the thread's stored folder paths.
    pub fn update_restored_worktree_paths(
        &mut self,
        session_id: &acp::SessionId,
        path_replacements: &[(PathBuf, PathBuf)],
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id).cloned() {
            let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
            // Replacements apply sequentially, each rewriting the first match
            // in place — later pairs see the effects of earlier ones.
            for (old_path, new_path) in path_replacements {
                if let Some(pos) = paths.iter().position(|p| p == old_path) {
                    paths[pos] = new_path.clone();
                }
            }
            let new_folder_paths = PathList::new(&paths);
            // `save_internal` re-indexes the thread under its new paths.
            self.save_internal(ThreadMetadata {
                folder_paths: new_folder_paths,
                ..thread
            });
            cx.notify();
        }
    }
455
    /// Records a newly archived git worktree in the database and returns its
    /// auto-generated row id. All work happens on a background thread.
    pub fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
        cx: &App,
    ) -> Task<anyhow::Result<i64>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.create_archived_worktree(
                worktree_path,
                main_repo_path,
                branch_name,
                staged_commit_hash,
                unstaged_commit_hash,
                original_commit_hash,
            )
            .await
        })
    }

    /// Associates a thread with an archived worktree (many-to-many link table).
    pub fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.link_thread_to_archived_worktree(session_id, archived_worktree_id)
                .await
        })
    }

    /// Fetches all archived-worktree records linked to the given thread.
    pub fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<Vec<ArchivedGitWorktree>>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.get_archived_worktrees_for_thread(session_id).await })
    }

    /// Deletes an archived-worktree record (and all of its thread links).
    pub fn delete_archived_worktree(&self, id: i64, cx: &App) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move { db.delete_archived_worktree(id).await })
    }

    /// Removes every archived-worktree link for the given thread, leaving the
    /// worktree records themselves untouched.
    pub fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
        cx: &App,
    ) -> Task<anyhow::Result<()>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.unlink_thread_from_all_archived_worktrees(session_id)
                .await
        })
    }

    /// Whether any thread still references the archived worktree; used to
    /// decide if its record can be deleted.
    pub fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
        cx: &App,
    ) -> Task<anyhow::Result<bool>> {
        let db = self.db.clone();
        cx.background_spawn(async move {
            db.is_archived_worktree_referenced(archived_worktree_id)
                .await
        })
    }
530
    /// Sets the `archived` flag on a thread's metadata (if the thread exists),
    /// persisting the change and notifying observers.
    fn update_archived(
        &mut self,
        session_id: &acp::SessionId,
        archived: bool,
        cx: &mut Context<Self>,
    ) {
        if let Some(thread) = self.threads.get(session_id) {
            self.save_internal(ThreadMetadata {
                archived,
                ..thread.clone()
            });
            cx.notify();
        }
    }
545
    /// Removes a thread from memory and queues its deletion from the database.
    pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context<Self>) {
        if let Some(thread) = self.threads.get(&session_id) {
            // Unindex from both reverse maps before dropping the entry itself.
            if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) {
                session_ids.remove(&session_id);
            }
            if !thread.main_worktree_paths.is_empty() {
                if let Some(session_ids) = self
                    .threads_by_main_paths
                    .get_mut(&thread.main_worktree_paths)
                {
                    session_ids.remove(&session_id);
                }
            }
        }
        self.threads.remove(&session_id);
        // The DB delete is queued even when the thread wasn't in memory; the
        // SQL DELETE harmlessly no-ops if the row doesn't exist.
        self.pending_thread_ops_tx
            .try_send(DbOperation::Delete(session_id))
            .log_err();
        cx.notify();
    }
566
    /// Creates the store, wiring up automatic AcpThread tracking and the
    /// background database writer, then triggers an initial load from the DB.
    fn new(db: ThreadMetadataDb, cx: &mut Context<Self>) -> Self {
        let weak_store = cx.weak_entity();

        // Subscribe to every newly-created AcpThread so its events keep this
        // store's metadata current.
        cx.observe_new::<acp_thread::AcpThread>(move |thread, _window, cx| {
            // Don't track subagent threads in the sidebar.
            if thread.parent_session_id().is_some() {
                return;
            }

            let thread_entity = cx.entity();

            // Drop our event subscription when the thread entity is released,
            // so `session_subscriptions` doesn't leak.
            cx.on_release({
                let weak_store = weak_store.clone();
                move |thread, cx| {
                    weak_store
                        .update(cx, |store, _cx| {
                            let session_id = thread.session_id().clone();
                            store.session_subscriptions.remove(&session_id);
                        })
                        .ok();
                }
            })
            .detach();

            weak_store
                .update(cx, |this, cx| {
                    let subscription = cx.subscribe(&thread_entity, Self::handle_thread_event);
                    this.session_subscriptions
                        .insert(thread.session_id().clone(), subscription);
                })
                .ok();
        })
        .detach();

        // Background writer: drains queued DB operations in batches, deduping
        // per session so rapid successive updates collapse into one write.
        let (tx, rx) = smol::channel::unbounded();
        let _db_operations_task = cx.background_spawn({
            let db = db.clone();
            async move {
                while let Ok(first_update) = rx.recv().await {
                    // Greedily pull everything already queued into one batch.
                    let mut updates = vec![first_update];
                    while let Ok(update) = rx.try_recv() {
                        updates.push(update);
                    }
                    let updates = Self::dedup_db_operations(updates);
                    for operation in updates {
                        match operation {
                            DbOperation::Upsert(metadata) => {
                                db.save(metadata).await.log_err();
                            }
                            DbOperation::Delete(session_id) => {
                                db.delete(session_id).await.log_err();
                            }
                        }
                    }
                }
            }
        });

        let mut this = Self {
            db,
            threads: HashMap::default(),
            threads_by_paths: HashMap::default(),
            threads_by_main_paths: HashMap::default(),
            reload_task: None,
            session_subscriptions: HashMap::default(),
            pending_thread_ops_tx: tx,
            in_flight_archives: HashMap::default(),
            _db_operations_task,
        };
        // Populate state from the DB immediately; callers can await `reload`
        // themselves if they need to know when it completes.
        let _ = this.reload(cx);
        this
    }
639
640 fn dedup_db_operations(operations: Vec<DbOperation>) -> Vec<DbOperation> {
641 let mut ops = HashMap::default();
642 for operation in operations.into_iter().rev() {
643 if ops.contains_key(operation.id()) {
644 continue;
645 }
646 ops.insert(operation.id().clone(), operation);
647 }
648 ops.into_values().collect()
649 }
650
    /// Reacts to an event on a tracked AcpThread by recomputing and saving the
    /// thread's metadata snapshot (title, paths, timestamps, archived flag).
    fn handle_thread_event(
        &mut self,
        thread: Entity<acp_thread::AcpThread>,
        event: &AcpThreadEvent,
        cx: &mut Context<Self>,
    ) {
        // Don't track subagent threads in the sidebar.
        if thread.read(cx).parent_session_id().is_some() {
            return;
        }

        match event {
            // Any event that can change user-visible state refreshes the metadata.
            AcpThreadEvent::NewEntry
            | AcpThreadEvent::TitleUpdated
            | AcpThreadEvent::EntryUpdated(_)
            | AcpThreadEvent::EntriesRemoved(_)
            | AcpThreadEvent::ToolAuthorizationRequested(_)
            | AcpThreadEvent::ToolAuthorizationReceived(_)
            | AcpThreadEvent::Retry(_)
            | AcpThreadEvent::Stopped(_)
            | AcpThreadEvent::Error
            | AcpThreadEvent::LoadError(_)
            | AcpThreadEvent::Refusal
            | AcpThreadEvent::WorkingDirectoriesUpdated => {
                let thread_ref = thread.read(cx);
                // Threads with no entries yet have nothing worth listing.
                if thread_ref.entries().is_empty() {
                    return;
                }

                let existing_thread = self.threads.get(thread_ref.session_id());
                let session_id = thread_ref.session_id().clone();
                let title = thread_ref
                    .title()
                    .unwrap_or_else(|| DEFAULT_THREAD_TITLE.into());

                let updated_at = Utc::now();

                // Preserve the original creation time across updates; fall
                // back to "now" for threads we're seeing for the first time.
                let created_at = existing_thread
                    .and_then(|t| t.created_at)
                    .unwrap_or_else(|| updated_at);

                let agent_id = thread_ref.connection().agent_id();

                // Snapshot the project's currently visible worktree roots.
                let folder_paths = {
                    let project = thread_ref.project().read(cx);
                    let paths: Vec<Arc<Path>> = project
                        .visible_worktrees(cx)
                        .map(|worktree| worktree.read(cx).abs_path())
                        .collect();
                    PathList::new(&paths)
                };

                let main_worktree_paths = thread_ref
                    .project()
                    .read(cx)
                    .project_group_key(cx)
                    .path_list()
                    .clone();

                // Threads without a folder path (e.g. started in an empty
                // window) are archived by default so they don't get lost,
                // because they won't show up in the sidebar. Users can reload
                // them from the archive.
                let archived = existing_thread
                    .map(|t| t.archived)
                    .unwrap_or(folder_paths.is_empty());

                let metadata = ThreadMetadata {
                    session_id,
                    agent_id,
                    title,
                    created_at: Some(created_at),
                    updated_at,
                    folder_paths,
                    main_worktree_paths,
                    archived,
                };

                self.save(metadata, cx);
            }
            // Purely transient/UI events that don't affect sidebar metadata.
            AcpThreadEvent::TokenUsageUpdated
            | AcpThreadEvent::SubagentSpawned(_)
            | AcpThreadEvent::PromptCapabilitiesUpdated
            | AcpThreadEvent::AvailableCommandsUpdated(_)
            | AcpThreadEvent::ModeUpdated(_)
            | AcpThreadEvent::ConfigOptionsUpdated(_) => {}
        }
    }
739}
740
// NOTE(review): nothing visible here ever calls `set_global` with
// `ThreadMetadataStore` itself — the entity is registered via the
// `GlobalThreadMetadataStore` wrapper. This impl appears to exist only so
// `cx.has_global::<Self>()` in `init_global` compiles; confirm it is still needed.
impl Global for ThreadMetadataStore {}
742
/// SQLite-backed persistence for sidebar thread metadata and archived
/// git-worktree records, wrapping a clonable thread-safe connection.
struct ThreadMetadataDb(ThreadSafeConnection);
744
impl Domain for ThreadMetadataDb {
    const NAME: &str = stringify!(ThreadMetadataDb);

    // Migrations run in order and, once shipped, must never be edited — only
    // appended to. Hence ALTER TABLE steps rather than amending the original
    // CREATE TABLE.
    const MIGRATIONS: &[&str] = &[
        sql!(
            CREATE TABLE IF NOT EXISTS sidebar_threads(
                session_id TEXT PRIMARY KEY,
                agent_id TEXT,
                title TEXT NOT NULL,
                updated_at TEXT NOT NULL,
                created_at TEXT,
                folder_paths TEXT,
                folder_paths_order TEXT
            ) STRICT;
        ),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN archived INTEGER DEFAULT 0),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths TEXT),
        sql!(ALTER TABLE sidebar_threads ADD COLUMN main_worktree_paths_order TEXT),
        sql!(
            CREATE TABLE IF NOT EXISTS archived_git_worktrees(
                id INTEGER PRIMARY KEY,
                worktree_path TEXT NOT NULL,
                main_repo_path TEXT NOT NULL,
                branch_name TEXT,
                staged_commit_hash TEXT,
                unstaged_commit_hash TEXT,
                original_commit_hash TEXT
            ) STRICT;

            CREATE TABLE IF NOT EXISTS thread_archived_worktrees(
                session_id TEXT NOT NULL,
                archived_worktree_id INTEGER NOT NULL REFERENCES archived_git_worktrees(id),
                PRIMARY KEY (session_id, archived_worktree_id)
            ) STRICT;
        ),
    ];
}
782
// Registers `ThreadMetadataDb` as a lazily-opened global connection with no
// dependent domains.
db::static_connection!(ThreadMetadataDb, []);
784
785impl ThreadMetadataDb {
    /// Returns the IDs of all stored threads, newest first; used by the
    /// migration to skip threads that already have a row.
    pub fn list_ids(&self) -> anyhow::Result<Vec<Arc<str>>> {
        self.select::<Arc<str>>(
            "SELECT session_id FROM sidebar_threads \
             ORDER BY updated_at DESC",
        )?()
    }

    /// List all sidebar thread metadata, ordered by updated_at descending.
    // Column order here must match `impl Column for ThreadMetadata`.
    pub fn list(&self) -> anyhow::Result<Vec<ThreadMetadata>> {
        self.select::<ThreadMetadata>(
            "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \
             FROM sidebar_threads \
             ORDER BY updated_at DESC"
        )?()
    }
801
    /// Upsert metadata for a thread.
    pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
        let id = row.session_id.0.clone();
        // The built-in Zed agent is stored as NULL; only external agent IDs
        // are written out (mirrored on read in `Column for ThreadMetadata`).
        let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
            None
        } else {
            Some(row.agent_id.to_string())
        };
        let title = row.title.to_string();
        // Timestamps are stored as RFC 3339 text, which also sorts correctly.
        let updated_at = row.updated_at.to_rfc3339();
        let created_at = row.created_at.map(|dt| dt.to_rfc3339());
        // Paths and their display order live in two parallel TEXT columns;
        // empty lists are stored as NULL.
        let serialized = row.folder_paths.serialize();
        let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() {
            (None, None)
        } else {
            (Some(serialized.paths), Some(serialized.order))
        };
        let main_serialized = row.main_worktree_paths.serialize();
        let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty()
        {
            (None, None)
        } else {
            (Some(main_serialized.paths), Some(main_serialized.order))
        };
        let archived = row.archived;

        self.write(move |conn| {
            let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \
                VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \
                ON CONFLICT(session_id) DO UPDATE SET \
                    agent_id = excluded.agent_id, \
                    title = excluded.title, \
                    updated_at = excluded.updated_at, \
                    created_at = excluded.created_at, \
                    folder_paths = excluded.folder_paths, \
                    folder_paths_order = excluded.folder_paths_order, \
                    archived = excluded.archived, \
                    main_worktree_paths = excluded.main_worktree_paths, \
                    main_worktree_paths_order = excluded.main_worktree_paths_order";
            let mut stmt = Statement::prepare(conn, sql)?;
            // Bind order must match the ?1..?10 placeholders above.
            let mut i = stmt.bind(&id, 1)?;
            i = stmt.bind(&agent_id, i)?;
            i = stmt.bind(&title, i)?;
            i = stmt.bind(&updated_at, i)?;
            i = stmt.bind(&created_at, i)?;
            i = stmt.bind(&folder_paths, i)?;
            i = stmt.bind(&folder_paths_order, i)?;
            i = stmt.bind(&archived, i)?;
            i = stmt.bind(&main_worktree_paths, i)?;
            stmt.bind(&main_worktree_paths_order, i)?;
            stmt.exec()
        })
        .await
    }
856
    /// Delete metadata for a single thread. No-ops if the row doesn't exist.
    pub async fn delete(&self, session_id: acp::SessionId) -> anyhow::Result<()> {
        let id = session_id.0.clone();
        self.write(move |conn| {
            let mut stmt =
                Statement::prepare(conn, "DELETE FROM sidebar_threads WHERE session_id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
868
    /// Inserts a record for an archived git worktree and returns the new row's
    /// auto-generated id (via `RETURNING id`).
    pub async fn create_archived_worktree(
        &self,
        worktree_path: String,
        main_repo_path: String,
        branch_name: Option<String>,
        staged_commit_hash: String,
        unstaged_commit_hash: String,
        original_commit_hash: String,
    ) -> anyhow::Result<i64> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO archived_git_worktrees(worktree_path, main_repo_path, branch_name, staged_commit_hash, unstaged_commit_hash, original_commit_hash) \
                 VALUES (?1, ?2, ?3, ?4, ?5, ?6) \
                 RETURNING id",
            )?;
            // Bind order must match the ?1..?6 placeholders above.
            let mut i = stmt.bind(&worktree_path, 1)?;
            i = stmt.bind(&main_repo_path, i)?;
            i = stmt.bind(&branch_name, i)?;
            i = stmt.bind(&staged_commit_hash, i)?;
            i = stmt.bind(&unstaged_commit_hash, i)?;
            stmt.bind(&original_commit_hash, i)?;
            // INSERT ... RETURNING produces exactly one row; its absence is an error.
            stmt.maybe_row::<i64>()?.context("expected RETURNING id")
        })
        .await
    }
895
    /// Adds a row to the thread/worktree link table. Fails if the same link
    /// already exists (composite primary key).
    pub async fn link_thread_to_archived_worktree(
        &self,
        session_id: String,
        archived_worktree_id: i64,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "INSERT INTO thread_archived_worktrees(session_id, archived_worktree_id) \
                 VALUES (?1, ?2)",
            )?;
            let i = stmt.bind(&session_id, 1)?;
            stmt.bind(&archived_worktree_id, i)?;
            stmt.exec()
        })
        .await
    }
913
    /// Returns all archived-worktree records linked to the given thread.
    // Column order must match `impl Column for ArchivedGitWorktree`.
    pub async fn get_archived_worktrees_for_thread(
        &self,
        session_id: String,
    ) -> anyhow::Result<Vec<ArchivedGitWorktree>> {
        self.select_bound::<String, ArchivedGitWorktree>(
            "SELECT a.id, a.worktree_path, a.main_repo_path, a.branch_name, a.staged_commit_hash, a.unstaged_commit_hash, a.original_commit_hash \
             FROM archived_git_worktrees a \
             JOIN thread_archived_worktrees t ON a.id = t.archived_worktree_id \
             WHERE t.session_id = ?1",
        )?(session_id)
    }
925
    /// Deletes an archived-worktree record and all of its thread links, in one
    /// write transaction. Links go first so the REFERENCES constraint on the
    /// link table can't be violated.
    pub async fn delete_archived_worktree(&self, id: i64) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE archived_worktree_id = ?",
            )?;
            stmt.bind(&id, 1)?;
            stmt.exec()?;

            let mut stmt =
                Statement::prepare(conn, "DELETE FROM archived_git_worktrees WHERE id = ?")?;
            stmt.bind(&id, 1)?;
            stmt.exec()
        })
        .await
    }
942
    /// Removes every link row for the given thread; the archived-worktree
    /// records themselves are left in place.
    pub async fn unlink_thread_from_all_archived_worktrees(
        &self,
        session_id: String,
    ) -> anyhow::Result<()> {
        self.write(move |conn| {
            let mut stmt = Statement::prepare(
                conn,
                "DELETE FROM thread_archived_worktrees WHERE session_id = ?",
            )?;
            stmt.bind(&session_id, 1)?;
            stmt.exec()
        })
        .await
    }
957
    /// Whether at least one thread still links to the given archived worktree.
    pub async fn is_archived_worktree_referenced(
        &self,
        archived_worktree_id: i64,
    ) -> anyhow::Result<bool> {
        self.select_row_bound::<i64, i64>(
            "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
        )?(archived_worktree_id)
        // COUNT(*) always yields a row; treat a missing row defensively as 0.
        .map(|count| count.unwrap_or(0) > 0)
    }
967}
968
969impl Column for ThreadMetadata {
970 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
971 let (id, next): (Arc<str>, i32) = Column::column(statement, start_index)?;
972 let (agent_id, next): (Option<String>, i32) = Column::column(statement, next)?;
973 let (title, next): (String, i32) = Column::column(statement, next)?;
974 let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
975 let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
976 let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
977 let (folder_paths_order_str, next): (Option<String>, i32) =
978 Column::column(statement, next)?;
979 let (archived, next): (bool, i32) = Column::column(statement, next)?;
980 let (main_worktree_paths_str, next): (Option<String>, i32) =
981 Column::column(statement, next)?;
982 let (main_worktree_paths_order_str, next): (Option<String>, i32) =
983 Column::column(statement, next)?;
984
985 let agent_id = agent_id
986 .map(|id| AgentId::new(id))
987 .unwrap_or(ZED_AGENT_ID.clone());
988
989 let updated_at = DateTime::parse_from_rfc3339(&updated_at_str)?.with_timezone(&Utc);
990 let created_at = created_at_str
991 .as_deref()
992 .map(DateTime::parse_from_rfc3339)
993 .transpose()?
994 .map(|dt| dt.with_timezone(&Utc));
995
996 let folder_paths = folder_paths_str
997 .map(|paths| {
998 PathList::deserialize(&util::path_list::SerializedPathList {
999 paths,
1000 order: folder_paths_order_str.unwrap_or_default(),
1001 })
1002 })
1003 .unwrap_or_default();
1004
1005 let main_worktree_paths = main_worktree_paths_str
1006 .map(|paths| {
1007 PathList::deserialize(&util::path_list::SerializedPathList {
1008 paths,
1009 order: main_worktree_paths_order_str.unwrap_or_default(),
1010 })
1011 })
1012 .unwrap_or_default();
1013
1014 Ok((
1015 ThreadMetadata {
1016 session_id: acp::SessionId::new(id),
1017 agent_id,
1018 title: title.into(),
1019 updated_at,
1020 created_at,
1021 folder_paths,
1022 main_worktree_paths,
1023 archived,
1024 },
1025 next,
1026 ))
1027 }
1028}
1029
1030impl Column for ArchivedGitWorktree {
1031 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
1032 let (id, next): (i64, i32) = Column::column(statement, start_index)?;
1033 let (worktree_path_str, next): (String, i32) = Column::column(statement, next)?;
1034 let (main_repo_path_str, next): (String, i32) = Column::column(statement, next)?;
1035 let (branch_name, next): (Option<String>, i32) = Column::column(statement, next)?;
1036 let (staged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1037 let (unstaged_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1038 let (original_commit_hash, next): (String, i32) = Column::column(statement, next)?;
1039
1040 Ok((
1041 ArchivedGitWorktree {
1042 id,
1043 worktree_path: PathBuf::from(worktree_path_str),
1044 main_repo_path: PathBuf::from(main_repo_path_str),
1045 branch_name,
1046 staged_commit_hash,
1047 unstaged_commit_hash,
1048 original_commit_hash,
1049 },
1050 next,
1051 ))
1052 }
1053}
1054
1055#[cfg(test)]
1056mod tests {
1057 use super::*;
1058 use acp_thread::{AgentConnection, StubAgentConnection};
1059 use action_log::ActionLog;
1060 use agent::DbThread;
1061 use agent_client_protocol as acp;
1062
1063 use gpui::TestAppContext;
1064 use project::FakeFs;
1065 use project::Project;
1066 use std::path::Path;
1067 use std::rc::Rc;
1068
1069 fn make_db_thread(title: &str, updated_at: DateTime<Utc>) -> DbThread {
1070 DbThread {
1071 title: title.to_string().into(),
1072 messages: Vec::new(),
1073 updated_at,
1074 detailed_summary: None,
1075 initial_project_snapshot: None,
1076 cumulative_token_usage: Default::default(),
1077 request_token_usage: Default::default(),
1078 model: None,
1079 profile: None,
1080 imported: false,
1081 subagent_context: None,
1082 speed: None,
1083 thinking_enabled: false,
1084 thinking_effort: None,
1085 draft_prompt: None,
1086 ui_scroll_position: None,
1087 }
1088 }
1089
1090 fn make_metadata(
1091 session_id: &str,
1092 title: &str,
1093 updated_at: DateTime<Utc>,
1094 folder_paths: PathList,
1095 ) -> ThreadMetadata {
1096 ThreadMetadata {
1097 archived: false,
1098 session_id: acp::SessionId::new(session_id),
1099 agent_id: agent::ZED_AGENT_ID.clone(),
1100 title: title.to_string().into(),
1101 updated_at,
1102 created_at: Some(updated_at),
1103 folder_paths,
1104 main_worktree_paths: PathList::default(),
1105 }
1106 }
1107
1108 fn init_test(cx: &mut TestAppContext) {
1109 cx.update(|cx| {
1110 let settings_store = settings::SettingsStore::test(cx);
1111 cx.set_global(settings_store);
1112 ThreadMetadataStore::init_global(cx);
1113 ThreadStore::init_global(cx);
1114 });
1115 cx.run_until_parked();
1116 }
1117
    // The store must hydrate its in-memory cache from rows already present in
    // the on-disk database when the global store is initialized.
    #[gpui::test]
    async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) {
        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();
        let older = now - chrono::Duration::seconds(1);

        // Open a per-test database (named after the test thread) and seed it
        // directly, before the global store exists.
        let thread = std::thread::current();
        let test_name = thread.name().unwrap_or("unknown_test");
        let db_name = format!("THREAD_METADATA_DB_{}", test_name);
        let db = ThreadMetadataDb(smol::block_on(db::open_test_db::<ThreadMetadataDb>(
            &db_name,
        )));

        db.save(make_metadata(
            "session-1",
            "First Thread",
            now,
            first_paths.clone(),
        ))
        .await
        .unwrap();
        db.save(make_metadata(
            "session-2",
            "Second Thread",
            older,
            second_paths.clone(),
        ))
        .await
        .unwrap();

        // Initializing the global store should pick up the seeded rows.
        cx.update(|cx| {
            let settings_store = settings::SettingsStore::test(cx);
            cx.set_global(settings_store);
            ThreadMetadataStore::init_global(cx);
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Both seeded sessions are present in the cache...
            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            // ...and each is indexed under its own folder paths.
            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });
    }
1182
    // Saving, re-saving with different paths, and deleting must all keep the
    // in-memory path index consistent with the stored entries.
    #[gpui::test]
    async fn test_store_cache_updates_after_save_and_delete(cx: &mut TestAppContext) {
        init_test(cx);

        let first_paths = PathList::new(&[Path::new("/project-a")]);
        let second_paths = PathList::new(&[Path::new("/project-b")]);
        let initial_time = Utc::now();
        let updated_time = initial_time + chrono::Duration::seconds(1);

        let initial_metadata = make_metadata(
            "session-1",
            "First Thread",
            initial_time,
            first_paths.clone(),
        );

        let second_metadata = make_metadata(
            "session-2",
            "Second Thread",
            initial_time,
            second_paths.clone(),
        );

        // Phase 1: save two threads under different project paths.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(initial_metadata, cx);
                store.save(second_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(first_path_entries, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-2"]);
        });

        // Phase 2: re-save session-1 under project B's paths; the path index
        // must move it out of project A and into project B.
        let moved_metadata = make_metadata(
            "session-1",
            "First Thread",
            updated_time,
            second_paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(moved_metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 2);
            assert!(entry_ids.contains(&"session-1".to_string()));
            assert!(entry_ids.contains(&"session-2".to_string()));

            let first_path_entries = store
                .entries_for_path(&first_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(first_path_entries.is_empty());

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries.len(), 2);
            assert!(second_path_entries.contains(&"session-1".to_string()));
            assert!(second_path_entries.contains(&"session-2".to_string()));
        });

        // Phase 3: deleting session-2 must remove it from both the entry list
        // and the path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.delete(acp::SessionId::new("session-2"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let entry_ids = store
                .entry_ids()
                .map(|session_id| session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(entry_ids, vec!["session-1"]);

            let second_path_entries = store
                .entries_for_path(&second_paths)
                .map(|entry| entry.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(second_path_entries, vec!["session-1"]);
        });
    }
1302
    // Migration must copy native-store threads into the metadata store as
    // archived entries, but never overwrite metadata that already exists.
    #[gpui::test]
    async fn test_migrate_thread_metadata_migrates_only_missing_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Pre-existing metadata for a-session-0: migration must leave this
        // entry untouched even though the native store also has the session.
        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("a-session-0"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: now - chrono::Duration::seconds(10),
            created_at: Some(now - chrono::Duration::seconds(10)),
            folder_paths: project_a_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        let threads_to_save = vec![
            (
                "a-session-0",
                "Thread A0 From Native Store",
                project_a_paths.clone(),
                now,
            ),
            (
                "a-session-1",
                "Thread A1",
                project_a_paths.clone(),
                now + chrono::Duration::seconds(1),
            ),
            (
                "b-session-0",
                "Thread B0",
                project_b_paths.clone(),
                now + chrono::Duration::seconds(2),
            ),
            (
                "projectless",
                "Projectless",
                PathList::default(),
                now + chrono::Duration::seconds(3),
            ),
        ];

        // Populate the native thread store that migration reads from.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 4);
        assert!(
            list.iter()
                .all(|metadata| metadata.agent_id.as_ref() == agent::ZED_AGENT_ID.as_ref())
        );

        // The pre-existing entry keeps its title and unarchived state.
        let existing_metadata = list
            .iter()
            .find(|metadata| metadata.session_id.0.as_ref() == "a-session-0")
            .unwrap();
        assert_eq!(existing_metadata.title.as_ref(), "Existing Metadata");
        assert!(!existing_metadata.archived);

        let migrated_session_ids = list
            .iter()
            .map(|metadata| metadata.session_id.0.as_ref())
            .collect::<Vec<_>>();
        assert!(migrated_session_ids.contains(&"a-session-1"));
        assert!(migrated_session_ids.contains(&"b-session-0"));
        assert!(migrated_session_ids.contains(&"projectless"));

        // Everything newly migrated (i.e. not a-session-0) arrives archived,
        // since the DB was non-empty before migration ran.
        let migrated_entries = list
            .iter()
            .filter(|metadata| metadata.session_id.0.as_ref() != "a-session-0")
            .collect::<Vec<_>>();
        assert!(migrated_entries.iter().all(|metadata| metadata.archived));
    }
1411
    // When every native thread already has metadata, migration must change
    // nothing — not even refresh titles from the native store.
    #[gpui::test]
    async fn test_migrate_thread_metadata_noops_when_all_threads_already_exist(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_paths = PathList::new(&[Path::new("/project-a")]);
        let existing_updated_at = Utc::now();

        let existing_metadata = ThreadMetadata {
            session_id: acp::SessionId::new("existing-session"),
            agent_id: agent::ZED_AGENT_ID.clone(),
            title: "Existing Metadata".into(),
            updated_at: existing_updated_at,
            created_at: Some(existing_updated_at),
            folder_paths: project_paths.clone(),
            main_worktree_paths: PathList::default(),
            archived: false,
        };

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(existing_metadata, cx);
            });
        });
        cx.run_until_parked();

        // Save the same session in the native store with a newer title; this
        // must NOT leak into the metadata store.
        let save_task = cx.update(|cx| {
            let thread_store = ThreadStore::global(cx);
            thread_store.update(cx, |store, cx| {
                store.save_thread(
                    acp::SessionId::new("existing-session"),
                    make_db_thread(
                        "Updated Native Thread Title",
                        existing_updated_at + chrono::Duration::seconds(1),
                    ),
                    project_paths.clone(),
                    cx,
                )
            })
        });
        save_task.await.unwrap();
        cx.run_until_parked();

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 1);
        assert_eq!(list[0].session_id.0.as_ref(), "existing-session");
    }
1468
    // On a first migration (empty DB), only the 5 most recent threads per
    // project stay unarchived; anything older is migrated as archived.
    #[gpui::test]
    async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let project_a_paths = PathList::new(&[Path::new("/project-a")]);
        let project_b_paths = PathList::new(&[Path::new("/project-b")]);
        let now = Utc::now();

        // Create 7 threads for project A and 3 for project B
        let mut threads_to_save = Vec::new();
        for i in 0..7 {
            threads_to_save.push((
                format!("a-session-{i}"),
                format!("Thread A{i}"),
                project_a_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }
        for i in 0..3 {
            threads_to_save.push((
                format!("b-session-{i}"),
                format!("Thread B{i}"),
                project_b_paths.clone(),
                now + chrono::Duration::seconds(i as i64),
            ));
        }

        // Seed the native thread store that migration reads from.
        for (session_id, title, paths, updated_at) in &threads_to_save {
            let save_task = cx.update(|cx| {
                let thread_store = ThreadStore::global(cx);
                let session_id = session_id.to_string();
                let title = title.to_string();
                let paths = paths.clone();
                thread_store.update(cx, |store, cx| {
                    store.save_thread(
                        acp::SessionId::new(session_id),
                        make_db_thread(&title, *updated_at),
                        paths,
                        cx,
                    )
                })
            });
            save_task.await.unwrap();
            cx.run_until_parked();
        }

        cx.update(|cx| migrate_thread_metadata(cx));
        cx.run_until_parked();

        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        assert_eq!(list.len(), 10);

        // Project A: 5 most recent should be unarchived, 2 oldest should be archived
        let mut project_a_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_a_paths)
            .collect();
        assert_eq!(project_a_entries.len(), 7);
        project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));

        for entry in &project_a_entries[..5] {
            assert!(
                !entry.archived,
                "Expected {} to be unarchived (top 5 most recent)",
                entry.session_id.0
            );
        }
        for entry in &project_a_entries[5..] {
            assert!(
                entry.archived,
                "Expected {} to be archived (older than top 5)",
                entry.session_id.0
            );
        }

        // Project B: all 3 should be unarchived (under the limit)
        let project_b_entries: Vec<_> = list
            .iter()
            .filter(|m| m.folder_paths == project_b_paths)
            .collect();
        assert_eq!(project_b_entries.len(), 3);
        assert!(project_b_entries.iter().all(|m| !m.archived));
    }
1558
    // A thread with no user content (e.g. only a draft title) must not create
    // a metadata entry; the first user content block triggers one.
    #[gpui::test]
    async fn test_empty_thread_events_do_not_create_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        // Setting a title while the thread is still empty...
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.set_title("Draft Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // ...must not produce a metadata entry.
        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert!(
            metadata_ids.is_empty(),
            "expected empty draft thread title updates to be ignored"
        );

        // Pushing actual user content makes the thread non-empty, so a
        // metadata entry should now appear.
        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1610
    // Dropping a thread entity that has user content must not delete its
    // persisted metadata entry.
    #[gpui::test]
    async fn test_nonempty_thread_metadata_preserved_when_thread_released(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();
        let session_id = cx.read(|cx| thread.read(cx).session_id().clone());

        cx.update(|cx| {
            thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "Hello".into(), cx);
            });
        });
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id.clone()]);

        // Release the thread entity (the empty update flushes effects so the
        // release actually runs), then confirm the metadata survives.
        drop(thread);
        cx.update(|_| {});
        cx.run_until_parked();

        let metadata_ids = cx.update(|cx| {
            ThreadMetadataStore::global(cx)
                .read(cx)
                .entry_ids()
                .collect::<Vec<_>>()
        });
        assert_eq!(metadata_ids, vec![session_id]);
    }
1656
    // Threads created in a project with no worktrees get archived metadata by
    // default; threads with a worktree stay unarchived.
    #[gpui::test]
    async fn test_threads_without_project_association_are_archived_by_default(
        cx: &mut TestAppContext,
    ) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project_without_worktree = Project::test(fs.clone(), None::<&Path>, cx).await;
        let project_with_worktree = Project::test(fs, [Path::new("/project-a")], cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        let thread_without_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_without_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_without_worktree =
            cx.read(|cx| thread_without_worktree.read(cx).session_id().clone());

        // Content + title makes the thread non-empty so metadata is saved.
        cx.update(|cx| {
            thread_without_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("No Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        let thread_with_worktree = cx
            .update(|cx| {
                connection.clone().new_session(
                    project_with_worktree.clone(),
                    PathList::default(),
                    cx,
                )
            })
            .await
            .unwrap();
        let session_with_worktree =
            cx.read(|cx| thread_with_worktree.read(cx).session_id().clone());

        cx.update(|cx| {
            thread_with_worktree.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Project Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let without_worktree = store
                .entry(&session_without_worktree)
                .expect("missing metadata for thread without project association");
            assert!(without_worktree.folder_paths.is_empty());
            assert!(
                without_worktree.archived,
                "expected thread without project association to be archived"
            );

            let with_worktree = store
                .entry(&session_with_worktree)
                .expect("missing metadata for thread with project association");
            assert_eq!(
                with_worktree.folder_paths,
                PathList::new(&[Path::new("/project-a")])
            );
            assert!(
                !with_worktree.archived,
                "expected thread with project association to remain unarchived"
            );
        });
    }
1736
    // Subagent threads (constructed with a parent session id) must never show
    // up in the sidebar's metadata store.
    #[gpui::test]
    async fn test_subagent_threads_excluded_from_sidebar_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        let project = Project::test(fs, None::<&Path>, cx).await;
        let connection = Rc::new(StubAgentConnection::new());

        // Create a regular (non-subagent) AcpThread.
        let regular_thread = cx
            .update(|cx| {
                connection
                    .clone()
                    .new_session(project.clone(), PathList::default(), cx)
            })
            .await
            .unwrap();

        let regular_session_id = cx.read(|cx| regular_thread.read(cx).session_id().clone());

        // Set a title on the regular thread to trigger a save via handle_thread_update.
        cx.update(|cx| {
            regular_thread.update(cx, |thread, cx| {
                thread.push_user_content_block(None, "content".into(), cx);
                thread.set_title("Regular Thread".into(), cx).detach();
            });
        });
        cx.run_until_parked();

        // Create a subagent AcpThread
        let subagent_session_id = acp::SessionId::new("subagent-session");
        let subagent_thread = cx.update(|cx| {
            let action_log = cx.new(|_| ActionLog::new(project.clone()));
            cx.new(|cx| {
                acp_thread::AcpThread::new(
                    // Passing the parent session id marks this as a subagent.
                    Some(regular_session_id.clone()),
                    Some("Subagent Thread".into()),
                    None,
                    connection.clone(),
                    project.clone(),
                    action_log,
                    subagent_session_id.clone(),
                    watch::Receiver::constant(acp::PromptCapabilities::new()),
                    cx,
                )
            })
        });

        // Set a title on the subagent thread to trigger handle_thread_update.
        cx.update(|cx| {
            subagent_thread.update(cx, |thread, cx| {
                thread
                    .set_title("Subagent Thread Title".into(), cx)
                    .detach();
            });
        });
        cx.run_until_parked();

        // List all metadata from the store cache.
        let list = cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.read(cx).entries().cloned().collect::<Vec<_>>()
        });

        // The subagent thread should NOT appear in the sidebar metadata.
        // Only the regular thread should be listed.
        assert_eq!(
            list.len(),
            1,
            "Expected only the regular thread in sidebar metadata, \
             but found {} entries (subagent threads are leaking into the sidebar)",
            list.len(),
        );
        assert_eq!(list[0].session_id, regular_session_id);
        assert_eq!(list[0].title.as_ref(), "Regular Thread");
    }
1813
1814 #[test]
1815 fn test_dedup_db_operations_keeps_latest_operation_for_session() {
1816 let now = Utc::now();
1817
1818 let operations = vec![
1819 DbOperation::Upsert(make_metadata(
1820 "session-1",
1821 "First Thread",
1822 now,
1823 PathList::default(),
1824 )),
1825 DbOperation::Delete(acp::SessionId::new("session-1")),
1826 ];
1827
1828 let deduped = ThreadMetadataStore::dedup_db_operations(operations);
1829
1830 assert_eq!(deduped.len(), 1);
1831 assert_eq!(
1832 deduped[0],
1833 DbOperation::Delete(acp::SessionId::new("session-1"))
1834 );
1835 }
1836
1837 #[test]
1838 fn test_dedup_db_operations_keeps_latest_insert_for_same_session() {
1839 let now = Utc::now();
1840 let later = now + chrono::Duration::seconds(1);
1841
1842 let old_metadata = make_metadata("session-1", "Old Title", now, PathList::default());
1843 let new_metadata = make_metadata("session-1", "New Title", later, PathList::default());
1844
1845 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1846 DbOperation::Upsert(old_metadata),
1847 DbOperation::Upsert(new_metadata.clone()),
1848 ]);
1849
1850 assert_eq!(deduped.len(), 1);
1851 assert_eq!(deduped[0], DbOperation::Upsert(new_metadata));
1852 }
1853
1854 #[test]
1855 fn test_dedup_db_operations_preserves_distinct_sessions() {
1856 let now = Utc::now();
1857
1858 let metadata1 = make_metadata("session-1", "First Thread", now, PathList::default());
1859 let metadata2 = make_metadata("session-2", "Second Thread", now, PathList::default());
1860 let deduped = ThreadMetadataStore::dedup_db_operations(vec![
1861 DbOperation::Upsert(metadata1.clone()),
1862 DbOperation::Upsert(metadata2.clone()),
1863 ]);
1864
1865 assert_eq!(deduped.len(), 2);
1866 assert!(deduped.contains(&DbOperation::Upsert(metadata1)));
1867 assert!(deduped.contains(&DbOperation::Upsert(metadata2)));
1868 }
1869
    // Archiving removes a thread from the per-path index and surfaces it in
    // archived_entries(); unarchiving restores the original state.
    #[gpui::test]
    async fn test_archive_and_unarchive_thread(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        // Initially: visible under its path, nothing archived.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        // Thread 1 should now be archived
        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store.archived_entries().collect::<Vec<_>>();
            assert_eq!(archived.len(), 1);
            assert_eq!(archived[0].session_id.0.as_ref(), "session-1");
            assert!(archived[0].archived);
        });

        // Unarchiving restores the thread to the path index.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.unarchive(&acp::SessionId::new("session-1"), cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(archived.is_empty());
        });
    }
1956
    // entries_for_path() filters out archived threads, while entries() keeps
    // listing them and archived_entries() surfaces only them.
    #[gpui::test]
    async fn test_entries_for_path_excludes_archived(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let metadata1 = make_metadata("session-1", "Active Thread", now, paths.clone());
        let metadata2 = make_metadata(
            "session-2",
            "Archived Thread",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata1, cx);
                store.save(metadata2, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-2"), None, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            // Only the active thread remains visible under the path.
            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(path_entries, vec!["session-1"]);

            // entries() still includes both.
            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 2);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-2"]);
        });
    }
2016
    // save_all() must persist every item of a batch into the cache at once.
    #[gpui::test]
    async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();

        let m1 = make_metadata("session-1", "Thread One", now, paths.clone());
        let m2 = make_metadata(
            "session-2",
            "Thread Two",
            now - chrono::Duration::seconds(1),
            paths.clone(),
        );
        let m3 = make_metadata(
            "session-3",
            "Thread Three",
            now - chrono::Duration::seconds(2),
            paths,
        );

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save_all(vec![m1, m2, m3], cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let all_entries = store
                .entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(all_entries.len(), 3);
            assert!(all_entries.contains(&"session-1".to_string()));
            assert!(all_entries.contains(&"session-2".to_string()));
            assert!(all_entries.contains(&"session-3".to_string()));

            let entry_ids = store.entry_ids().collect::<Vec<_>>();
            assert_eq!(entry_ids.len(), 3);
        });
    }
2064
    // The archived flag must round-trip through the database: archiving then
    // reloading from disk keeps the thread archived.
    #[gpui::test]
    async fn test_archived_flag_persists_across_reload(cx: &mut TestAppContext) {
        init_test(cx);

        let paths = PathList::new(&[Path::new("/project-a")]);
        let now = Utc::now();
        let metadata = make_metadata("session-1", "Thread 1", now, paths.clone());

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.save(metadata, cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                store.archive(&acp::SessionId::new("session-1"), None, cx);
            });
        });

        cx.run_until_parked();

        // Drop the in-memory cache and re-read everything from the database.
        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            store.update(cx, |store, cx| {
                let _ = store.reload(cx);
            });
        });

        cx.run_until_parked();

        cx.update(|cx| {
            let store = ThreadMetadataStore::global(cx);
            let store = store.read(cx);

            let thread = store
                .entries()
                .find(|e| e.session_id.0.as_ref() == "session-1")
                .expect("thread should exist after reload");
            assert!(thread.archived);

            let path_entries = store
                .entries_for_path(&paths)
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert!(path_entries.is_empty());

            let archived = store
                .archived_entries()
                .map(|e| e.session_id.0.to_string())
                .collect::<Vec<_>>();
            assert_eq!(archived, vec!["session-1"]);
        });
    }
2123
2124 #[gpui::test]
2125 async fn test_archive_nonexistent_thread_is_noop(cx: &mut TestAppContext) {
2126 init_test(cx);
2127
2128 cx.run_until_parked();
2129
2130 cx.update(|cx| {
2131 let store = ThreadMetadataStore::global(cx);
2132 store.update(cx, |store, cx| {
2133 store.archive(&acp::SessionId::new("nonexistent"), None, cx);
2134 });
2135 });
2136
2137 cx.run_until_parked();
2138
2139 cx.update(|cx| {
2140 let store = ThreadMetadataStore::global(cx);
2141 let store = store.read(cx);
2142
2143 assert!(store.is_empty());
2144 assert_eq!(store.entries().count(), 0);
2145 assert_eq!(store.archived_entries().count(), 0);
2146 });
2147 }
2148
2149 #[gpui::test]
2150 async fn test_save_followed_by_archiving_without_parking(cx: &mut TestAppContext) {
2151 init_test(cx);
2152
2153 let paths = PathList::new(&[Path::new("/project-a")]);
2154 let now = Utc::now();
2155 let metadata = make_metadata("session-1", "Thread 1", now, paths);
2156 let session_id = metadata.session_id.clone();
2157
2158 cx.update(|cx| {
2159 let store = ThreadMetadataStore::global(cx);
2160 store.update(cx, |store, cx| {
2161 store.save(metadata.clone(), cx);
2162 store.archive(&session_id, None, cx);
2163 });
2164 });
2165
2166 cx.run_until_parked();
2167
2168 cx.update(|cx| {
2169 let store = ThreadMetadataStore::global(cx);
2170 let store = store.read(cx);
2171
2172 let entries: Vec<ThreadMetadata> = store.entries().cloned().collect();
2173 pretty_assertions::assert_eq!(
2174 entries,
2175 vec![ThreadMetadata {
2176 archived: true,
2177 ..metadata
2178 }]
2179 );
2180 });
2181 }
2182
2183 #[gpui::test]
2184 async fn test_create_and_retrieve_archived_worktree(cx: &mut TestAppContext) {
2185 init_test(cx);
2186 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2187
2188 let id = store
2189 .read_with(cx, |store, cx| {
2190 store.create_archived_worktree(
2191 "/tmp/worktree".to_string(),
2192 "/home/user/repo".to_string(),
2193 Some("feature-branch".to_string()),
2194 "staged_aaa".to_string(),
2195 "unstaged_bbb".to_string(),
2196 "original_000".to_string(),
2197 cx,
2198 )
2199 })
2200 .await
2201 .unwrap();
2202
2203 store
2204 .read_with(cx, |store, cx| {
2205 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2206 })
2207 .await
2208 .unwrap();
2209
2210 let worktrees = store
2211 .read_with(cx, |store, cx| {
2212 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2213 })
2214 .await
2215 .unwrap();
2216
2217 assert_eq!(worktrees.len(), 1);
2218 let wt = &worktrees[0];
2219 assert_eq!(wt.id, id);
2220 assert_eq!(wt.worktree_path, PathBuf::from("/tmp/worktree"));
2221 assert_eq!(wt.main_repo_path, PathBuf::from("/home/user/repo"));
2222 assert_eq!(wt.branch_name.as_deref(), Some("feature-branch"));
2223 assert_eq!(wt.staged_commit_hash, "staged_aaa");
2224 assert_eq!(wt.unstaged_commit_hash, "unstaged_bbb");
2225 assert_eq!(wt.original_commit_hash, "original_000");
2226 }
2227
2228 #[gpui::test]
2229 async fn test_delete_archived_worktree(cx: &mut TestAppContext) {
2230 init_test(cx);
2231 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2232
2233 let id = store
2234 .read_with(cx, |store, cx| {
2235 store.create_archived_worktree(
2236 "/tmp/worktree".to_string(),
2237 "/home/user/repo".to_string(),
2238 Some("main".to_string()),
2239 "deadbeef".to_string(),
2240 "deadbeef".to_string(),
2241 "original_000".to_string(),
2242 cx,
2243 )
2244 })
2245 .await
2246 .unwrap();
2247
2248 store
2249 .read_with(cx, |store, cx| {
2250 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2251 })
2252 .await
2253 .unwrap();
2254
2255 store
2256 .read_with(cx, |store, cx| store.delete_archived_worktree(id, cx))
2257 .await
2258 .unwrap();
2259
2260 let worktrees = store
2261 .read_with(cx, |store, cx| {
2262 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2263 })
2264 .await
2265 .unwrap();
2266 assert!(worktrees.is_empty());
2267 }
2268
2269 #[gpui::test]
2270 async fn test_link_multiple_threads_to_archived_worktree(cx: &mut TestAppContext) {
2271 init_test(cx);
2272 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2273
2274 let id = store
2275 .read_with(cx, |store, cx| {
2276 store.create_archived_worktree(
2277 "/tmp/worktree".to_string(),
2278 "/home/user/repo".to_string(),
2279 None,
2280 "abc123".to_string(),
2281 "abc123".to_string(),
2282 "original_000".to_string(),
2283 cx,
2284 )
2285 })
2286 .await
2287 .unwrap();
2288
2289 store
2290 .read_with(cx, |store, cx| {
2291 store.link_thread_to_archived_worktree("session-1".to_string(), id, cx)
2292 })
2293 .await
2294 .unwrap();
2295
2296 store
2297 .read_with(cx, |store, cx| {
2298 store.link_thread_to_archived_worktree("session-2".to_string(), id, cx)
2299 })
2300 .await
2301 .unwrap();
2302
2303 let wt1 = store
2304 .read_with(cx, |store, cx| {
2305 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2306 })
2307 .await
2308 .unwrap();
2309
2310 let wt2 = store
2311 .read_with(cx, |store, cx| {
2312 store.get_archived_worktrees_for_thread("session-2".to_string(), cx)
2313 })
2314 .await
2315 .unwrap();
2316
2317 assert_eq!(wt1.len(), 1);
2318 assert_eq!(wt2.len(), 1);
2319 assert_eq!(wt1[0].id, wt2[0].id);
2320 }
2321
2322 #[gpui::test]
2323 async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
2324 init_test(cx);
2325 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2326
2327 let original_paths = PathList::new(&[
2328 Path::new("/projects/worktree-a"),
2329 Path::new("/projects/worktree-b"),
2330 Path::new("/other/unrelated"),
2331 ]);
2332 let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
2333
2334 store.update(cx, |store, cx| {
2335 store.save_manually(meta, cx);
2336 });
2337
2338 let replacements = vec![
2339 (
2340 PathBuf::from("/projects/worktree-a"),
2341 PathBuf::from("/restored/worktree-a"),
2342 ),
2343 (
2344 PathBuf::from("/projects/worktree-b"),
2345 PathBuf::from("/restored/worktree-b"),
2346 ),
2347 ];
2348
2349 store.update(cx, |store, cx| {
2350 store.update_restored_worktree_paths(
2351 &acp::SessionId::new("session-multi"),
2352 &replacements,
2353 cx,
2354 );
2355 });
2356
2357 let entry = store.read_with(cx, |store, _cx| {
2358 store.entry(&acp::SessionId::new("session-multi")).cloned()
2359 });
2360 let entry = entry.unwrap();
2361 let paths = entry.folder_paths.paths();
2362 assert_eq!(paths.len(), 3);
2363 assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
2364 assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
2365 assert!(paths.contains(&PathBuf::from("/other/unrelated")));
2366 }
2367
2368 #[gpui::test]
2369 async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
2370 init_test(cx);
2371 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2372
2373 let original_paths =
2374 PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
2375 let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
2376
2377 store.update(cx, |store, cx| {
2378 store.save_manually(meta, cx);
2379 });
2380
2381 let replacements = vec![
2382 (
2383 PathBuf::from("/projects/worktree-a"),
2384 PathBuf::from("/new/worktree-a"),
2385 ),
2386 (
2387 PathBuf::from("/nonexistent/path"),
2388 PathBuf::from("/should/not/appear"),
2389 ),
2390 ];
2391
2392 store.update(cx, |store, cx| {
2393 store.update_restored_worktree_paths(
2394 &acp::SessionId::new("session-partial"),
2395 &replacements,
2396 cx,
2397 );
2398 });
2399
2400 let entry = store.read_with(cx, |store, _cx| {
2401 store
2402 .entry(&acp::SessionId::new("session-partial"))
2403 .cloned()
2404 });
2405 let entry = entry.unwrap();
2406 let paths = entry.folder_paths.paths();
2407 assert_eq!(paths.len(), 2);
2408 assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
2409 assert!(paths.contains(&PathBuf::from("/other/path")));
2410 assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
2411 }
2412
2413 #[gpui::test]
2414 async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
2415 init_test(cx);
2416 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
2417
2418 let id1 = store
2419 .read_with(cx, |store, cx| {
2420 store.create_archived_worktree(
2421 "/projects/worktree-a".to_string(),
2422 "/home/user/repo".to_string(),
2423 Some("branch-a".to_string()),
2424 "staged_a".to_string(),
2425 "unstaged_a".to_string(),
2426 "original_000".to_string(),
2427 cx,
2428 )
2429 })
2430 .await
2431 .unwrap();
2432
2433 let id2 = store
2434 .read_with(cx, |store, cx| {
2435 store.create_archived_worktree(
2436 "/projects/worktree-b".to_string(),
2437 "/home/user/repo".to_string(),
2438 Some("branch-b".to_string()),
2439 "staged_b".to_string(),
2440 "unstaged_b".to_string(),
2441 "original_000".to_string(),
2442 cx,
2443 )
2444 })
2445 .await
2446 .unwrap();
2447
2448 store
2449 .read_with(cx, |store, cx| {
2450 store.link_thread_to_archived_worktree("session-1".to_string(), id1, cx)
2451 })
2452 .await
2453 .unwrap();
2454
2455 store
2456 .read_with(cx, |store, cx| {
2457 store.link_thread_to_archived_worktree("session-1".to_string(), id2, cx)
2458 })
2459 .await
2460 .unwrap();
2461
2462 let worktrees = store
2463 .read_with(cx, |store, cx| {
2464 store.get_archived_worktrees_for_thread("session-1".to_string(), cx)
2465 })
2466 .await
2467 .unwrap();
2468
2469 assert_eq!(worktrees.len(), 2);
2470
2471 let paths: Vec<&Path> = worktrees
2472 .iter()
2473 .map(|w| w.worktree_path.as_path())
2474 .collect();
2475 assert!(paths.contains(&Path::new("/projects/worktree-a")));
2476 assert!(paths.contains(&Path::new("/projects/worktree-b")));
2477 }
2478}