1use std::{
2 collections::HashSet,
3 path::{Path, PathBuf},
4 sync::Arc,
5};
6
7use agent_client_protocol as acp;
8use anyhow::{Context as _, Result, anyhow};
9use collections::HashMap;
10use git::repository::{AskPassDelegate, CommitOptions, ResetMode};
11use gpui::{App, AsyncApp, Entity, Global, Task, WindowHandle};
12use parking_lot::Mutex;
13use project::{LocalProjectFlags, Project, WorktreeId, git_store::Repository};
14use util::ResultExt;
15use workspace::{
16 AppState, MultiWorkspace, OpenMode, OpenOptions, PathList, Toast, Workspace,
17 notifications::NotificationId, open_new, open_paths,
18};
19
20use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadMetadataStore};
21
/// Global registry of worktree root paths whose archive cleanup is currently
/// in flight, so that concurrent `archive_thread` calls never try to delete
/// the same root twice.
#[derive(Default)]
pub struct ThreadArchiveCleanupCoordinator {
    // Roots claimed by a running `run_cleanup`; inserted when cleanup starts
    // and removed via `release_in_flight_roots` on every exit path.
    in_flight_roots: Mutex<HashSet<PathBuf>>,
}

impl Global for ThreadArchiveCleanupCoordinator {}
28
29fn ensure_global(cx: &mut App) {
30 if !cx.has_global::<ThreadArchiveCleanupCoordinator>() {
31 cx.set_global(ThreadArchiveCleanupCoordinator::default());
32 }
33}
34
/// Result returned to callers of `archive_thread`.
#[derive(Clone)]
pub struct ArchiveOutcome {
    // Whether the thread metadata was archived synchronously. As written,
    // `archive_thread` always sets this to `true`; git/filesystem cleanup may
    // still continue in a detached background task.
    pub archived_immediately: bool,
    // Worktree roots that were scheduled for background deletion.
    pub roots_to_delete: Vec<PathBuf>,
}
40
/// Plan for one linked git worktree root that is slated for removal.
#[derive(Clone)]
struct RootPlan {
    // Absolute path of the worktree root to delete.
    root_path: PathBuf,
    // Work directory of the main repository this worktree is linked to.
    main_repo_path: PathBuf,
    // Open projects that currently show this root as a visible worktree.
    affected_projects: Vec<AffectedProject>,
    // Live repository entity for the linked worktree, when one was found.
    worktree_repo: Option<Entity<Repository>>,
    // Branch checked out in the worktree, if any.
    branch_name: Option<String>,
}
49
/// A (project, worktree) pair affected by removing a root path.
#[derive(Clone)]
struct AffectedProject {
    project: Entity<Project>,
    worktree_id: WorktreeId,
}
55
/// Destination for the user once their current workspace loses all of its
/// roots during cleanup.
#[derive(Clone)]
enum FallbackTarget {
    /// Activate a workspace that already exists in the given window.
    ExistingWorkspace {
        window: WindowHandle<MultiWorkspace>,
        workspace: Entity<Workspace>,
    },
    /// Open the given paths (typically the main repo) in the requesting window.
    OpenPaths {
        requesting_window: WindowHandle<MultiWorkspace>,
        paths: Vec<PathBuf>,
    },
    /// Last resort: open a fresh, empty workspace.
    OpenEmpty {
        requesting_window: WindowHandle<MultiWorkspace>,
    },
}
70
/// Everything computed up front (while entities are synchronously readable)
/// that the asynchronous `run_cleanup` needs.
#[derive(Clone)]
struct CleanupPlan {
    // Folder paths recorded in the archived thread's metadata.
    folder_paths: PathList,
    // Worktree roots that are safe to delete (linked worktrees not
    // referenced by any other unarchived thread).
    roots: Vec<RootPlan>,
    // Workspace the user was in when archiving, if any.
    current_workspace: Option<Entity<Workspace>>,
    // True when deleting the roots would leave `current_workspace` with no
    // roots at all.
    current_workspace_will_be_empty: bool,
    // Where to send the user if the current workspace empties out.
    fallback: Option<FallbackTarget>,
    // All open workspaces containing at least one doomed root.
    affected_workspaces: Vec<Entity<Workspace>>,
}
80
/// Name of the git ref that anchors an archived worktree's WIP commits on
/// the main repository, keyed by the archived-worktree DB row id.
fn archived_worktree_ref_name(id: i64) -> String {
    format!("refs/archived-worktrees/{id}")
}
84
/// Data produced by `persist_worktree_state`, retained for later rollback.
struct PersistOutcome {
    // Row id of the archived-worktree record created in the metadata DB.
    archived_worktree_id: i64,
    // SHA of the first WIP commit ("WIP staged"); `{sha}~1` is the original
    // HEAD and serves as the reset target during rollback.
    staged_commit_hash: String,
}
89
/// Archives the thread identified by `session_id` and, when a window is
/// available, schedules background cleanup of linked git worktrees that are
/// no longer referenced by other unarchived threads.
///
/// The metadata archive itself is synchronous; worktree deletion runs in a
/// detached task. Returns which roots were scheduled for deletion.
pub fn archive_thread(
    session_id: &acp::SessionId,
    current_workspace: Option<Entity<Workspace>>,
    window: Option<WindowHandle<MultiWorkspace>>,
    cx: &mut App,
) -> ArchiveOutcome {
    ensure_global(cx);
    // Build the plan before mutating the store, so the "referenced by other
    // unarchived threads" check observes the pre-archive state.
    let plan =
        window.and_then(|window| build_cleanup_plan(session_id, current_workspace, window, cx));

    ThreadMetadataStore::global(cx).update(cx, |store, cx| store.archive(session_id, cx));

    if let Some(plan) = plan {
        let roots_to_delete = plan
            .roots
            .iter()
            .map(|root| root.root_path.clone())
            .collect::<Vec<_>>();
        if !roots_to_delete.is_empty() {
            // Fire-and-forget: failures are surfaced via a toast inside
            // `run_cleanup`, not returned to the caller.
            cx.spawn(async move |cx| {
                run_cleanup(plan, cx).await;
            })
            .detach();

            return ArchiveOutcome {
                archived_immediately: true,
                roots_to_delete,
            };
        }
    }

    ArchiveOutcome {
        archived_immediately: true,
        roots_to_delete: Vec::new(),
    }
}
126
/// Computes a `CleanupPlan` for the thread being archived, or `None` when
/// the thread has no stored metadata.
///
/// A folder path becomes a deletion candidate only when it maps to a linked
/// git worktree (see `build_root_plan`) and no other unarchived thread still
/// references it.
fn build_cleanup_plan(
    session_id: &acp::SessionId,
    current_workspace: Option<Entity<Workspace>>,
    requesting_window: WindowHandle<MultiWorkspace>,
    cx: &App,
) -> Option<CleanupPlan> {
    let metadata = ThreadMetadataStore::global(cx)
        .read(cx)
        .entry(session_id)
        .cloned()?;

    let workspaces = all_open_workspaces(cx);

    let candidate_roots = metadata
        .folder_paths
        .ordered_paths()
        .filter_map(|path| build_root_plan(path, &workspaces, cx))
        .filter(|plan| {
            !path_is_referenced_by_other_unarchived_threads(session_id, &plan.root_path, cx)
        })
        .collect::<Vec<_>>();

    if candidate_roots.is_empty() {
        // Nothing to delete: still return a plan carrying the folder paths,
        // but with no roots, no affected workspaces, and no fallback.
        return Some(CleanupPlan {
            folder_paths: metadata.folder_paths,
            roots: Vec::new(),
            current_workspace,
            current_workspace_will_be_empty: false,
            fallback: None,
            affected_workspaces: Vec::new(),
        });
    }

    let mut affected_workspaces = Vec::new();
    let mut current_workspace_will_be_empty = false;

    for workspace in workspaces.iter() {
        // Count how many of this workspace's roots are slated for deletion.
        let doomed_root_count = workspace
            .read(cx)
            .root_paths(cx)
            .into_iter()
            .filter(|path| {
                candidate_roots
                    .iter()
                    .any(|root| root.root_path.as_path() == path.as_ref())
            })
            .count();

        if doomed_root_count == 0 {
            continue;
        }

        let surviving_root_count = workspace
            .read(cx)
            .root_paths(cx)
            .len()
            .saturating_sub(doomed_root_count);
        if current_workspace
            .as_ref()
            .is_some_and(|current| current == workspace)
        {
            current_workspace_will_be_empty = surviving_root_count == 0;
        }
        affected_workspaces.push(workspace.clone());
    }

    // Only pick a fallback destination when the user's current workspace is
    // about to lose all of its roots.
    let fallback = if current_workspace_will_be_empty {
        choose_fallback_target(
            session_id,
            current_workspace.as_ref(),
            &candidate_roots,
            &requesting_window,
            &workspaces,
            cx,
        )
    } else {
        None
    };

    Some(CleanupPlan {
        folder_paths: metadata.folder_paths,
        roots: candidate_roots,
        current_workspace,
        current_workspace_will_be_empty,
        fallback,
        affected_workspaces,
    })
}
215
/// Builds a `RootPlan` for `path`, or `None` when the path is not the work
/// directory of a *linked* git worktree in any open workspace's repositories.
fn build_root_plan(path: &Path, workspaces: &[Entity<Workspace>], cx: &App) -> Option<RootPlan> {
    let path = path.to_path_buf();
    // Every open project that shows this path as a visible worktree will need
    // the worktree removed (and re-added on rollback).
    let affected_projects = workspaces
        .iter()
        .filter_map(|workspace| {
            let project = workspace.read(cx).project().clone();
            let worktree = project
                .read(cx)
                .visible_worktrees(cx)
                .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?;
            let worktree_id = worktree.read(cx).id();
            Some(AffectedProject {
                project,
                worktree_id,
            })
        })
        .collect::<Vec<_>>();

    // Find a repository whose work directory is exactly this path and which
    // is a linked worktree (not a primary checkout); bail otherwise, since
    // only linked worktrees are cleaned up.
    let (linked_snapshot, worktree_repo) = workspaces
        .iter()
        .flat_map(|workspace| {
            workspace
                .read(cx)
                .project()
                .read(cx)
                .repositories(cx)
                .values()
                .cloned()
                .collect::<Vec<_>>()
        })
        .find_map(|repo| {
            let snapshot = repo.read(cx).snapshot();
            (snapshot.is_linked_worktree()
                && snapshot.work_directory_abs_path.as_ref() == path.as_path())
            .then_some((snapshot, repo))
        })?;

    let branch_name = linked_snapshot
        .branch
        .as_ref()
        .map(|b| b.name().to_string());

    Some(RootPlan {
        root_path: path,
        main_repo_path: linked_snapshot.original_repo_abs_path.to_path_buf(),
        affected_projects,
        worktree_repo: Some(worktree_repo),
        branch_name,
    })
}
266
267fn path_is_referenced_by_other_unarchived_threads(
268 current_session_id: &acp::SessionId,
269 path: &Path,
270 cx: &App,
271) -> bool {
272 ThreadMetadataStore::global(cx)
273 .read(cx)
274 .entries()
275 .filter(|thread| thread.session_id != *current_session_id)
276 .filter(|thread| !thread.archived)
277 .any(|thread| {
278 thread
279 .folder_paths
280 .paths()
281 .iter()
282 .any(|other_path| other_path.as_path() == path)
283 })
284}
285
/// Picks where to send the user once the current workspace empties out.
///
/// Preference order:
/// 1. Another surviving workspace in the requesting window.
/// 2. A surviving workspace in any other window.
/// 3. A surviving workspace belonging to another unarchived thread.
/// 4. Opening the first doomed root's main repository.
/// 5. Opening a fresh, empty workspace.
fn choose_fallback_target(
    current_session_id: &acp::SessionId,
    current_workspace: Option<&Entity<Workspace>>,
    roots: &[RootPlan],
    requesting_window: &WindowHandle<MultiWorkspace>,
    workspaces: &[Entity<Workspace>],
    cx: &App,
) -> Option<FallbackTarget> {
    let doomed_roots = roots
        .iter()
        .map(|root| root.root_path.clone())
        .collect::<HashSet<_>>();

    // 1. Prefer a surviving workspace in the same window (excluding the one
    // that is emptying out).
    let surviving_same_window = requesting_window.read(cx).ok().and_then(|multi_workspace| {
        multi_workspace
            .workspaces()
            .iter()
            .filter(|workspace| current_workspace.is_none_or(|current| *workspace != current))
            .find(|workspace| workspace_survives(workspace, &doomed_roots, cx))
            .cloned()
    });
    if let Some(workspace) = surviving_same_window {
        return Some(FallbackTarget::ExistingWorkspace {
            window: *requesting_window,
            workspace,
        });
    }

    // 2. Any surviving workspace in any other window.
    for window in cx
        .windows()
        .into_iter()
        .filter_map(|window| window.downcast::<MultiWorkspace>())
    {
        if window == *requesting_window {
            continue;
        }
        if let Ok(multi_workspace) = window.read(cx) {
            if let Some(workspace) = multi_workspace
                .workspaces()
                .iter()
                .find(|workspace| workspace_survives(workspace, &doomed_roots, cx))
                .cloned()
            {
                return Some(FallbackTarget::ExistingWorkspace { window, workspace });
            }
        }
    }

    // 3. A surviving workspace whose paths match another unarchived thread.
    let safe_thread_workspace = ThreadMetadataStore::global(cx)
        .read(cx)
        .entries()
        .filter(|metadata| metadata.session_id != *current_session_id && !metadata.archived)
        .filter_map(|metadata| {
            workspaces
                .iter()
                .find(|workspace| workspace_path_list(workspace, cx) == metadata.folder_paths)
                .cloned()
        })
        .find(|workspace| workspace_survives(workspace, &doomed_roots, cx));

    if let Some(workspace) = safe_thread_workspace {
        let window = window_for_workspace(&workspace, cx).unwrap_or(*requesting_window);
        return Some(FallbackTarget::ExistingWorkspace { window, workspace });
    }

    // 4. Open the first doomed root's main repository.
    if let Some(root) = roots.first() {
        return Some(FallbackTarget::OpenPaths {
            requesting_window: *requesting_window,
            paths: vec![root.main_repo_path.clone()],
        });
    }

    // 5. Last resort: an empty workspace.
    Some(FallbackTarget::OpenEmpty {
        requesting_window: *requesting_window,
    })
}
362
/// Executes a `CleanupPlan` in the background: prompts to save dirty work,
/// activates a fallback workspace if the current one will empty out,
/// persists each linked worktree's state, deletes the worktrees, closes
/// now-empty workspaces, and reports failures via a toast.
async fn run_cleanup(plan: CleanupPlan, cx: &mut AsyncApp) {
    // Claim each root in the global coordinator; roots already being cleaned
    // up by a concurrent task are skipped. Claimed roots are released on
    // every exit path below.
    let roots_to_delete =
        cx.update_global::<ThreadArchiveCleanupCoordinator, _>(|coordinator, _cx| {
            let mut in_flight_roots = coordinator.in_flight_roots.lock();
            plan.roots
                .iter()
                .filter_map(|root| {
                    if in_flight_roots.insert(root.root_path.clone()) {
                        Some(root.clone())
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>()
        });

    if roots_to_delete.is_empty() {
        return;
    }

    // Prompt the current workspace first; a cancel from the user aborts the
    // entire cleanup.
    let active_workspace = plan.current_workspace.clone();
    if let Some(workspace) = active_workspace
        .as_ref()
        .filter(|_| plan.current_workspace_will_be_empty)
    {
        let Some(window) = window_for_workspace_async(workspace, cx) else {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        };

        let should_continue = save_workspace_for_root_removal(workspace.clone(), window, cx).await;
        if !should_continue {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        }
    }

    // Then prompt every other affected workspace.
    for workspace in plan
        .affected_workspaces
        .iter()
        .filter(|workspace| Some((*workspace).clone()) != active_workspace)
    {
        let Some(window) = window_for_workspace_async(workspace, cx) else {
            continue;
        };

        if !save_workspace_for_root_removal(workspace.clone(), window, cx).await {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        }
    }

    // Move the user somewhere safe before their workspace disappears.
    if plan.current_workspace_will_be_empty {
        if let Some(fallback) = plan.fallback.clone() {
            activate_fallback(fallback, cx).await.log_err();
        }
    }

    let mut git_removal_errors: Vec<(PathBuf, anyhow::Error)> = Vec::new();
    let mut persist_errors: Vec<(PathBuf, anyhow::Error)> = Vec::new();
    let mut persist_outcomes: HashMap<PathBuf, PersistOutcome> = HashMap::default();

    for root in &roots_to_delete {
        // Persist the worktree's working state first; if that fails, skip
        // deletion for this root so no work is lost.
        if root.worktree_repo.is_some() {
            match persist_worktree_state(root, &plan, cx).await {
                Ok(outcome) => {
                    persist_outcomes.insert(root.root_path.clone(), outcome);
                }
                Err(error) => {
                    log::error!(
                        "Failed to persist worktree state for {}: {error}",
                        root.root_path.display()
                    );
                    persist_errors.push((root.root_path.clone(), error));
                    continue;
                }
            }
        }

        if let Err(error) = remove_root(root.clone(), cx).await {
            // Deletion failed: undo persistence so the worktree isn't
            // recorded as archived while still present on disk.
            if let Some(outcome) = persist_outcomes.remove(&root.root_path) {
                rollback_persist(&outcome, root, cx).await;
            }
            git_removal_errors.push((root.root_path.clone(), error));
        }
    }

    cleanup_empty_workspaces(&plan.affected_workspaces, cx).await;

    let all_errors: Vec<(PathBuf, anyhow::Error)> = persist_errors
        .into_iter()
        .chain(git_removal_errors)
        .collect();

    if !all_errors.is_empty() {
        let detail = all_errors
            .into_iter()
            .map(|(path, error)| format!("{}: {error}", path.display()))
            .collect::<Vec<_>>()
            .join("\n");
        show_error_toast(
            "Thread archived, but linked worktree cleanup failed",
            &detail,
            &plan,
            cx,
        );
    }

    release_in_flight_roots(&roots_to_delete, cx);
}
473
/// Prompts the user to save or discard dirty items in `workspace` before its
/// roots are removed. Returns `true` to proceed with removal; `false` when
/// the user canceled or the window could no longer be updated.
async fn save_workspace_for_root_removal(
    workspace: Entity<Workspace>,
    window: WindowHandle<MultiWorkspace>,
    cx: &mut AsyncApp,
) -> bool {
    let has_dirty_items = workspace.read_with(cx, |workspace, cx| {
        workspace.items(cx).any(|item| item.is_dirty(cx))
    });

    // Only steal focus when there is actually something to prompt about.
    if has_dirty_items {
        let _ = window.update(cx, |multi_workspace, window, cx| {
            window.activate_window();
            multi_workspace.activate(workspace.clone(), window, cx);
        });
    }

    let save_task = window.update(cx, |_multi_workspace, window, cx| {
        workspace.update(cx, |workspace, cx| {
            workspace.prompt_to_save_or_discard_dirty_items(window, cx)
        })
    });

    let Ok(task) = save_task else {
        return false;
    };

    // The prompt resolves to `false` when the user cancels.
    task.await.unwrap_or(false)
}
502
/// Brings the chosen `FallbackTarget` to the foreground: activates an
/// existing workspace, opens the given paths, or opens an empty workspace in
/// the requesting window.
async fn activate_fallback(target: FallbackTarget, cx: &mut AsyncApp) -> Result<()> {
    match target {
        FallbackTarget::ExistingWorkspace { window, workspace } => {
            window.update(cx, |multi_workspace, window, cx| {
                window.activate_window();
                multi_workspace.activate(workspace, window, cx);
            })?;
        }
        FallbackTarget::OpenPaths {
            requesting_window,
            paths,
        } => {
            let app_state = current_app_state(cx).context("no workspace app state available")?;
            cx.update(|cx| {
                open_paths(
                    &paths,
                    app_state,
                    OpenOptions {
                        requesting_window: Some(requesting_window),
                        open_mode: OpenMode::Activate,
                        ..Default::default()
                    },
                    cx,
                )
            })
            .await?;
        }
        FallbackTarget::OpenEmpty { requesting_window } => {
            let app_state = current_app_state(cx).context("no workspace app state available")?;
            cx.update(|cx| {
                open_new(
                    OpenOptions {
                        requesting_window: Some(requesting_window),
                        open_mode: OpenMode::Activate,
                        ..Default::default()
                    },
                    app_state,
                    cx,
                    |_workspace, _window, _cx| {},
                )
            })
            .await?;
        }
    }

    Ok(())
}
550
/// Removes `root` from every project that contains it, waits for the
/// worktree handles to be released, then deletes the linked git worktree on
/// disk. On failure the worktree is re-added to the affected projects and
/// the error is propagated.
async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
    // Kick off removal in every project, keeping one task per project that
    // resolves once the worktree is fully released.
    let release_tasks: Vec<_> = root
        .affected_projects
        .iter()
        .map(|affected| {
            let project = affected.project.clone();
            let worktree_id = affected.worktree_id;
            project.update(cx, |project, cx| {
                let wait = project.wait_for_worktree_release(worktree_id, cx);
                project.remove_worktree(worktree_id, cx);
                wait
            })
        })
        .collect();

    if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
        rollback_root(&root, cx).await;
        return Err(error);
    }

    Ok(())
}
573
574async fn remove_root_after_worktree_removal(
575 root: &RootPlan,
576 release_tasks: Vec<Task<Result<()>>>,
577 cx: &mut AsyncApp,
578) -> Result<()> {
579 for task in release_tasks {
580 task.await?;
581 }
582
583 let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
584 let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
585 repo.remove_worktree(root.root_path.clone(), false)
586 });
587 let result = receiver
588 .await
589 .map_err(|_| anyhow!("git worktree removal was canceled"))?;
590 result
591}
592
/// Finds a live `Repository` entity for the given path, or creates a temporary
/// `Project::local` to obtain one.
///
/// `Repository` entities can only be obtained through a `Project` because
/// `GitStore` (which creates and manages `Repository` entities) is owned by
/// `Project`. When no open workspace contains the repo we need, we spin up a
/// headless `Project::local` just to get a `Repository` handle. The caller
/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
/// git operations, then drops it.
///
/// Future improvement: decoupling `GitStore` from `Project` so that
/// `Repository` entities can be created standalone would eliminate this
/// temporary-project workaround.
async fn find_or_create_repository(
    repo_path: &Path,
    cx: &mut AsyncApp,
) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
    // Fast path: some open workspace already has a repository whose work
    // directory is exactly `repo_path`.
    let repo_path_owned = repo_path.to_path_buf();
    let live_repo = cx.update(|cx| {
        all_open_workspaces(cx)
            .into_iter()
            .flat_map(|workspace| {
                workspace
                    .read(cx)
                    .project()
                    .read(cx)
                    .repositories(cx)
                    .values()
                    .cloned()
                    .collect::<Vec<_>>()
            })
            .find(|repo| {
                repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                    == repo_path_owned.as_path()
            })
    });

    if let Some(repo) = live_repo {
        return Ok((repo, None));
    }

    // Slow path: build a temporary headless project over the repo path.
    let app_state =
        current_app_state(cx).context("no app state available for temporary project")?;
    let temp_project = cx.update(|cx| {
        Project::local(
            app_state.client.clone(),
            app_state.node_runtime.clone(),
            app_state.user_store.clone(),
            app_state.languages.clone(),
            app_state.fs.clone(),
            None,
            LocalProjectFlags::default(),
            cx,
        )
    });

    let repo_path_for_worktree = repo_path.to_path_buf();
    let create_worktree = temp_project.update(cx, |project, cx| {
        project.create_worktree(repo_path_for_worktree, true, cx)
    });
    let _worktree = create_worktree.await?;
    // Wait for the initial scan so the project's git store has had a chance
    // to discover the repository.
    let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx));
    initial_scan.await;

    let repo_path_for_find = repo_path.to_path_buf();
    let repo = temp_project
        .update(cx, |project, cx| {
            project
                .repositories(cx)
                .values()
                .find(|repo| {
                    repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                        == repo_path_for_find.as_path()
                })
                .cloned()
        })
        .context("failed to resolve temporary repository handle")?;

    // Let queued git-store work settle before handing out the handle.
    let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier());
    barrier
        .await
        .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
    Ok((repo, Some(temp_project)))
}
677
678async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) {
679 for affected in &root.affected_projects {
680 let task = affected.project.update(cx, |project, cx| {
681 project.create_worktree(root.root_path.clone(), true, cx)
682 });
683 let _ = task.await;
684 }
685}
686
/// Snapshots a linked worktree's full working state into git and the
/// metadata DB so the worktree directory can be deleted and later restored
/// by `restore_worktree_via_git`.
///
/// Sequence: read the original HEAD; commit the staged state ("WIP staged");
/// stage everything including untracked files and commit again
/// ("WIP unstaged"); record all three SHAs in a DB row; link every thread on
/// this worktree to that row; finally anchor the commits with a ref on the
/// main repo (non-fatal if that fails). Any fatal failure rolls the WIP
/// commits back with a mixed reset before returning the error.
async fn persist_worktree_state(
    root: &RootPlan,
    plan: &CleanupPlan,
    cx: &mut AsyncApp,
) -> Result<PersistOutcome> {
    let worktree_repo = root
        .worktree_repo
        .clone()
        .context("no worktree repo entity for persistence")?;

    // Step 0: Read original HEAD SHA before creating any WIP commits
    let original_commit_hash = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))?
        .context("failed to read original HEAD SHA")?
        .context("HEAD SHA is None before WIP commits")?;

    // Step 1: Create WIP commit #1 (staged state)
    // `allow_empty` so persistence also works when the index is clean.
    let askpass = AskPassDelegate::new(cx, |_, _, _| {});
    let commit_rx = worktree_repo.update(cx, |repo, cx| {
        repo.commit(
            "WIP staged".into(),
            None,
            CommitOptions {
                allow_empty: true,
                ..Default::default()
            },
            askpass,
            cx,
        )
    });
    commit_rx
        .await
        .map_err(|_| anyhow!("WIP staged commit canceled"))??;

    // Read SHA after staged commit
    let staged_sha_result = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))
        .and_then(|r| r.context("failed to read HEAD SHA after staged commit"))
        .and_then(|opt| opt.context("HEAD SHA is None after staged commit"));
    let staged_commit_hash = match staged_sha_result {
        Ok(sha) => sha,
        Err(error) => {
            // Undo WIP commit #1 before bailing.
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Step 2: Stage all files including untracked
    let stage_rx = worktree_repo.update(cx, |repo, _cx| repo.stage_all_including_untracked());
    if let Err(error) = stage_rx
        .await
        .map_err(|_| anyhow!("stage all canceled"))
        .and_then(|inner| inner)
    {
        // Only WIP commit #1 exists at this point; drop it.
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
        });
        let _ = rx.await;
        return Err(error.context("failed to stage all files including untracked"));
    }

    // Step 3: Create WIP commit #2 (unstaged/untracked state)
    let askpass = AskPassDelegate::new(cx, |_, _, _| {});
    let commit_rx = worktree_repo.update(cx, |repo, cx| {
        repo.commit(
            "WIP unstaged".into(),
            None,
            CommitOptions {
                allow_empty: true,
                ..Default::default()
            },
            askpass,
            cx,
        )
    });
    if let Err(error) = commit_rx
        .await
        .map_err(|_| anyhow!("WIP unstaged commit canceled"))
        .and_then(|inner| inner)
    {
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
        });
        let _ = rx.await;
        return Err(error);
    }

    // Step 4: Read HEAD SHA after WIP commits
    let head_sha_result = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))
        .and_then(|r| r.context("failed to read HEAD SHA after WIP commits"))
        .and_then(|opt| opt.context("HEAD SHA is None after WIP commits"));
    let unstaged_commit_hash = match head_sha_result {
        Ok(sha) => sha,
        Err(error) => {
            // `{staged}~1` is the original HEAD, so this undoes both commits.
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Step 5: Create DB record
    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
    let worktree_path_str = root.root_path.to_string_lossy().to_string();
    let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string();
    let branch_name = root.branch_name.clone();

    let db_result = store
        .read_with(cx, |store, cx| {
            store.create_archived_worktree(
                worktree_path_str.clone(),
                main_repo_path_str.clone(),
                branch_name.clone(),
                staged_commit_hash.clone(),
                unstaged_commit_hash.clone(),
                original_commit_hash.clone(),
                cx,
            )
        })
        .await
        .context("failed to create archived worktree DB record");
    let archived_worktree_id = match db_result {
        Ok(id) => id,
        Err(error) => {
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Step 6: Link all threads on this worktree to the archived record
    let session_ids: Vec<acp::SessionId> = store.read_with(cx, |store, _cx| {
        store
            .all_session_ids_for_path(&plan.folder_paths)
            .cloned()
            .collect()
    });

    for session_id in &session_ids {
        let link_result = store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(
                    session_id.0.to_string(),
                    archived_worktree_id,
                    cx,
                )
            })
            .await;
        if let Err(error) = link_result {
            // Roll back the DB record first, then the WIP commits.
            if let Err(delete_error) = store
                .read_with(cx, |store, cx| {
                    store.delete_archived_worktree(archived_worktree_id, cx)
                })
                .await
            {
                log::error!(
                    "Failed to delete archived worktree DB record during link rollback: {delete_error:#}"
                );
            }
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error.context("failed to link thread to archived worktree"));
        }
    }

    // Step 7: Create git ref on main repo (non-fatal)
    // The ref keeps the WIP commits reachable so git GC won't prune them.
    let ref_name = archived_worktree_ref_name(archived_worktree_id);
    let main_repo_result = find_or_create_repository(&root.main_repo_path, cx).await;
    match main_repo_result {
        Ok((main_repo, _temp_project)) => {
            let rx = main_repo.update(cx, |repo, _cx| {
                repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
            });
            if let Err(error) = rx
                .await
                .map_err(|_| anyhow!("update_ref canceled"))
                .and_then(|r| r)
            {
                log::warn!(
                    "Failed to create ref {} on main repo (non-fatal): {error}",
                    ref_name
                );
            }
        }
        Err(error) => {
            log::warn!(
                "Could not find main repo to create ref {} (non-fatal): {error}",
                ref_name
            );
        }
    }

    Ok(PersistOutcome {
        archived_worktree_id,
        staged_commit_hash,
    })
}
899
/// Best-effort undo of `persist_worktree_state`: resets the worktree back
/// past the two WIP commits, deletes the anchor ref on the main repo, and
/// removes the archived-worktree DB record. Failures are logged, not
/// propagated — this runs on an already-failing path.
async fn rollback_persist(outcome: &PersistOutcome, root: &RootPlan, cx: &mut AsyncApp) {
    // Undo WIP commits on the worktree repo
    // (`{staged}~1` is the pre-archive HEAD, so this drops both commits).
    if let Some(worktree_repo) = &root.worktree_repo {
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset(
                format!("{}~1", outcome.staged_commit_hash),
                ResetMode::Mixed,
                cx,
            )
        });
        let _ = rx.await;
    }

    // Delete the git ref on main repo
    if let Ok((main_repo, _temp_project)) =
        find_or_create_repository(&root.main_repo_path, cx).await
    {
        let ref_name = archived_worktree_ref_name(outcome.archived_worktree_id);
        let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
        let _ = rx.await;
    }

    // Delete the DB record
    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
    if let Err(error) = store
        .read_with(cx, |store, cx| {
            store.delete_archived_worktree(outcome.archived_worktree_id, cx)
        })
        .await
    {
        log::error!("Failed to delete archived worktree DB record during rollback: {error:#}");
    }
}
933
/// Closes every workspace in `workspaces` that no longer has any root
/// paths, removing it from its window — or closing the whole window when it
/// was the last workspace there.
async fn cleanup_empty_workspaces(workspaces: &[Entity<Workspace>], cx: &mut AsyncApp) {
    for workspace in workspaces {
        // Read through a weak handle so a workspace dropped mid-cleanup is
        // skipped instead of erroring out the whole pass.
        let is_empty = match workspace
            .downgrade()
            .read_with(cx, |workspace, cx| workspace.root_paths(cx).is_empty())
        {
            Ok(is_empty) => is_empty,
            Err(_) => {
                log::debug!("Workspace entity already dropped during cleanup; skipping");
                continue;
            }
        };
        if !is_empty {
            continue;
        }

        let Some(window) = window_for_workspace_async(workspace, cx) else {
            continue;
        };

        let _ = window.update(cx, |multi_workspace, window, cx| {
            // `remove` returning false means this was the window's last
            // workspace, so close the window itself.
            if !multi_workspace.remove(workspace, window, cx) {
                window.remove_window();
            }
        });
    }
}
961
/// Recreates an archived linked worktree on disk from its DB record.
///
/// After verifying the original commit still exists, the worktree directory
/// is created at the unstaged WIP commit (or adopted/reused when already
/// present), then the two WIP commits are unwound — a mixed reset to the
/// staged commit followed by a soft reset to the original commit — so
/// unstaged and staged changes are recovered respectively. Finally the
/// original branch is restored when possible. Returns the restored worktree
/// path.
pub async fn restore_worktree_via_git(
    row: &ArchivedGitWorktree,
    cx: &mut AsyncApp,
) -> Result<PathBuf> {
    // Step 1: Find the main repo entity and verify original_commit_hash exists
    let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;

    let commit_exists = main_repo
        .update(cx, |repo, _cx| {
            repo.resolve_commit(row.original_commit_hash.clone())
        })
        .await
        .map_err(|_| anyhow!("resolve_commit was canceled"))?
        .context("failed to check if original commit exists")?;

    if !commit_exists {
        anyhow::bail!(
            "Original commit {} no longer exists in the repository — \
            cannot restore worktree. The git history this archive depends on may have been \
            rewritten or garbage-collected.",
            row.original_commit_hash
        );
    }

    // Step 2: Check if worktree path already exists on disk
    let worktree_path = &row.worktree_path;
    let app_state = current_app_state(cx).context("no app state available")?;
    let already_exists = app_state.fs.metadata(worktree_path).await?.is_some();

    let needs_reset = if already_exists {
        // Check if the existing path is actually a git worktree by looking for
        // a `.git` file (worktrees have a `.git` file, not a directory).
        let dot_git_path = worktree_path.join(".git");
        let dot_git_metadata = app_state.fs.metadata(&dot_git_path).await?;
        let is_git_worktree = dot_git_metadata.as_ref().is_some_and(|meta| !meta.is_dir);

        if is_git_worktree {
            // Already a git worktree — another thread on the same worktree
            // already restored it. Reuse as-is.
            false
        } else {
            // Path exists but isn't a git worktree. Ask git to adopt it.
            let rx = main_repo.update(cx, |repo, _cx| repo.repair_worktrees());
            rx.await
                .map_err(|_| anyhow!("worktree repair was canceled"))?
                .context("failed to repair worktrees")?;
            true
        }
    } else {
        // Step 3: Create detached worktree at the unstaged commit
        let rx = main_repo.update(cx, |repo, _cx| {
            repo.create_worktree_detached(worktree_path.clone(), row.unstaged_commit_hash.clone())
        });
        rx.await
            .map_err(|_| anyhow!("worktree creation was canceled"))?
            .context("failed to create worktree")?;
        true
    };

    if !needs_reset {
        return Ok(worktree_path.clone());
    }

    // Step 4: Get the worktree's repo entity
    let (wt_repo, _temp_wt_project) = find_or_create_repository(worktree_path, cx).await?;

    // Step 5: Reset past the WIP commits to recover original state.
    // Mixed reset to the staged commit drops "WIP unstaged" while keeping its
    // files in the working tree.
    let mixed_reset_ok = {
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.staged_commit_hash.clone(), ResetMode::Mixed, cx)
        });
        match rx.await {
            Ok(Ok(())) => true,
            Ok(Err(error)) => {
                log::error!("Mixed reset to staged commit failed: {error:#}");
                false
            }
            Err(_) => {
                log::error!("Mixed reset to staged commit was canceled");
                false
            }
        }
    };

    // Soft reset to the original commit drops "WIP staged" while keeping its
    // content staged in the index.
    let soft_reset_ok = if mixed_reset_ok {
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.original_commit_hash.clone(), ResetMode::Soft, cx)
        });
        match rx.await {
            Ok(Ok(())) => true,
            Ok(Err(error)) => {
                log::error!("Soft reset to original commit failed: {error:#}");
                false
            }
            Err(_) => {
                log::error!("Soft reset to original commit was canceled");
                false
            }
        }
    } else {
        false
    };

    // If either WIP reset failed, fall back to a mixed reset directly to
    // original_commit_hash so we at least land on the right commit.
    if !mixed_reset_ok || !soft_reset_ok {
        log::warn!(
            "WIP reset(s) failed (mixed_ok={mixed_reset_ok}, soft_ok={soft_reset_ok}); \
            falling back to mixed reset to original commit {}",
            row.original_commit_hash
        );
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
        });
        match rx.await {
            Ok(Ok(())) => {}
            Ok(Err(error)) => {
                return Err(error.context(format!(
                    "fallback reset to original commit {} also failed",
                    row.original_commit_hash
                )));
            }
            Err(_) => {
                return Err(anyhow!(
                    "fallback reset to original commit {} was canceled",
                    row.original_commit_hash
                ));
            }
        }
    }

    // Step 6: Verify HEAD is at original_commit_hash
    let current_head = wt_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("post-restore head_sha was canceled"))?
        .context("failed to read HEAD after restore")?
        .context("HEAD is None after restore")?;

    if current_head != row.original_commit_hash {
        anyhow::bail!(
            "After restore, HEAD is at {current_head} but expected {}. \
            The worktree may be in an inconsistent state.",
            row.original_commit_hash
        );
    }

    // Step 7: Restore the branch
    if let Some(branch_name) = &row.branch_name {
        // Check if the branch exists and points at original_commit_hash.
        // If it does, switch to it. If not, create a new branch there.
        let rx = wt_repo.update(cx, |repo, _cx| repo.change_branch(branch_name.clone()));
        let switched = matches!(rx.await, Ok(Ok(())));

        if switched {
            // Verify the branch actually points at original_commit_hash after switching
            let head_after_switch = wt_repo
                .update(cx, |repo, _cx| repo.head_sha())
                .await
                .ok()
                .and_then(|r| r.ok())
                .flatten();

            if head_after_switch.as_deref() != Some(&row.original_commit_hash) {
                // Branch exists but doesn't point at the right commit.
                // Switch back to detached HEAD at original_commit_hash.
                log::warn!(
                    "Branch '{}' exists but points at {:?}, not {}. Creating fresh branch.",
                    branch_name,
                    head_after_switch,
                    row.original_commit_hash
                );
                let rx = wt_repo.update(cx, |repo, cx| {
                    repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
                });
                let _ = rx.await;
                // Delete the old branch and create fresh
                let rx = wt_repo.update(cx, |repo, _cx| {
                    repo.create_branch(branch_name.clone(), None)
                });
                let _ = rx.await;
            }
        } else {
            // Branch doesn't exist or can't be switched to — create it.
            let rx = wt_repo.update(cx, |repo, _cx| {
                repo.create_branch(branch_name.clone(), None)
            });
            if let Ok(Err(error)) | Err(error) = rx.await.map_err(|e| anyhow::anyhow!("{e}")) {
                log::warn!(
                    "Could not create branch '{}': {error} — \
                    restored worktree is in detached HEAD state.",
                    branch_name
                );
            }
        }
    }

    Ok(worktree_path.clone())
}
1161
1162pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
1163 // Delete the git ref from the main repo
1164 if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
1165 {
1166 let ref_name = archived_worktree_ref_name(row.id);
1167 let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
1168 match rx.await {
1169 Ok(Ok(())) => {}
1170 Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
1171 Err(_) => log::warn!("Archive ref deletion was canceled"),
1172 }
1173 }
1174
1175 // Delete the DB records
1176 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
1177 store
1178 .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx))
1179 .await
1180 .log_err();
1181}
1182
1183fn show_error_toast(summary: &str, detail: &str, plan: &CleanupPlan, cx: &mut AsyncApp) {
1184 let target_workspace = plan
1185 .current_workspace
1186 .clone()
1187 .or_else(|| plan.affected_workspaces.first().cloned());
1188 let Some(workspace) = target_workspace else {
1189 return;
1190 };
1191
1192 let _ = workspace.update(cx, |workspace, cx| {
1193 struct ArchiveCleanupErrorToast;
1194 let message = if detail.is_empty() {
1195 summary.to_string()
1196 } else {
1197 format!("{summary}: {detail}")
1198 };
1199 workspace.show_toast(
1200 Toast::new(
1201 NotificationId::unique::<ArchiveCleanupErrorToast>(),
1202 message,
1203 )
1204 .autohide(),
1205 cx,
1206 );
1207 });
1208}
1209
1210fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
1211 cx.windows()
1212 .into_iter()
1213 .filter_map(|window| window.downcast::<MultiWorkspace>())
1214 .flat_map(|multi_workspace| {
1215 multi_workspace
1216 .read(cx)
1217 .map(|multi_workspace| multi_workspace.workspaces().to_vec())
1218 .unwrap_or_default()
1219 })
1220 .collect()
1221}
1222
1223fn workspace_survives(
1224 workspace: &Entity<Workspace>,
1225 doomed_roots: &HashSet<PathBuf>,
1226 cx: &App,
1227) -> bool {
1228 workspace
1229 .read(cx)
1230 .root_paths(cx)
1231 .into_iter()
1232 .any(|root| !doomed_roots.contains(root.as_ref()))
1233}
1234
1235fn workspace_path_list(workspace: &Entity<Workspace>, cx: &App) -> PathList {
1236 PathList::new(&workspace.read(cx).root_paths(cx))
1237}
1238
1239fn window_for_workspace(
1240 workspace: &Entity<Workspace>,
1241 cx: &App,
1242) -> Option<WindowHandle<MultiWorkspace>> {
1243 cx.windows()
1244 .into_iter()
1245 .filter_map(|window| window.downcast::<MultiWorkspace>())
1246 .find(|window| {
1247 window
1248 .read(cx)
1249 .map(|multi_workspace| multi_workspace.workspaces().contains(workspace))
1250 .unwrap_or(false)
1251 })
1252}
1253
1254fn window_for_workspace_async(
1255 workspace: &Entity<Workspace>,
1256 cx: &mut AsyncApp,
1257) -> Option<WindowHandle<MultiWorkspace>> {
1258 let workspace = workspace.clone();
1259 cx.update(|cx| window_for_workspace(&workspace, cx))
1260}
1261
1262fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
1263 cx.update(|cx| {
1264 all_open_workspaces(cx)
1265 .into_iter()
1266 .next()
1267 .map(|workspace| workspace.read(cx).app_state().clone())
1268 })
1269}
1270
1271fn release_in_flight_roots(roots: &[RootPlan], cx: &mut AsyncApp) {
1272 cx.update_global::<ThreadArchiveCleanupCoordinator, _>(|coordinator, _cx| {
1273 let mut in_flight_roots = coordinator.in_flight_roots.lock();
1274 for root in roots {
1275 in_flight_roots.remove(&root.root_path);
1276 }
1277 });
1278}