1use std::{
2 collections::HashSet,
3 path::{Path, PathBuf},
4 sync::Arc,
5};
6
7use agent_client_protocol as acp;
8use anyhow::{Context as _, Result, anyhow};
9use collections::HashMap;
10use git::repository::{AskPassDelegate, CommitOptions, DEFAULT_WORKTREE_DIRECTORY, ResetMode};
11use gpui::{App, AsyncApp, Entity, Global, Task, WindowHandle};
12use parking_lot::Mutex;
13use project::{
14 LocalProjectFlags, Project, WorktreeId, git_store::Repository, worktrees_directory_for_repo,
15};
16use util::ResultExt;
17use workspace::{
18 AppState, MultiWorkspace, OpenMode, OpenOptions, PathList, Toast, Workspace,
19 notifications::NotificationId, open_new, open_paths,
20};
21
22use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadMetadataStore};
23
/// Process-wide registry of worktree roots that currently have an archive
/// cleanup task running, used to prevent two concurrent cleanups from
/// touching the same root.
#[derive(Default)]
pub struct ThreadArchiveCleanupCoordinator {
    // Roots claimed by an in-progress cleanup; inserted in `run_cleanup` and
    // removed by `release_in_flight_roots`.
    in_flight_roots: Mutex<HashSet<PathBuf>>,
}

impl Global for ThreadArchiveCleanupCoordinator {}
30
31fn ensure_global(cx: &mut App) {
32 if !cx.has_global::<ThreadArchiveCleanupCoordinator>() {
33 cx.set_global(ThreadArchiveCleanupCoordinator::default());
34 }
35}
36
/// Result of `archive_thread`: whether the metadata was archived right away
/// and which worktree roots were handed to a background cleanup task.
#[derive(Clone)]
pub struct ArchiveOutcome {
    // Currently always `true`: the metadata store is archived synchronously
    // before any cleanup is scheduled.
    pub archived_immediately: bool,
    // Roots the spawned cleanup task will try to delete; empty when no root
    // qualifies for deletion.
    pub roots_to_delete: Vec<PathBuf>,
}
42
/// Cleanup plan for a single linked-worktree root that is safe to delete.
#[derive(Clone)]
struct RootPlan {
    // Absolute path of the linked git worktree slated for removal.
    root_path: PathBuf,
    // Absolute path of the main repository the worktree belongs to.
    main_repo_path: PathBuf,
    // Open projects that currently show this root as a visible worktree.
    affected_projects: Vec<AffectedProject>,
    // Repository entity for the linked worktree, used to persist WIP state
    // before deletion.
    worktree_repo: Option<Entity<Repository>>,
    // Branch checked out in the worktree, if any.
    branch_name: Option<String>,
}
51
/// A project/worktree pair that references a root slated for removal.
#[derive(Clone)]
struct AffectedProject {
    project: Entity<Project>,
    // Id of the worktree within `project` that maps to the doomed root.
    worktree_id: WorktreeId,
}
57
/// Where to send the user when removing roots would leave their current
/// workspace empty.
#[derive(Clone)]
enum FallbackTarget {
    /// Activate an already-open workspace that survives the cleanup.
    ExistingWorkspace {
        window: WindowHandle<MultiWorkspace>,
        workspace: Entity<Workspace>,
    },
    /// Open the given paths (in practice, the main repo of the first doomed
    /// root) in the requesting window.
    OpenPaths {
        requesting_window: WindowHandle<MultiWorkspace>,
        paths: Vec<PathBuf>,
    },
    /// Open a fresh empty workspace in the requesting window.
    OpenEmpty {
        requesting_window: WindowHandle<MultiWorkspace>,
    },
}
72
/// Everything `run_cleanup` needs, computed up front in `build_cleanup_plan`
/// while the app state can still be inspected synchronously.
#[derive(Clone)]
struct CleanupPlan {
    // Folder paths recorded in the archived thread's metadata; used to find
    // sibling threads sharing the same worktree.
    folder_paths: PathList,
    // Roots that qualify for deletion (linked worktrees not referenced by
    // any other unarchived thread).
    roots: Vec<RootPlan>,
    // Workspace the archive request originated from, if any.
    current_workspace: Option<Entity<Workspace>>,
    // True when deleting the roots leaves `current_workspace` with no roots,
    // which triggers `fallback`.
    current_workspace_will_be_empty: bool,
    // Pre-selected destination to activate when the current workspace
    // empties out.
    fallback: Option<FallbackTarget>,
    // All open workspaces that contain at least one doomed root.
    affected_workspaces: Vec<Entity<Workspace>>,
}
82
/// Name of the git ref that pins an archived worktree's final WIP commit in
/// the main repository, keyed by the archived-worktree DB row id.
fn archived_worktree_ref_name(id: i64) -> String {
    format!("refs/archived-worktrees/{id}")
}
86
/// Identifiers produced by `persist_worktree_state`, retained so the
/// persistence can be rolled back if worktree deletion later fails.
struct PersistOutcome {
    // Row id of the archived-worktree DB record.
    archived_worktree_id: i64,
    // SHA of the "WIP staged" commit; rollback resets to its parent.
    staged_commit_hash: String,
}
91
92pub fn archive_thread(
93 session_id: &acp::SessionId,
94 current_workspace: Option<Entity<Workspace>>,
95 window: WindowHandle<MultiWorkspace>,
96 cx: &mut App,
97) -> ArchiveOutcome {
98 ensure_global(cx);
99 let plan = build_cleanup_plan(session_id, current_workspace, window, cx);
100
101 ThreadMetadataStore::global(cx).update(cx, |store, cx| store.archive(session_id, cx));
102
103 if let Some(plan) = plan {
104 let roots_to_delete = plan
105 .roots
106 .iter()
107 .map(|root| root.root_path.clone())
108 .collect::<Vec<_>>();
109 if !roots_to_delete.is_empty() {
110 cx.spawn(async move |cx| {
111 run_cleanup(plan, cx).await;
112 })
113 .detach();
114
115 return ArchiveOutcome {
116 archived_immediately: true,
117 roots_to_delete,
118 };
119 }
120 }
121
122 ArchiveOutcome {
123 archived_immediately: true,
124 roots_to_delete: Vec::new(),
125 }
126}
127
/// Computes a `CleanupPlan` for archiving `session_id`'s thread, or `None`
/// when the thread has no metadata entry.
///
/// A root qualifies for deletion when it is a linked git worktree visible in
/// some open workspace and no other unarchived thread references it. Also
/// determines whether removing those roots would empty the requesting
/// workspace and, if so, picks a fallback destination up front.
fn build_cleanup_plan(
    session_id: &acp::SessionId,
    current_workspace: Option<Entity<Workspace>>,
    requesting_window: WindowHandle<MultiWorkspace>,
    cx: &App,
) -> Option<CleanupPlan> {
    let metadata = ThreadMetadataStore::global(cx)
        .read(cx)
        .entry(session_id)
        .cloned()?;

    let workspaces = all_open_workspaces(cx);

    // Roots that are linked worktrees AND not shared with other live threads.
    let candidate_roots = metadata
        .folder_paths
        .ordered_paths()
        .filter_map(|path| build_root_plan(path, &workspaces, cx))
        .filter(|plan| {
            !path_is_referenced_by_other_unarchived_threads(session_id, &plan.root_path, cx)
        })
        .collect::<Vec<_>>();

    // Nothing to delete: return an inert plan so the caller still archives
    // the metadata without spawning any cleanup.
    if candidate_roots.is_empty() {
        return Some(CleanupPlan {
            folder_paths: metadata.folder_paths,
            roots: Vec::new(),
            current_workspace,
            current_workspace_will_be_empty: false,
            fallback: None,
            affected_workspaces: Vec::new(),
        });
    }

    let mut affected_workspaces = Vec::new();
    let mut current_workspace_will_be_empty = false;

    for workspace in workspaces.iter() {
        // Count how many of this workspace's roots are slated for deletion.
        let doomed_root_count = workspace
            .read(cx)
            .root_paths(cx)
            .into_iter()
            .filter(|path| {
                candidate_roots
                    .iter()
                    .any(|root| root.root_path.as_path() == path.as_ref())
            })
            .count();

        if doomed_root_count == 0 {
            continue;
        }

        let surviving_root_count = workspace
            .read(cx)
            .root_paths(cx)
            .len()
            .saturating_sub(doomed_root_count);
        if current_workspace
            .as_ref()
            .is_some_and(|current| current == workspace)
        {
            current_workspace_will_be_empty = surviving_root_count == 0;
        }
        affected_workspaces.push(workspace.clone());
    }

    // Only choose a fallback when the user's current workspace empties out.
    let fallback = if current_workspace_will_be_empty {
        choose_fallback_target(
            session_id,
            current_workspace.as_ref(),
            &candidate_roots,
            &requesting_window,
            &workspaces,
            cx,
        )
    } else {
        None
    };

    Some(CleanupPlan {
        folder_paths: metadata.folder_paths,
        roots: candidate_roots,
        current_workspace,
        current_workspace_will_be_empty,
        fallback,
        affected_workspaces,
    })
}
216
/// Builds a `RootPlan` for `path`, or `None` when the path is not the work
/// directory of a linked git worktree known to any open workspace.
fn build_root_plan(path: &Path, workspaces: &[Entity<Workspace>], cx: &App) -> Option<RootPlan> {
    let path = path.to_path_buf();
    // Every open project that shows this path as a visible worktree; these
    // must release the worktree before git can delete it.
    let affected_projects = workspaces
        .iter()
        .filter_map(|workspace| {
            let project = workspace.read(cx).project().clone();
            let worktree = project
                .read(cx)
                .visible_worktrees(cx)
                .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?;
            let worktree_id = worktree.read(cx).id();
            Some(AffectedProject {
                project,
                worktree_id,
            })
        })
        .collect::<Vec<_>>();

    // Find the repository whose work directory is this path and which is a
    // linked worktree (not a main checkout); bail out otherwise — main
    // checkouts are never deleted by archive cleanup.
    let (linked_snapshot, worktree_repo) = workspaces
        .iter()
        .flat_map(|workspace| {
            workspace
                .read(cx)
                .project()
                .read(cx)
                .repositories(cx)
                .values()
                .cloned()
                .collect::<Vec<_>>()
        })
        .find_map(|repo| {
            let snapshot = repo.read(cx).snapshot();
            (snapshot.is_linked_worktree()
                && snapshot.work_directory_abs_path.as_ref() == path.as_path())
            .then_some((snapshot, repo))
        })?;

    let branch_name = linked_snapshot
        .branch
        .as_ref()
        .map(|b| b.name().to_string());

    Some(RootPlan {
        root_path: path,
        main_repo_path: linked_snapshot.original_repo_abs_path.to_path_buf(),
        affected_projects,
        worktree_repo: Some(worktree_repo),
        branch_name,
    })
}
267
268fn path_is_referenced_by_other_unarchived_threads(
269 current_session_id: &acp::SessionId,
270 path: &Path,
271 cx: &App,
272) -> bool {
273 ThreadMetadataStore::global(cx)
274 .read(cx)
275 .entries()
276 .filter(|thread| thread.session_id != *current_session_id)
277 .filter(|thread| !thread.archived)
278 .any(|thread| {
279 thread
280 .folder_paths
281 .paths()
282 .iter()
283 .any(|other_path| other_path.as_path() == path)
284 })
285}
286
/// Picks where to send the user once their current workspace loses all of
/// its roots, trying in priority order:
/// 1. another surviving workspace in the requesting window,
/// 2. a surviving workspace in any other window,
/// 3. a workspace whose path list backs another live (unarchived) thread,
/// 4. opening the first doomed root's main repository,
/// 5. opening an empty workspace.
fn choose_fallback_target(
    current_session_id: &acp::SessionId,
    current_workspace: Option<&Entity<Workspace>>,
    roots: &[RootPlan],
    requesting_window: &WindowHandle<MultiWorkspace>,
    workspaces: &[Entity<Workspace>],
    cx: &App,
) -> Option<FallbackTarget> {
    let doomed_roots = roots
        .iter()
        .map(|root| root.root_path.clone())
        .collect::<HashSet<_>>();

    // 1. Prefer staying in the same window: any other workspace there that
    //    keeps at least one root after cleanup.
    let surviving_same_window = requesting_window.read(cx).ok().and_then(|multi_workspace| {
        multi_workspace
            .workspaces()
            .iter()
            .filter(|workspace| current_workspace.is_none_or(|current| *workspace != current))
            .find(|workspace| workspace_survives(workspace, &doomed_roots, cx))
            .cloned()
    });
    if let Some(workspace) = surviving_same_window {
        return Some(FallbackTarget::ExistingWorkspace {
            window: *requesting_window,
            workspace,
        });
    }

    // 2. Any surviving workspace in any other window.
    for window in cx
        .windows()
        .into_iter()
        .filter_map(|window| window.downcast::<MultiWorkspace>())
    {
        if window == *requesting_window {
            continue;
        }
        if let Ok(multi_workspace) = window.read(cx) {
            if let Some(workspace) = multi_workspace
                .workspaces()
                .iter()
                .find(|workspace| workspace_survives(workspace, &doomed_roots, cx))
                .cloned()
            {
                return Some(FallbackTarget::ExistingWorkspace { window, workspace });
            }
        }
    }

    // 3. A workspace backing some other live thread, matched by path list.
    let safe_thread_workspace = ThreadMetadataStore::global(cx)
        .read(cx)
        .entries()
        .filter(|metadata| metadata.session_id != *current_session_id && !metadata.archived)
        .filter_map(|metadata| {
            workspaces
                .iter()
                .find(|workspace| workspace_path_list(workspace, cx) == metadata.folder_paths)
                .cloned()
        })
        .find(|workspace| workspace_survives(workspace, &doomed_roots, cx));

    if let Some(workspace) = safe_thread_workspace {
        let window = window_for_workspace(&workspace, cx).unwrap_or(*requesting_window);
        return Some(FallbackTarget::ExistingWorkspace { window, workspace });
    }

    // 4. Fall back to opening the main repo of the first doomed root.
    if let Some(root) = roots.first() {
        return Some(FallbackTarget::OpenPaths {
            requesting_window: *requesting_window,
            paths: vec![root.main_repo_path.clone()],
        });
    }

    // 5. Last resort: an empty workspace.
    Some(FallbackTarget::OpenEmpty {
        requesting_window: *requesting_window,
    })
}
363
/// Background task that deletes the planned worktree roots.
///
/// Claims the roots in the global coordinator (skipping any already being
/// cleaned up), prompts affected workspaces to save, activates the fallback
/// destination if the current workspace will be emptied, persists each
/// worktree's WIP state, removes the worktrees, and finally closes any
/// workspaces left with no roots. Claimed roots are released on every exit
/// path.
async fn run_cleanup(plan: CleanupPlan, cx: &mut AsyncApp) {
    // Claim each root; `insert` returning false means another cleanup
    // already owns it, so that root is skipped.
    let roots_to_delete =
        cx.update_global::<ThreadArchiveCleanupCoordinator, _>(|coordinator, _cx| {
            let mut in_flight_roots = coordinator.in_flight_roots.lock();
            plan.roots
                .iter()
                .filter_map(|root| {
                    if in_flight_roots.insert(root.root_path.clone()) {
                        Some(root.clone())
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>()
        });

    if roots_to_delete.is_empty() {
        return;
    }

    // Save the requesting workspace first; abort (and release claims) if its
    // window is gone or the save is refused/canceled.
    let active_workspace = plan.current_workspace.clone();
    if let Some(workspace) = active_workspace
        .as_ref()
        .filter(|_| plan.current_workspace_will_be_empty)
    {
        let Some(window) = window_for_workspace_async(workspace, cx) else {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        };

        let should_continue = save_workspace_for_root_removal(workspace.clone(), window, cx).await;
        if !should_continue {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        }
    }

    // Save every other affected workspace; any refusal aborts the cleanup.
    for workspace in plan
        .affected_workspaces
        .iter()
        .filter(|workspace| Some((*workspace).clone()) != active_workspace)
    {
        let Some(window) = window_for_workspace_async(workspace, cx) else {
            continue;
        };

        if !save_workspace_for_root_removal(workspace.clone(), window, cx).await {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        }
    }

    // Move the user somewhere safe before their workspace loses its roots.
    if plan.current_workspace_will_be_empty {
        if let Some(fallback) = plan.fallback.clone() {
            activate_fallback(fallback, cx).await.log_err();
        }
    }

    let mut git_removal_errors: Vec<(PathBuf, anyhow::Error)> = Vec::new();
    let mut persist_errors: Vec<(PathBuf, anyhow::Error)> = Vec::new();
    let mut persist_outcomes: HashMap<PathBuf, PersistOutcome> = HashMap::default();

    for root in &roots_to_delete {
        // Persist WIP state first; if that fails, skip deletion of this root
        // entirely so no uncommitted work is lost.
        if root.worktree_repo.is_some() {
            match persist_worktree_state(root, &plan, cx).await {
                Ok(outcome) => {
                    persist_outcomes.insert(root.root_path.clone(), outcome);
                }
                Err(error) => {
                    log::error!(
                        "Failed to persist worktree state for {}: {error}",
                        root.root_path.display()
                    );
                    persist_errors.push((root.root_path.clone(), error));
                    continue;
                }
            }
        }

        if let Err(error) = remove_root(root.clone(), cx).await {
            // Deletion failed: undo the persisted WIP commits/DB record so
            // the worktree is back in its pre-archive state.
            if let Some(outcome) = persist_outcomes.remove(&root.root_path) {
                rollback_persist(&outcome, root, cx).await;
            }
            git_removal_errors.push((root.root_path.clone(), error));
        }
    }

    cleanup_empty_workspaces(&plan.affected_workspaces, cx).await;

    let all_errors: Vec<(PathBuf, anyhow::Error)> = persist_errors
        .into_iter()
        .chain(git_removal_errors)
        .collect();

    if !all_errors.is_empty() {
        let detail = all_errors
            .into_iter()
            .map(|(path, error)| format!("{}: {error}", path.display()))
            .collect::<Vec<_>>()
            .join("\n");
        show_error_toast(
            "Thread archived, but linked worktree cleanup failed",
            &detail,
            &plan,
            cx,
        );
    }

    release_in_flight_roots(&roots_to_delete, cx);
}
474
/// Prompts the workspace to save its work before its roots are removed.
/// Returns `true` to proceed with removal, `false` when the window is gone
/// or `save_for_root_removal` reports the removal should not continue.
async fn save_workspace_for_root_removal(
    workspace: Entity<Workspace>,
    window: WindowHandle<MultiWorkspace>,
    cx: &mut AsyncApp,
) -> bool {
    let has_dirty_items = workspace.read_with(cx, |workspace, cx| {
        workspace.items(cx).any(|item| item.is_dirty(cx))
    });

    // Bring the workspace to the front so any save prompt is visible to the
    // user; only needed when there is actually something dirty.
    if has_dirty_items {
        let _ = window.update(cx, |multi_workspace, window, cx| {
            window.activate_window();
            multi_workspace.activate(workspace.clone(), window, cx);
        });
    }

    let save_task = window.update(cx, |_multi_workspace, window, cx| {
        workspace.update(cx, |workspace, cx| {
            workspace.save_for_root_removal(window, cx)
        })
    });

    // If the window is gone we cannot prompt; treat as "do not continue".
    let Ok(task) = save_task else {
        return false;
    };

    task.await.unwrap_or(false)
}
503
/// Activates the chosen fallback destination: focuses an existing workspace,
/// opens the given paths in the requesting window, or opens a fresh empty
/// workspace there.
async fn activate_fallback(target: FallbackTarget, cx: &mut AsyncApp) -> Result<()> {
    match target {
        FallbackTarget::ExistingWorkspace { window, workspace } => {
            window.update(cx, |multi_workspace, window, cx| {
                window.activate_window();
                multi_workspace.activate(workspace, window, cx);
            })?;
        }
        FallbackTarget::OpenPaths {
            requesting_window,
            paths,
        } => {
            // `open_paths` needs the shared AppState, which we can only get
            // from some already-open workspace.
            let app_state = current_app_state(cx).context("no workspace app state available")?;
            cx.update(|cx| {
                open_paths(
                    &paths,
                    app_state,
                    OpenOptions {
                        requesting_window: Some(requesting_window),
                        open_mode: OpenMode::Activate,
                        ..Default::default()
                    },
                    cx,
                )
            })
            .await?;
        }
        FallbackTarget::OpenEmpty { requesting_window } => {
            let app_state = current_app_state(cx).context("no workspace app state available")?;
            cx.update(|cx| {
                open_new(
                    OpenOptions {
                        requesting_window: Some(requesting_window),
                        open_mode: OpenMode::Activate,
                        ..Default::default()
                    },
                    app_state,
                    cx,
                    |_workspace, _window, _cx| {},
                )
            })
            .await?;
        }
    }

    Ok(())
}
551
/// Removes `root` from every affected project, then deletes the linked git
/// worktree itself. On failure the worktree is re-added to the projects
/// (best-effort) and the error is propagated.
async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
    // Kick off worktree removal in every project, collecting tasks that
    // resolve once each project has fully released the worktree.
    let release_tasks: Vec<_> = root
        .affected_projects
        .iter()
        .map(|affected| {
            let project = affected.project.clone();
            let worktree_id = affected.worktree_id;
            project.update(cx, |project, cx| {
                let wait = project.wait_for_worktree_release(worktree_id, cx);
                project.remove_worktree(worktree_id, cx);
                wait
            })
        })
        .collect();

    if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
        // Put the worktrees back so the projects aren't left pointing at a
        // root they no longer show.
        rollback_root(&root, cx).await;
        return Err(error);
    }

    Ok(())
}
574
/// Waits for all projects to release the worktree, then asks the main
/// repository to remove the linked git worktree on disk.
async fn remove_root_after_worktree_removal(
    root: &RootPlan,
    release_tasks: Vec<Task<Result<()>>>,
    cx: &mut AsyncApp,
) -> Result<()> {
    // The worktree's files must no longer be in use before git deletes them.
    for task in release_tasks {
        task.await?;
    }

    let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
    let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
        repo.remove_worktree(root.root_path.clone(), false)
    });
    // The receiver resolving to Err means the git job was dropped/canceled;
    // the inner value carries the actual git result.
    let result = receiver
        .await
        .map_err(|_| anyhow!("git worktree removal was canceled"))?;
    result
}
593
/// Finds a live `Repository` entity for the given path, or creates a temporary
/// `Project::local` to obtain one.
///
/// `Repository` entities can only be obtained through a `Project` because
/// `GitStore` (which creates and manages `Repository` entities) is owned by
/// `Project`. When no open workspace contains the repo we need, we spin up a
/// headless `Project::local` just to get a `Repository` handle. The caller
/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
/// git operations, then drops it.
///
/// Future improvement: decoupling `GitStore` from `Project` so that
/// `Repository` entities can be created standalone would eliminate this
/// temporary-project workaround.
async fn find_or_create_repository(
    repo_path: &Path,
    cx: &mut AsyncApp,
) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
    // Fast path: a repo with this work directory is already open somewhere.
    let repo_path_owned = repo_path.to_path_buf();
    let live_repo = cx.update(|cx| {
        all_open_workspaces(cx)
            .into_iter()
            .flat_map(|workspace| {
                workspace
                    .read(cx)
                    .project()
                    .read(cx)
                    .repositories(cx)
                    .values()
                    .cloned()
                    .collect::<Vec<_>>()
            })
            .find(|repo| {
                repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                    == repo_path_owned.as_path()
            })
    });

    if let Some(repo) = live_repo {
        return Ok((repo, None));
    }

    // Slow path: build a headless local project over the repo path just to
    // make its GitStore materialize a Repository entity.
    let app_state =
        current_app_state(cx).context("no app state available for temporary project")?;
    let temp_project = cx.update(|cx| {
        Project::local(
            app_state.client.clone(),
            app_state.node_runtime.clone(),
            app_state.user_store.clone(),
            app_state.languages.clone(),
            app_state.fs.clone(),
            None,
            LocalProjectFlags::default(),
            cx,
        )
    });

    let repo_path_for_worktree = repo_path.to_path_buf();
    let create_worktree = temp_project.update(cx, |project, cx| {
        project.create_worktree(repo_path_for_worktree, true, cx)
    });
    let _worktree = create_worktree.await?;
    // The repository only appears after the project's initial scan finishes.
    let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx));
    initial_scan.await;

    let repo_path_for_find = repo_path.to_path_buf();
    let repo = temp_project
        .update(cx, |project, cx| {
            project
                .repositories(cx)
                .values()
                .find(|repo| {
                    repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                        == repo_path_for_find.as_path()
                })
                .cloned()
        })
        .context("failed to resolve temporary repository handle")?;

    // Wait for any queued repository jobs to drain so the handle is usable.
    let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier());
    barrier
        .await
        .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
    Ok((repo, Some(temp_project)))
}
678
679async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) {
680 for affected in &root.affected_projects {
681 let task = affected.project.update(cx, |project, cx| {
682 project.create_worktree(root.root_path.clone(), true, cx)
683 });
684 let _ = task.await;
685 }
686}
687
/// Captures the worktree's uncommitted state so it can be restored after the
/// worktree directory is deleted:
///
/// 1. commit the current index ("WIP staged"),
/// 2. stage everything including untracked files,
/// 3. commit again ("WIP unstaged"),
/// 4. record both commit SHAs,
/// 5. create an archived-worktree DB record,
/// 6. link every thread sharing this worktree's folder paths to that record,
/// 7. pin the final commit with a ref on the main repo (best-effort).
///
/// Every fallible step after a commit rolls the WIP commit(s) back with a
/// mixed reset before returning the error, so a failure leaves the worktree
/// in its original state.
async fn persist_worktree_state(
    root: &RootPlan,
    plan: &CleanupPlan,
    cx: &mut AsyncApp,
) -> Result<PersistOutcome> {
    let worktree_repo = root
        .worktree_repo
        .clone()
        .context("no worktree repo entity for persistence")?;

    // Step 1: Create WIP commit #1 (staged state)
    // `allow_empty` so a clean index still produces a commit to anchor on.
    let askpass = AskPassDelegate::new(cx, |_, _, _| {});
    let commit_rx = worktree_repo.update(cx, |repo, cx| {
        repo.commit(
            "WIP staged".into(),
            None,
            CommitOptions {
                allow_empty: true,
                ..Default::default()
            },
            askpass,
            cx,
        )
    });
    commit_rx
        .await
        .map_err(|_| anyhow!("WIP staged commit canceled"))??;

    // Read SHA after staged commit
    let staged_sha_result = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))
        .and_then(|r| r.context("failed to read HEAD SHA after staged commit"))
        .and_then(|opt| opt.context("HEAD SHA is None after staged commit"));
    let staged_commit_hash = match staged_sha_result {
        Ok(sha) => sha,
        Err(error) => {
            // Roll back WIP commit #1 before bailing out.
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Step 2: Stage all files including untracked
    let stage_rx = worktree_repo.update(cx, |repo, _cx| repo.stage_all_including_untracked());
    if let Err(error) = stage_rx
        .await
        .map_err(|_| anyhow!("stage all canceled"))
        .and_then(|inner| inner)
    {
        // Only WIP commit #1 exists at this point; undo it.
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
        });
        let _ = rx.await;
        return Err(error.context("failed to stage all files including untracked"));
    }

    // Step 3: Create WIP commit #2 (unstaged/untracked state)
    let askpass = AskPassDelegate::new(cx, |_, _, _| {});
    let commit_rx = worktree_repo.update(cx, |repo, cx| {
        repo.commit(
            "WIP unstaged".into(),
            None,
            CommitOptions {
                allow_empty: true,
                ..Default::default()
            },
            askpass,
            cx,
        )
    });
    if let Err(error) = commit_rx
        .await
        .map_err(|_| anyhow!("WIP unstaged commit canceled"))
        .and_then(|inner| inner)
    {
        // Commit #2 failed, so HEAD is still commit #1; one reset undoes it.
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
        });
        let _ = rx.await;
        return Err(error);
    }

    // Step 4: Read HEAD SHA after WIP commits
    let head_sha_result = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))
        .and_then(|r| r.context("failed to read HEAD SHA after WIP commits"))
        .and_then(|opt| opt.context("HEAD SHA is None after WIP commits"));
    let unstaged_commit_hash = match head_sha_result {
        Ok(sha) => sha,
        Err(error) => {
            // Both WIP commits exist now; reset to the staged commit's
            // parent to undo them in one step.
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Step 5: Create DB record
    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
    let worktree_path_str = root.root_path.to_string_lossy().to_string();
    let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string();
    let branch_name = root.branch_name.clone();

    let db_result = store
        .read_with(cx, |store, cx| {
            store.create_archived_worktree(
                &worktree_path_str,
                &main_repo_path_str,
                branch_name.as_deref(),
                &staged_commit_hash,
                &unstaged_commit_hash,
                cx,
            )
        })
        .await
        .context("failed to create archived worktree DB record");
    let archived_worktree_id = match db_result {
        Ok(id) => id,
        Err(error) => {
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Step 6: Link all threads on this worktree to the archived record
    let session_ids: Vec<acp::SessionId> = store.read_with(cx, |store, _cx| {
        store
            .all_session_ids_for_path(&plan.folder_paths)
            .cloned()
            .collect()
    });

    for session_id in &session_ids {
        let link_result = store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(&session_id.0, archived_worktree_id, cx)
            })
            .await;
        if let Err(error) = link_result {
            // Roll back the DB record and the WIP commits before bailing.
            if let Err(delete_error) = store
                .read_with(cx, |store, cx| {
                    store.delete_archived_worktree(archived_worktree_id, cx)
                })
                .await
            {
                log::error!(
                    "Failed to delete archived worktree DB record during link rollback: {delete_error:#}"
                );
            }
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error.context("failed to link thread to archived worktree"));
        }
    }

    // Step 7: Create git ref on main repo (non-fatal)
    // The ref keeps the WIP commits reachable after the worktree (and its
    // HEAD) is deleted.
    let ref_name = archived_worktree_ref_name(archived_worktree_id);
    let main_repo_result = find_or_create_repository(&root.main_repo_path, cx).await;
    match main_repo_result {
        Ok((main_repo, _temp_project)) => {
            let rx = main_repo.update(cx, |repo, _cx| {
                repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
            });
            if let Err(error) = rx
                .await
                .map_err(|_| anyhow!("update_ref canceled"))
                .and_then(|r| r)
            {
                log::warn!(
                    "Failed to create ref {} on main repo (non-fatal): {error}",
                    ref_name
                );
            }
        }
        Err(error) => {
            log::warn!(
                "Could not find main repo to create ref {} (non-fatal): {error}",
                ref_name
            );
        }
    }

    Ok(PersistOutcome {
        archived_worktree_id,
        staged_commit_hash,
    })
}
887
/// Undoes everything `persist_worktree_state` did, used when the subsequent
/// worktree deletion fails: resets the worktree back past the two WIP
/// commits, deletes the pin ref on the main repo, and removes the DB record.
/// All steps are best-effort; failures are logged, not propagated.
async fn rollback_persist(outcome: &PersistOutcome, root: &RootPlan, cx: &mut AsyncApp) {
    // Undo WIP commits on the worktree repo
    // Resetting to the staged commit's parent removes both WIP commits.
    if let Some(worktree_repo) = &root.worktree_repo {
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset(
                format!("{}~1", outcome.staged_commit_hash),
                ResetMode::Mixed,
                cx,
            )
        });
        let _ = rx.await;
    }

    // Delete the git ref on main repo
    if let Ok((main_repo, _temp_project)) =
        find_or_create_repository(&root.main_repo_path, cx).await
    {
        let ref_name = archived_worktree_ref_name(outcome.archived_worktree_id);
        let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
        let _ = rx.await;
    }

    // Delete the DB record
    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
    if let Err(error) = store
        .read_with(cx, |store, cx| {
            store.delete_archived_worktree(outcome.archived_worktree_id, cx)
        })
        .await
    {
        log::error!("Failed to delete archived worktree DB record during rollback: {error:#}");
    }
}
921
/// Closes any affected workspace that ended up with no roots after cleanup.
async fn cleanup_empty_workspaces(workspaces: &[Entity<Workspace>], cx: &mut AsyncApp) {
    for workspace in workspaces {
        // Go through a weak handle so a workspace dropped mid-cleanup is
        // skipped instead of panicking on a dead entity.
        let is_empty = match workspace
            .downgrade()
            .read_with(cx, |workspace, cx| workspace.root_paths(cx).is_empty())
        {
            Ok(is_empty) => is_empty,
            Err(_) => {
                log::debug!("Workspace entity already dropped during cleanup; skipping");
                continue;
            }
        };
        if !is_empty {
            continue;
        }

        let Some(window) = window_for_workspace_async(workspace, cx) else {
            continue;
        };

        let _ = window.update(cx, |multi_workspace, window, cx| {
            // NOTE(review): `remove` returning false is treated as "could not
            // remove within this window" and the whole window is closed
            // instead — confirm against `MultiWorkspace::remove`'s contract.
            if !multi_workspace.remove(workspace, window, cx) {
                window.remove_window();
            }
        });
    }
}
949
/// Recreates an archived worktree from its persisted WIP commits and returns
/// the path it was restored to.
///
/// Creates a detached worktree at the recorded path (or a conflict-free
/// alternative when that path is occupied), then unwinds the two WIP commits
/// — a mixed reset to the staged commit followed by a soft reset to its
/// parent — so the staged/unstaged split matches what was archived, and
/// finally tries to restore the original branch.
pub async fn restore_worktree_via_git(
    row: &ArchivedGitWorktree,
    cx: &mut AsyncApp,
) -> Result<PathBuf> {
    // Step 1: Find the main repo entity
    let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;

    // Step 2: Handle path conflicts
    // If something now lives at the original path, pick a fresh
    // "<branch>-restored-<id>" location under the repo's worktrees directory.
    let worktree_path = &row.worktree_path;
    let app_state = current_app_state(cx).context("no app state available")?;
    let already_exists = app_state.fs.metadata(worktree_path).await?.is_some();

    let final_path = if already_exists {
        let worktree_directory =
            worktrees_directory_for_repo(&row.main_repo_path, DEFAULT_WORKTREE_DIRECTORY)?;
        let new_name = format!(
            "{}-restored-{}",
            row.branch_name.as_deref().unwrap_or("worktree"),
            row.id
        );
        let project_name = row
            .main_repo_path
            .file_name()
            .context("git repo must have a directory name")?;
        worktree_directory.join(&new_name).join(project_name)
    } else {
        worktree_path.clone()
    };

    // Step 3: Create detached worktree
    // Detached at the "WIP unstaged" commit; branch handling comes last.
    let rx = main_repo.update(cx, |repo, _cx| {
        repo.create_worktree_detached(final_path.clone(), row.unstaged_commit_hash.clone())
    });
    rx.await
        .map_err(|_| anyhow!("worktree creation was canceled"))?
        .context("failed to create worktree")?;

    // Step 4: Get the worktree's repo entity
    let (wt_repo, _temp_wt_project) = find_or_create_repository(&final_path, cx).await?;

    // Step 5: Mixed reset to staged commit (undo the "WIP unstaged" commit)
    let rx = wt_repo.update(cx, |repo, cx| {
        repo.reset(row.staged_commit_hash.clone(), ResetMode::Mixed, cx)
    });
    match rx.await {
        Ok(Ok(())) => {}
        Ok(Err(error)) => {
            // Best-effort: return HEAD to the unstaged commit before failing.
            let _ = wt_repo
                .update(cx, |repo, cx| {
                    repo.reset(row.unstaged_commit_hash.clone(), ResetMode::Mixed, cx)
                })
                .await;
            return Err(error.context("mixed reset failed while restoring worktree"));
        }
        Err(_) => {
            return Err(anyhow!("mixed reset was canceled"));
        }
    }

    // Step 6: Soft reset to parent of staged commit (undo the "WIP staged" commit)
    // Soft keeps the staged content in the index, reproducing the original
    // staged state.
    let rx = wt_repo.update(cx, |repo, cx| {
        repo.reset(format!("{}~1", row.staged_commit_hash), ResetMode::Soft, cx)
    });
    match rx.await {
        Ok(Ok(())) => {}
        Ok(Err(error)) => {
            let _ = wt_repo
                .update(cx, |repo, cx| {
                    repo.reset(row.unstaged_commit_hash.clone(), ResetMode::Mixed, cx)
                })
                .await;
            return Err(error.context("soft reset failed while restoring worktree"));
        }
        Err(_) => {
            return Err(anyhow!("soft reset was canceled"));
        }
    }

    // Step 7: Restore the branch
    // Try switching first; if that fails, try creating the branch. Either
    // failure leaves the worktree detached, which is logged but non-fatal.
    if let Some(branch_name) = &row.branch_name {
        let rx = wt_repo.update(cx, |repo, _cx| repo.change_branch(branch_name.clone()));
        match rx.await {
            Ok(Ok(())) => {}
            _ => {
                let rx = wt_repo.update(cx, |repo, _cx| {
                    repo.create_branch(branch_name.clone(), None)
                });
                if let Ok(Err(_)) | Err(_) = rx.await {
                    log::warn!(
                        "Could not switch to branch '{}' — \
                        restored worktree is in detached HEAD state.",
                        branch_name
                    );
                }
            }
        }
    }

    Ok(final_path)
}
1050
1051pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
1052 // Delete the git ref from the main repo
1053 if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
1054 {
1055 let ref_name = archived_worktree_ref_name(row.id);
1056 let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
1057 match rx.await {
1058 Ok(Ok(())) => {}
1059 Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
1060 Err(_) => log::warn!("Archive ref deletion was canceled"),
1061 }
1062 }
1063
1064 // Delete the DB records
1065 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
1066 store
1067 .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx))
1068 .await
1069 .log_err();
1070}
1071
1072fn show_error_toast(summary: &str, detail: &str, plan: &CleanupPlan, cx: &mut AsyncApp) {
1073 let target_workspace = plan
1074 .current_workspace
1075 .clone()
1076 .or_else(|| plan.affected_workspaces.first().cloned());
1077 let Some(workspace) = target_workspace else {
1078 return;
1079 };
1080
1081 let _ = workspace.update(cx, |workspace, cx| {
1082 struct ArchiveCleanupErrorToast;
1083 let message = if detail.is_empty() {
1084 summary.to_string()
1085 } else {
1086 format!("{summary}: {detail}")
1087 };
1088 workspace.show_toast(
1089 Toast::new(
1090 NotificationId::unique::<ArchiveCleanupErrorToast>(),
1091 message,
1092 )
1093 .autohide(),
1094 cx,
1095 );
1096 });
1097}
1098
1099fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
1100 cx.windows()
1101 .into_iter()
1102 .filter_map(|window| window.downcast::<MultiWorkspace>())
1103 .flat_map(|multi_workspace| {
1104 multi_workspace
1105 .read(cx)
1106 .map(|multi_workspace| multi_workspace.workspaces().to_vec())
1107 .unwrap_or_default()
1108 })
1109 .collect()
1110}
1111
1112fn workspace_survives(
1113 workspace: &Entity<Workspace>,
1114 doomed_roots: &HashSet<PathBuf>,
1115 cx: &App,
1116) -> bool {
1117 workspace
1118 .read(cx)
1119 .root_paths(cx)
1120 .into_iter()
1121 .any(|root| !doomed_roots.contains(root.as_ref()))
1122}
1123
1124fn workspace_path_list(workspace: &Entity<Workspace>, cx: &App) -> PathList {
1125 PathList::new(&workspace.read(cx).root_paths(cx))
1126}
1127
1128fn window_for_workspace(
1129 workspace: &Entity<Workspace>,
1130 cx: &App,
1131) -> Option<WindowHandle<MultiWorkspace>> {
1132 cx.windows()
1133 .into_iter()
1134 .filter_map(|window| window.downcast::<MultiWorkspace>())
1135 .find(|window| {
1136 window
1137 .read(cx)
1138 .map(|multi_workspace| multi_workspace.workspaces().contains(workspace))
1139 .unwrap_or(false)
1140 })
1141}
1142
1143fn window_for_workspace_async(
1144 workspace: &Entity<Workspace>,
1145 cx: &mut AsyncApp,
1146) -> Option<WindowHandle<MultiWorkspace>> {
1147 let workspace = workspace.clone();
1148 cx.update(|cx| window_for_workspace(&workspace, cx))
1149}
1150
1151fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
1152 cx.update(|cx| {
1153 all_open_workspaces(cx)
1154 .into_iter()
1155 .next()
1156 .map(|workspace| workspace.read(cx).app_state().clone())
1157 })
1158}
1159
1160fn release_in_flight_roots(roots: &[RootPlan], cx: &mut AsyncApp) {
1161 cx.update_global::<ThreadArchiveCleanupCoordinator, _>(|coordinator, _cx| {
1162 let mut in_flight_roots = coordinator.in_flight_roots.lock();
1163 for root in roots {
1164 in_flight_roots.remove(&root.root_path);
1165 }
1166 });
1167}