// thread_worktree_archive.rs

   1use std::{
   2    collections::HashSet,
   3    path::{Path, PathBuf},
   4    sync::Arc,
   5};
   6
   7use agent_client_protocol as acp;
   8use anyhow::{Context as _, Result, anyhow};
   9use collections::HashMap;
  10use git::repository::{AskPassDelegate, CommitOptions, ResetMode};
  11use gpui::{App, AsyncApp, Entity, Global, Task, WindowHandle};
  12use parking_lot::Mutex;
  13use project::{
  14    LocalProjectFlags, Project, WorktreeId,
  15    git_store::{Repository, resolve_git_worktree_to_main_repo},
  16};
  17use util::ResultExt;
  18use workspace::{
  19    AppState, MultiWorkspace, OpenMode, OpenOptions, PathList, Toast, Workspace,
  20    notifications::NotificationId, open_new, open_paths,
  21};
  22
  23use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadMetadataStore};
  24
/// App-global registry of worktree roots whose archival cleanup is currently
/// in flight, used to keep two concurrent cleanups from deleting the same root.
#[derive(Default)]
pub struct ThreadArchiveCleanupCoordinator {
    // Roots being cleaned up right now; inserted when cleanup claims a root
    // in `run_cleanup`, removed again by `release_in_flight_roots`.
    in_flight_roots: Mutex<HashSet<PathBuf>>,
}

impl Global for ThreadArchiveCleanupCoordinator {}
  31
  32fn ensure_global(cx: &mut App) {
  33    if !cx.has_global::<ThreadArchiveCleanupCoordinator>() {
  34        cx.set_global(ThreadArchiveCleanupCoordinator::default());
  35    }
  36}
  37
/// Result of `archive_thread`, describing what cleanup (if any) was scheduled.
#[derive(Clone)]
pub struct ArchiveOutcome {
    // Whether the thread's metadata was archived synchronously.
    pub archived_immediately: bool,
    // Worktree roots whose deletion was handed off to a background task.
    pub roots_to_delete: Vec<PathBuf>,
}
  43
/// Cleanup plan for one linked git worktree root that is about to be removed.
#[derive(Clone)]
struct RootPlan {
    // Absolute path of the linked worktree root to delete.
    root_path: PathBuf,
    // Absolute path of the main repository the worktree is linked to.
    main_repo_path: PathBuf,
    // Every open project that currently shows this root as a worktree.
    affected_projects: Vec<AffectedProject>,
    // Repository entity for the linked worktree, when one was found.
    worktree_repo: Option<Entity<Repository>>,
    // Branch checked out in the linked worktree, if any.
    branch_name: Option<String>,
}
  52
/// A project containing a doomed root, plus the worktree id it uses for it.
#[derive(Clone)]
struct AffectedProject {
    project: Entity<Project>,
    worktree_id: WorktreeId,
}
  58
/// Where to send the user when removing the doomed roots would leave their
/// current workspace empty.
#[derive(Clone)]
enum FallbackTarget {
    /// Activate an already-open workspace in the given window.
    ExistingWorkspace {
        window: WindowHandle<MultiWorkspace>,
        workspace: Entity<Workspace>,
    },
    /// Open the given paths (the main repo path) via the requesting window.
    OpenPaths {
        requesting_window: WindowHandle<MultiWorkspace>,
        paths: Vec<PathBuf>,
    },
    /// Open a fresh, empty workspace via the requesting window.
    OpenEmpty {
        requesting_window: WindowHandle<MultiWorkspace>,
    },
}
  73
/// Everything `run_cleanup` needs, computed up front while on the main thread.
#[derive(Clone)]
struct CleanupPlan {
    // The archived thread's folder paths; used to link sibling threads
    // to the archived-worktree record.
    folder_paths: PathList,
    // One plan per root that is safe to delete.
    roots: Vec<RootPlan>,
    // The workspace the user was in when archiving, if any.
    current_workspace: Option<Entity<Workspace>>,
    // True when deleting the roots leaves `current_workspace` with no roots.
    current_workspace_will_be_empty: bool,
    // Destination for the user if the current workspace becomes empty.
    fallback: Option<FallbackTarget>,
    // All open workspaces that contain at least one doomed root.
    affected_workspaces: Vec<Entity<Workspace>>,
}
  83
  84fn archived_worktree_ref_name(id: i64) -> String {
  85    format!("refs/archived-worktrees/{}", id)
  86}
  87
/// What `persist_worktree_state` produced; kept so a failed removal can be
/// rolled back by `rollback_persist`.
struct PersistOutcome {
    // Row id of the archived-worktree DB record.
    archived_worktree_id: i64,
    // SHA of the first WIP commit (staged state); `{sha}~1` is the
    // pre-snapshot HEAD used as the reset target during rollback.
    staged_commit_hash: String,
}
  92
  93pub fn archive_thread(
  94    session_id: &acp::SessionId,
  95    current_workspace: Option<Entity<Workspace>>,
  96    window: Option<WindowHandle<MultiWorkspace>>,
  97    cx: &mut App,
  98) -> ArchiveOutcome {
  99    ensure_global(cx);
 100    let plan =
 101        window.and_then(|window| build_cleanup_plan(session_id, current_workspace, window, cx));
 102
 103    ThreadMetadataStore::global(cx).update(cx, |store, cx| store.archive(session_id, cx));
 104
 105    if let Some(plan) = plan {
 106        let roots_to_delete = plan
 107            .roots
 108            .iter()
 109            .map(|root| root.root_path.clone())
 110            .collect::<Vec<_>>();
 111        if !roots_to_delete.is_empty() {
 112            cx.spawn(async move |cx| {
 113                run_cleanup(plan, cx).await;
 114            })
 115            .detach();
 116
 117            return ArchiveOutcome {
 118                archived_immediately: true,
 119                roots_to_delete,
 120            };
 121        }
 122    }
 123
 124    ArchiveOutcome {
 125        archived_immediately: true,
 126        roots_to_delete: Vec::new(),
 127    }
 128}
 129
/// Builds a `CleanupPlan` for archiving `session_id`, or `None` when the
/// thread has no stored metadata.
///
/// A root only becomes a deletion candidate when it resolves to a linked git
/// worktree (see `build_root_plan`) and no other unarchived thread still
/// references it.
fn build_cleanup_plan(
    session_id: &acp::SessionId,
    current_workspace: Option<Entity<Workspace>>,
    requesting_window: WindowHandle<MultiWorkspace>,
    cx: &App,
) -> Option<CleanupPlan> {
    let metadata = ThreadMetadataStore::global(cx)
        .read(cx)
        .entry(session_id)
        .cloned()?;

    let workspaces = all_open_workspaces(cx);

    // Roots that are linked worktrees and not shared with other live threads.
    let candidate_roots = metadata
        .folder_paths
        .ordered_paths()
        .filter_map(|path| build_root_plan(path, &workspaces, cx))
        .filter(|plan| {
            !path_is_referenced_by_other_unarchived_threads(session_id, &plan.root_path, cx)
        })
        .collect::<Vec<_>>();

    if candidate_roots.is_empty() {
        // Nothing to delete: return an inert plan so the caller can still
        // archive without scheduling cleanup.
        return Some(CleanupPlan {
            folder_paths: metadata.folder_paths,
            roots: Vec::new(),
            current_workspace,
            current_workspace_will_be_empty: false,
            fallback: None,
            affected_workspaces: Vec::new(),
        });
    }

    let mut affected_workspaces = Vec::new();
    let mut current_workspace_will_be_empty = false;

    for workspace in workspaces.iter() {
        // How many of this workspace's roots are slated for deletion.
        let doomed_root_count = workspace
            .read(cx)
            .root_paths(cx)
            .into_iter()
            .filter(|path| {
                candidate_roots
                    .iter()
                    .any(|root| root.root_path.as_path() == path.as_ref())
            })
            .count();

        if doomed_root_count == 0 {
            continue;
        }

        let surviving_root_count = workspace
            .read(cx)
            .root_paths(cx)
            .len()
            .saturating_sub(doomed_root_count);
        if current_workspace
            .as_ref()
            .is_some_and(|current| current == workspace)
        {
            current_workspace_will_be_empty = surviving_root_count == 0;
        }
        affected_workspaces.push(workspace.clone());
    }

    // Only pick a fallback destination when the user's workspace would be
    // left with no roots at all.
    let fallback = if current_workspace_will_be_empty {
        choose_fallback_target(
            session_id,
            current_workspace.as_ref(),
            &candidate_roots,
            &requesting_window,
            &workspaces,
            cx,
        )
    } else {
        None
    };

    Some(CleanupPlan {
        folder_paths: metadata.folder_paths,
        roots: candidate_roots,
        current_workspace,
        current_workspace_will_be_empty,
        fallback,
        affected_workspaces,
    })
}
 218
/// Builds a `RootPlan` for `path`, or `None` when no open workspace has a
/// repository whose linked-worktree working directory is exactly `path`
/// (i.e. the path is not a linked git worktree we know how to clean up).
fn build_root_plan(path: &Path, workspaces: &[Entity<Workspace>], cx: &App) -> Option<RootPlan> {
    let path = path.to_path_buf();
    // Every project that shows this path as one of its visible worktrees.
    let affected_projects = workspaces
        .iter()
        .filter_map(|workspace| {
            let project = workspace.read(cx).project().clone();
            let worktree = project
                .read(cx)
                .visible_worktrees(cx)
                .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?;
            let worktree_id = worktree.read(cx).id();
            Some(AffectedProject {
                project,
                worktree_id,
            })
        })
        .collect::<Vec<_>>();

    // Find the repository whose linked-worktree work dir is `path`; the `?`
    // bails out when the path is not a linked worktree at all.
    let (linked_snapshot, worktree_repo) = workspaces
        .iter()
        .flat_map(|workspace| {
            workspace
                .read(cx)
                .project()
                .read(cx)
                .repositories(cx)
                .values()
                .cloned()
                .collect::<Vec<_>>()
        })
        .find_map(|repo| {
            let snapshot = repo.read(cx).snapshot();
            (snapshot.is_linked_worktree()
                && snapshot.work_directory_abs_path.as_ref() == path.as_path())
            .then_some((snapshot, repo))
        })?;

    let branch_name = linked_snapshot
        .branch
        .as_ref()
        .map(|b| b.name().to_string());

    Some(RootPlan {
        root_path: path,
        main_repo_path: linked_snapshot.original_repo_abs_path.to_path_buf(),
        affected_projects,
        worktree_repo: Some(worktree_repo),
        branch_name,
    })
}
 269
 270fn path_is_referenced_by_other_unarchived_threads(
 271    current_session_id: &acp::SessionId,
 272    path: &Path,
 273    cx: &App,
 274) -> bool {
 275    ThreadMetadataStore::global(cx)
 276        .read(cx)
 277        .entries()
 278        .filter(|thread| thread.session_id != *current_session_id)
 279        .filter(|thread| !thread.archived)
 280        .any(|thread| {
 281            thread
 282                .folder_paths
 283                .paths()
 284                .iter()
 285                .any(|other_path| other_path.as_path() == path)
 286        })
 287}
 288
/// Picks where to send the user after their current workspace loses all of
/// its roots. Preference order:
/// 1. another surviving workspace in the same window,
/// 2. a surviving workspace in any other window,
/// 3. a workspace belonging to another unarchived thread,
/// 4. opening the first root's main repository path,
/// 5. a brand-new empty workspace.
fn choose_fallback_target(
    current_session_id: &acp::SessionId,
    current_workspace: Option<&Entity<Workspace>>,
    roots: &[RootPlan],
    requesting_window: &WindowHandle<MultiWorkspace>,
    workspaces: &[Entity<Workspace>],
    cx: &App,
) -> Option<FallbackTarget> {
    let doomed_roots = roots
        .iter()
        .map(|root| root.root_path.clone())
        .collect::<HashSet<_>>();

    // 1. Prefer a surviving workspace in the same window (excluding the one
    // that is about to become empty).
    let surviving_same_window = requesting_window.read(cx).ok().and_then(|multi_workspace| {
        multi_workspace
            .workspaces()
            .iter()
            .filter(|workspace| current_workspace.is_none_or(|current| *workspace != current))
            .find(|workspace| workspace_survives(workspace, &doomed_roots, cx))
            .cloned()
    });
    if let Some(workspace) = surviving_same_window {
        return Some(FallbackTarget::ExistingWorkspace {
            window: *requesting_window,
            workspace,
        });
    }

    // 2. Any surviving workspace in any other MultiWorkspace window.
    for window in cx
        .windows()
        .into_iter()
        .filter_map(|window| window.downcast::<MultiWorkspace>())
    {
        if window == *requesting_window {
            continue;
        }
        if let Ok(multi_workspace) = window.read(cx) {
            if let Some(workspace) = multi_workspace
                .workspaces()
                .iter()
                .find(|workspace| workspace_survives(workspace, &doomed_roots, cx))
                .cloned()
            {
                return Some(FallbackTarget::ExistingWorkspace { window, workspace });
            }
        }
    }

    // 3. A workspace whose path list matches another live (unarchived) thread.
    let safe_thread_workspace = ThreadMetadataStore::global(cx)
        .read(cx)
        .entries()
        .filter(|metadata| metadata.session_id != *current_session_id && !metadata.archived)
        .filter_map(|metadata| {
            workspaces
                .iter()
                .find(|workspace| workspace_path_list(workspace, cx) == metadata.folder_paths)
                .cloned()
        })
        .find(|workspace| workspace_survives(workspace, &doomed_roots, cx));

    if let Some(workspace) = safe_thread_workspace {
        let window = window_for_workspace(&workspace, cx).unwrap_or(*requesting_window);
        return Some(FallbackTarget::ExistingWorkspace { window, workspace });
    }

    // 4. Fall back to opening the main repo of the first doomed root.
    if let Some(root) = roots.first() {
        return Some(FallbackTarget::OpenPaths {
            requesting_window: *requesting_window,
            paths: vec![root.main_repo_path.clone()],
        });
    }

    // 5. Last resort: an empty workspace.
    Some(FallbackTarget::OpenEmpty {
        requesting_window: *requesting_window,
    })
}
 365
/// Executes a `CleanupPlan` off the main flow: claims the roots in the global
/// coordinator, prompts affected workspaces to save dirty items, activates a
/// fallback destination when needed, snapshots each worktree's state, removes
/// the worktrees, and finally releases the claimed roots.
///
/// All failure paths release the in-flight roots before returning; a failed
/// removal also rolls back that root's persisted snapshot.
async fn run_cleanup(plan: CleanupPlan, cx: &mut AsyncApp) {
    // Claim roots atomically; any root another cleanup already claimed is
    // skipped here so two tasks never delete the same root.
    let roots_to_delete =
        cx.update_global::<ThreadArchiveCleanupCoordinator, _>(|coordinator, _cx| {
            let mut in_flight_roots = coordinator.in_flight_roots.lock();
            plan.roots
                .iter()
                .filter_map(|root| {
                    if in_flight_roots.insert(root.root_path.clone()) {
                        Some(root.clone())
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>()
        });

    if roots_to_delete.is_empty() {
        return;
    }

    // Handle the user's current workspace first so its save prompt appears
    // before any other workspace's.
    let active_workspace = plan.current_workspace.clone();
    if let Some(workspace) = active_workspace
        .as_ref()
        .filter(|_| plan.current_workspace_will_be_empty)
    {
        let Some(window) = window_for_workspace_async(workspace, cx) else {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        };

        let should_continue = save_workspace_for_root_removal(workspace.clone(), window, cx).await;
        if !should_continue {
            // User canceled the save prompt: abort the whole cleanup.
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        }
    }

    // Then prompt every other affected workspace.
    for workspace in plan
        .affected_workspaces
        .iter()
        .filter(|workspace| Some((*workspace).clone()) != active_workspace)
    {
        let Some(window) = window_for_workspace_async(workspace, cx) else {
            continue;
        };

        if !save_workspace_for_root_removal(workspace.clone(), window, cx).await {
            release_in_flight_roots(&roots_to_delete, cx);
            return;
        }
    }

    // Move the user somewhere safe before their workspace loses its roots.
    if plan.current_workspace_will_be_empty {
        if let Some(fallback) = plan.fallback.clone() {
            activate_fallback(fallback, cx).await.log_err();
        }
    }

    let mut git_removal_errors: Vec<(PathBuf, anyhow::Error)> = Vec::new();
    let mut persist_errors: Vec<(PathBuf, anyhow::Error)> = Vec::new();
    let mut persist_outcomes: HashMap<PathBuf, PersistOutcome> = HashMap::default();

    for root in &roots_to_delete {
        // Snapshot the worktree's state before destroying it; skip removal
        // entirely if the snapshot fails.
        if root.worktree_repo.is_some() {
            match persist_worktree_state(root, &plan, cx).await {
                Ok(outcome) => {
                    persist_outcomes.insert(root.root_path.clone(), outcome);
                }
                Err(error) => {
                    log::error!(
                        "Failed to persist worktree state for {}: {error}",
                        root.root_path.display()
                    );
                    persist_errors.push((root.root_path.clone(), error));
                    continue;
                }
            }
        }

        if let Err(error) = remove_root(root.clone(), cx).await {
            // Removal failed: undo the snapshot so the worktree is usable again.
            if let Some(outcome) = persist_outcomes.remove(&root.root_path) {
                rollback_persist(&outcome, root, cx).await;
            }
            git_removal_errors.push((root.root_path.clone(), error));
        }
    }

    cleanup_empty_workspaces(&plan.affected_workspaces, cx).await;

    let all_errors: Vec<(PathBuf, anyhow::Error)> = persist_errors
        .into_iter()
        .chain(git_removal_errors)
        .collect();

    if !all_errors.is_empty() {
        let detail = all_errors
            .into_iter()
            .map(|(path, error)| format!("{}: {error}", path.display()))
            .collect::<Vec<_>>()
            .join("\n");
        show_error_toast(
            "Thread archived, but linked worktree cleanup failed",
            &detail,
            &plan,
            cx,
        );
    }

    release_in_flight_roots(&roots_to_delete, cx);
}
 476
/// Prompts the user to save or discard dirty items in `workspace` before its
/// roots are removed. Returns `true` when cleanup may proceed, `false` when
/// the user canceled or the window is already gone.
async fn save_workspace_for_root_removal(
    workspace: Entity<Workspace>,
    window: WindowHandle<MultiWorkspace>,
    cx: &mut AsyncApp,
) -> bool {
    // NOTE(review): assumes `read_with` on this AsyncApp yields the closure's
    // bool directly (not a Result) — confirm against the gpui version in use.
    let has_dirty_items = workspace.read_with(cx, |workspace, cx| {
        workspace.items(cx).any(|item| item.is_dirty(cx))
    });

    if has_dirty_items {
        // Bring the workspace to the foreground so the save prompt is visible.
        let _ = window.update(cx, |multi_workspace, window, cx| {
            window.activate_window();
            multi_workspace.activate(workspace.clone(), window, cx);
        });
    }

    let save_task = window.update(cx, |_multi_workspace, window, cx| {
        workspace.update(cx, |workspace, cx| {
            workspace.prompt_to_save_or_discard_dirty_items(window, cx)
        })
    });

    // Window already closed: treat as "do not continue".
    let Ok(task) = save_task else {
        return false;
    };

    task.await.unwrap_or(false)
}
 505
/// Sends the user to the chosen fallback destination: activates an existing
/// workspace, opens the main repo's paths, or opens an empty workspace.
async fn activate_fallback(target: FallbackTarget, cx: &mut AsyncApp) -> Result<()> {
    match target {
        FallbackTarget::ExistingWorkspace { window, workspace } => {
            window.update(cx, |multi_workspace, window, cx| {
                window.activate_window();
                multi_workspace.activate(workspace, window, cx);
            })?;
        }
        FallbackTarget::OpenPaths {
            requesting_window,
            paths,
        } => {
            let app_state = current_app_state(cx).context("no workspace app state available")?;
            cx.update(|cx| {
                open_paths(
                    &paths,
                    app_state,
                    OpenOptions {
                        requesting_window: Some(requesting_window),
                        open_mode: OpenMode::Activate,
                        ..Default::default()
                    },
                    cx,
                )
            })
            .await?;
        }
        FallbackTarget::OpenEmpty { requesting_window } => {
            let app_state = current_app_state(cx).context("no workspace app state available")?;
            cx.update(|cx| {
                open_new(
                    OpenOptions {
                        requesting_window: Some(requesting_window),
                        open_mode: OpenMode::Activate,
                        ..Default::default()
                    },
                    app_state,
                    cx,
                    // No initialization needed for the empty workspace.
                    |_workspace, _window, _cx| {},
                )
            })
            .await?;
        }
    }

    Ok(())
}
 553
 554async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
 555    let release_tasks: Vec<_> = root
 556        .affected_projects
 557        .iter()
 558        .map(|affected| {
 559            let project = affected.project.clone();
 560            let worktree_id = affected.worktree_id;
 561            project.update(cx, |project, cx| {
 562                let wait = project.wait_for_worktree_release(worktree_id, cx);
 563                project.remove_worktree(worktree_id, cx);
 564                wait
 565            })
 566        })
 567        .collect();
 568
 569    if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
 570        rollback_root(&root, cx).await;
 571        return Err(error);
 572    }
 573
 574    Ok(())
 575}
 576
 577async fn remove_root_after_worktree_removal(
 578    root: &RootPlan,
 579    release_tasks: Vec<Task<Result<()>>>,
 580    cx: &mut AsyncApp,
 581) -> Result<()> {
 582    for task in release_tasks {
 583        task.await?;
 584    }
 585
 586    let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
 587    let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
 588        repo.remove_worktree(root.root_path.clone(), false)
 589    });
 590    let result = receiver
 591        .await
 592        .map_err(|_| anyhow!("git worktree removal was canceled"))?;
 593    result
 594}
 595
/// Finds a live `Repository` entity for the given path, or creates a temporary
/// `Project::local` to obtain one.
///
/// `Repository` entities can only be obtained through a `Project` because
/// `GitStore` (which creates and manages `Repository` entities) is owned by
/// `Project`. When no open workspace contains the repo we need, we spin up a
/// headless `Project::local` just to get a `Repository` handle. The caller
/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
/// git operations, then drops it.
///
/// Future improvement: decoupling `GitStore` from `Project` so that
/// `Repository` entities can be created standalone would eliminate this
/// temporary-project workaround.
async fn find_or_create_repository(
    repo_path: &Path,
    cx: &mut AsyncApp,
) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
    // Fast path: a repository with this working directory is already open.
    let repo_path_owned = repo_path.to_path_buf();
    let live_repo = cx.update(|cx| {
        all_open_workspaces(cx)
            .into_iter()
            .flat_map(|workspace| {
                workspace
                    .read(cx)
                    .project()
                    .read(cx)
                    .repositories(cx)
                    .values()
                    .cloned()
                    .collect::<Vec<_>>()
            })
            .find(|repo| {
                repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                    == repo_path_owned.as_path()
            })
    });

    if let Some(repo) = live_repo {
        return Ok((repo, None));
    }

    // Slow path: build a throwaway local project over the repo path.
    let app_state =
        current_app_state(cx).context("no app state available for temporary project")?;
    let temp_project = cx.update(|cx| {
        Project::local(
            app_state.client.clone(),
            app_state.node_runtime.clone(),
            app_state.user_store.clone(),
            app_state.languages.clone(),
            app_state.fs.clone(),
            None,
            LocalProjectFlags::default(),
            cx,
        )
    });

    let repo_path_for_worktree = repo_path.to_path_buf();
    let create_worktree = temp_project.update(cx, |project, cx| {
        project.create_worktree(repo_path_for_worktree, true, cx)
    });
    let _worktree = create_worktree.await?;
    // Wait for the scan so the git store has discovered the repository.
    let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx));
    initial_scan.await;

    let repo_path_for_find = repo_path.to_path_buf();
    let repo = temp_project
        .update(cx, |project, cx| {
            project
                .repositories(cx)
                .values()
                .find(|repo| {
                    repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                        == repo_path_for_find.as_path()
                })
                .cloned()
        })
        .context("failed to resolve temporary repository handle")?;

    // Flush the repository's job queue so it is ready for commands.
    let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier());
    barrier
        .await
        .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
    Ok((repo, Some(temp_project)))
}
 680
 681async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) {
 682    for affected in &root.affected_projects {
 683        let task = affected.project.update(cx, |project, cx| {
 684            project.create_worktree(root.root_path.clone(), true, cx)
 685        });
 686        let _ = task.await;
 687    }
 688}
 689
/// Snapshots the linked worktree's working state before it is deleted:
/// creates two WIP commits (staged state, then unstaged + untracked state),
/// records their SHAs in the metadata DB, links every thread on this worktree
/// to that record, and tags the final commit with a ref on the main repo so
/// git never garbage-collects it.
///
/// Every failure path resets the worktree back to the pre-snapshot HEAD
/// (`{staged}~1` once the staged commit exists) and deletes any
/// partially-created DB rows before returning the error. The ref creation at
/// the end is deliberately non-fatal.
async fn persist_worktree_state(
    root: &RootPlan,
    plan: &CleanupPlan,
    cx: &mut AsyncApp,
) -> Result<PersistOutcome> {
    let worktree_repo = root
        .worktree_repo
        .clone()
        .context("no worktree repo entity for persistence")?;

    // Read original HEAD SHA before creating any WIP commits
    let original_commit_hash = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))?
        .context("failed to read original HEAD SHA")?
        .context("HEAD SHA is None before WIP commits")?;

    // Create WIP commit #1 (staged state)
    let askpass = AskPassDelegate::new(cx, |_, _, _| {});
    let commit_rx = worktree_repo.update(cx, |repo, cx| {
        repo.commit(
            "WIP staged".into(),
            None,
            CommitOptions {
                // allow_empty: a worktree with nothing staged still gets a
                // commit so the two-commit structure is uniform.
                allow_empty: true,
                ..Default::default()
            },
            askpass,
            cx,
        )
    });
    commit_rx
        .await
        .map_err(|_| anyhow!("WIP staged commit canceled"))??;

    // Read SHA after staged commit
    let staged_sha_result = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))
        .and_then(|r| r.context("failed to read HEAD SHA after staged commit"))
        .and_then(|opt| opt.context("HEAD SHA is None after staged commit"));
    let staged_commit_hash = match staged_sha_result {
        Ok(sha) => sha,
        Err(error) => {
            // Roll back the single WIP commit we just made.
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Stage all files including untracked
    let stage_rx = worktree_repo.update(cx, |repo, _cx| repo.stage_all_including_untracked());
    if let Err(error) = stage_rx
        .await
        .map_err(|_| anyhow!("stage all canceled"))
        .and_then(|inner| inner)
    {
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
        });
        let _ = rx.await;
        return Err(error.context("failed to stage all files including untracked"));
    }

    // Create WIP commit #2 (unstaged/untracked state)
    let askpass = AskPassDelegate::new(cx, |_, _, _| {});
    let commit_rx = worktree_repo.update(cx, |repo, cx| {
        repo.commit(
            "WIP unstaged".into(),
            None,
            CommitOptions {
                allow_empty: true,
                ..Default::default()
            },
            askpass,
            cx,
        )
    });
    if let Err(error) = commit_rx
        .await
        .map_err(|_| anyhow!("WIP unstaged commit canceled"))
        .and_then(|inner| inner)
    {
        let rx = worktree_repo.update(cx, |repo, cx| {
            repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
        });
        let _ = rx.await;
        return Err(error);
    }

    // Read HEAD SHA after WIP commits
    let head_sha_result = worktree_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("head_sha canceled"))
        .and_then(|r| r.context("failed to read HEAD SHA after WIP commits"))
        .and_then(|opt| opt.context("HEAD SHA is None after WIP commits"));
    let unstaged_commit_hash = match head_sha_result {
        Ok(sha) => sha,
        Err(error) => {
            // `{staged}~1` is the original HEAD: drops both WIP commits.
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Create DB record
    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
    let worktree_path_str = root.root_path.to_string_lossy().to_string();
    let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string();
    let branch_name = root.branch_name.clone();

    let db_result = store
        .read_with(cx, |store, cx| {
            store.create_archived_worktree(
                worktree_path_str.clone(),
                main_repo_path_str.clone(),
                branch_name.clone(),
                staged_commit_hash.clone(),
                unstaged_commit_hash.clone(),
                original_commit_hash.clone(),
                cx,
            )
        })
        .await
        .context("failed to create archived worktree DB record");
    let archived_worktree_id = match db_result {
        Ok(id) => id,
        Err(error) => {
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error);
        }
    };

    // Link all threads on this worktree to the archived record
    let session_ids: Vec<acp::SessionId> = store.read_with(cx, |store, _cx| {
        store
            .all_session_ids_for_path(&plan.folder_paths)
            .cloned()
            .collect()
    });

    for session_id in &session_ids {
        let link_result = store
            .read_with(cx, |store, cx| {
                store.link_thread_to_archived_worktree(
                    session_id.0.to_string(),
                    archived_worktree_id,
                    cx,
                )
            })
            .await;
        if let Err(error) = link_result {
            // Unwind in reverse order: DB record first, then the WIP commits.
            if let Err(delete_error) = store
                .read_with(cx, |store, cx| {
                    store.delete_archived_worktree(archived_worktree_id, cx)
                })
                .await
            {
                log::error!(
                    "Failed to delete archived worktree DB record during link rollback: {delete_error:#}"
                );
            }
            let rx = worktree_repo.update(cx, |repo, cx| {
                repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
            });
            let _ = rx.await;
            return Err(error.context("failed to link thread to archived worktree"));
        }
    }

    // Create git ref on main repo (non-fatal)
    let ref_name = archived_worktree_ref_name(archived_worktree_id);
    let main_repo_result = find_or_create_repository(&root.main_repo_path, cx).await;
    match main_repo_result {
        Ok((main_repo, _temp_project)) => {
            let rx = main_repo.update(cx, |repo, _cx| {
                repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
            });
            if let Err(error) = rx
                .await
                .map_err(|_| anyhow!("update_ref canceled"))
                .and_then(|r| r)
            {
                log::warn!(
                    "Failed to create ref {} on main repo (non-fatal): {error}",
                    ref_name
                );
            }
        }
        Err(error) => {
            log::warn!(
                "Could not find main repo to create ref {} (non-fatal): {error}",
                ref_name
            );
        }
    }

    Ok(PersistOutcome {
        archived_worktree_id,
        staged_commit_hash,
    })
}
 902
 903async fn rollback_persist(outcome: &PersistOutcome, root: &RootPlan, cx: &mut AsyncApp) {
 904    // Undo WIP commits on the worktree repo
 905    if let Some(worktree_repo) = &root.worktree_repo {
 906        let rx = worktree_repo.update(cx, |repo, cx| {
 907            repo.reset(
 908                format!("{}~1", outcome.staged_commit_hash),
 909                ResetMode::Mixed,
 910                cx,
 911            )
 912        });
 913        let _ = rx.await;
 914    }
 915
 916    // Delete the git ref on main repo
 917    if let Ok((main_repo, _temp_project)) =
 918        find_or_create_repository(&root.main_repo_path, cx).await
 919    {
 920        let ref_name = archived_worktree_ref_name(outcome.archived_worktree_id);
 921        let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
 922        let _ = rx.await;
 923    }
 924
 925    // Delete the DB record
 926    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
 927    if let Err(error) = store
 928        .read_with(cx, |store, cx| {
 929            store.delete_archived_worktree(outcome.archived_worktree_id, cx)
 930        })
 931        .await
 932    {
 933        log::error!("Failed to delete archived worktree DB record during rollback: {error:#}");
 934    }
 935}
 936
 937async fn cleanup_empty_workspaces(workspaces: &[Entity<Workspace>], cx: &mut AsyncApp) {
 938    for workspace in workspaces {
 939        let is_empty = match workspace
 940            .downgrade()
 941            .read_with(cx, |workspace, cx| workspace.root_paths(cx).is_empty())
 942        {
 943            Ok(is_empty) => is_empty,
 944            Err(_) => {
 945                log::debug!("Workspace entity already dropped during cleanup; skipping");
 946                continue;
 947            }
 948        };
 949        if !is_empty {
 950            continue;
 951        }
 952
 953        let Some(window) = window_for_workspace_async(workspace, cx) else {
 954            continue;
 955        };
 956
 957        let _ = window.update(cx, |multi_workspace, window, cx| {
 958            if !multi_workspace.remove(workspace, window, cx) {
 959                window.remove_window();
 960            }
 961        });
 962    }
 963}
 964
/// Restores an archived git worktree onto disk from the metadata in `row`.
///
/// Steps:
/// 1. Verify `original_commit_hash` still exists in the main repository.
/// 2. Ensure the worktree directory exists (reuse an already-restored one,
///    ask git to repair/adopt an existing plain directory, or create a fresh
///    worktree detached at `unstaged_commit_hash`).
/// 3. Reset past the two archival WIP commits (mixed to the staged commit,
///    then soft to the original commit) to recover the index/working-tree
///    split; on failure, fall back to a mixed reset straight to the
///    original commit.
/// 4. Verify HEAD landed on `original_commit_hash`, then try to restore the
///    original branch (failures here are non-fatal; the worktree is left in
///    detached HEAD state).
///
/// Returns the restored worktree path on success.
pub async fn restore_worktree_via_git(
    row: &ArchivedGitWorktree,
    cx: &mut AsyncApp,
) -> Result<PathBuf> {
    // Find the main repo entity and verify original_commit_hash exists
    let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;

    let commit_exists = main_repo
        .update(cx, |repo, _cx| {
            repo.resolve_commit(row.original_commit_hash.clone())
        })
        .await
        .map_err(|_| anyhow!("resolve_commit was canceled"))?
        .context("failed to check if original commit exists")?;

    if !commit_exists {
        anyhow::bail!(
            "Original commit {} no longer exists in the repository — \
             cannot restore worktree. The git history this archive depends on may have been \
             rewritten or garbage-collected.",
            row.original_commit_hash
        );
    }

    // Check if worktree path already exists on disk
    let worktree_path = &row.worktree_path;
    let app_state = current_app_state(cx).context("no app state available")?;
    let already_exists = app_state.fs.metadata(worktree_path).await?.is_some();

    if already_exists {
        let is_git_worktree =
            resolve_git_worktree_to_main_repo(app_state.fs.as_ref(), worktree_path)
                .await
                .is_some();

        if is_git_worktree {
            // Already a git worktree — another thread on the same worktree
            // already restored it. Reuse as-is.
            return Ok(worktree_path.clone());
        }

        // Path exists but isn't a git worktree. Ask git to adopt it.
        let rx = main_repo.update(cx, |repo, _cx| repo.repair_worktrees());
        rx.await
            .map_err(|_| anyhow!("worktree repair was canceled"))?
            .context("failed to repair worktrees")?;
    } else {
        // Create detached worktree at the unstaged commit
        let rx = main_repo.update(cx, |repo, _cx| {
            repo.create_worktree_detached(worktree_path.clone(), row.unstaged_commit_hash.clone())
        });
        rx.await
            .map_err(|_| anyhow!("worktree creation was canceled"))?
            .context("failed to create worktree")?;
    }

    // Get the worktree's repo entity
    let (wt_repo, _temp_wt_project) = find_or_create_repository(worktree_path, cx).await?;

    // Reset past the WIP commits to recover original state.
    // Mixed reset to the staged commit: moves HEAD off the unstaged WIP
    // commit, spilling its contents back into the working tree.
    let mixed_reset_ok = {
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.staged_commit_hash.clone(), ResetMode::Mixed, cx)
        });
        match rx.await {
            Ok(Ok(())) => true,
            Ok(Err(error)) => {
                log::error!("Mixed reset to staged commit failed: {error:#}");
                false
            }
            Err(_) => {
                log::error!("Mixed reset to staged commit was canceled");
                false
            }
        }
    };

    // Soft reset to the original commit: moves HEAD off the staged WIP
    // commit while keeping its contents staged in the index. Only attempted
    // when the mixed reset above succeeded.
    let soft_reset_ok = if mixed_reset_ok {
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.original_commit_hash.clone(), ResetMode::Soft, cx)
        });
        match rx.await {
            Ok(Ok(())) => true,
            Ok(Err(error)) => {
                log::error!("Soft reset to original commit failed: {error:#}");
                false
            }
            Err(_) => {
                log::error!("Soft reset to original commit was canceled");
                false
            }
        }
    } else {
        false
    };

    // If either WIP reset failed, fall back to a mixed reset directly to
    // original_commit_hash so we at least land on the right commit.
    // (The recovered staged/unstaged split is lost in this fallback.)
    if !mixed_reset_ok || !soft_reset_ok {
        log::warn!(
            "WIP reset(s) failed (mixed_ok={mixed_reset_ok}, soft_ok={soft_reset_ok}); \
             falling back to mixed reset to original commit {}",
            row.original_commit_hash
        );
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
        });
        match rx.await {
            Ok(Ok(())) => {}
            Ok(Err(error)) => {
                return Err(error.context(format!(
                    "fallback reset to original commit {} also failed",
                    row.original_commit_hash
                )));
            }
            Err(_) => {
                return Err(anyhow!(
                    "fallback reset to original commit {} was canceled",
                    row.original_commit_hash
                ));
            }
        }
    }

    // Verify HEAD is at original_commit_hash
    let current_head = wt_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("post-restore head_sha was canceled"))?
        .context("failed to read HEAD after restore")?
        .context("HEAD is None after restore")?;

    if current_head != row.original_commit_hash {
        anyhow::bail!(
            "After restore, HEAD is at {current_head} but expected {}. \
             The worktree may be in an inconsistent state.",
            row.original_commit_hash
        );
    }

    // Restore the branch
    if let Some(branch_name) = &row.branch_name {
        // Check if the branch exists and points at original_commit_hash.
        // If it does, switch to it. If not, create a new branch there.
        let rx = wt_repo.update(cx, |repo, _cx| repo.change_branch(branch_name.clone()));
        if matches!(rx.await, Ok(Ok(()))) {
            // Verify the branch actually points at original_commit_hash after switching
            let head_after_switch = wt_repo
                .update(cx, |repo, _cx| repo.head_sha())
                .await
                .ok()
                .and_then(|r| r.ok())
                .flatten();

            if head_after_switch.as_deref() != Some(&row.original_commit_hash) {
                // Branch exists but doesn't point at the right commit.
                // Switch back to detached HEAD at original_commit_hash.
                log::warn!(
                    "Branch '{}' exists but points at {:?}, not {}. Creating fresh branch.",
                    branch_name,
                    head_after_switch,
                    row.original_commit_hash
                );
                let rx = wt_repo.update(cx, |repo, cx| {
                    repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
                });
                let _ = rx.await;
                // NOTE(review): the old branch is NOT deleted before this
                // call, despite the log message's "Creating fresh branch". If
                // `create_branch` refuses to overwrite an existing branch,
                // the worktree is left detached at original_commit_hash —
                // confirm `create_branch`'s overwrite semantics.
                let rx = wt_repo.update(cx, |repo, _cx| {
                    repo.create_branch(branch_name.clone(), None)
                });
                let _ = rx.await;
            }
        } else {
            // Branch doesn't exist or can't be switched to — create it.
            let rx = wt_repo.update(cx, |repo, _cx| {
                repo.create_branch(branch_name.clone(), None)
            });
            if let Ok(Err(error)) | Err(error) = rx.await.map_err(|e| anyhow::anyhow!("{e}")) {
                log::warn!(
                    "Could not create branch '{}': {error} — \
                     restored worktree is in detached HEAD state.",
                    branch_name
                );
            }
        }
    }

    Ok(worktree_path.clone())
}
1155
1156pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
1157    // Delete the git ref from the main repo
1158    if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
1159    {
1160        let ref_name = archived_worktree_ref_name(row.id);
1161        let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
1162        match rx.await {
1163            Ok(Ok(())) => {}
1164            Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
1165            Err(_) => log::warn!("Archive ref deletion was canceled"),
1166        }
1167    }
1168
1169    // Delete the DB records
1170    let store = cx.update(|cx| ThreadMetadataStore::global(cx));
1171    store
1172        .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx))
1173        .await
1174        .log_err();
1175}
1176
1177fn show_error_toast(summary: &str, detail: &str, plan: &CleanupPlan, cx: &mut AsyncApp) {
1178    let target_workspace = plan
1179        .current_workspace
1180        .clone()
1181        .or_else(|| plan.affected_workspaces.first().cloned());
1182    let Some(workspace) = target_workspace else {
1183        return;
1184    };
1185
1186    let _ = workspace.update(cx, |workspace, cx| {
1187        struct ArchiveCleanupErrorToast;
1188        let message = if detail.is_empty() {
1189            summary.to_string()
1190        } else {
1191            format!("{summary}: {detail}")
1192        };
1193        workspace.show_toast(
1194            Toast::new(
1195                NotificationId::unique::<ArchiveCleanupErrorToast>(),
1196                message,
1197            )
1198            .autohide(),
1199            cx,
1200        );
1201    });
1202}
1203
1204fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
1205    cx.windows()
1206        .into_iter()
1207        .filter_map(|window| window.downcast::<MultiWorkspace>())
1208        .flat_map(|multi_workspace| {
1209            multi_workspace
1210                .read(cx)
1211                .map(|multi_workspace| multi_workspace.workspaces().to_vec())
1212                .unwrap_or_default()
1213        })
1214        .collect()
1215}
1216
1217fn workspace_survives(
1218    workspace: &Entity<Workspace>,
1219    doomed_roots: &HashSet<PathBuf>,
1220    cx: &App,
1221) -> bool {
1222    workspace
1223        .read(cx)
1224        .root_paths(cx)
1225        .into_iter()
1226        .any(|root| !doomed_roots.contains(root.as_ref()))
1227}
1228
1229fn workspace_path_list(workspace: &Entity<Workspace>, cx: &App) -> PathList {
1230    PathList::new(&workspace.read(cx).root_paths(cx))
1231}
1232
1233fn window_for_workspace(
1234    workspace: &Entity<Workspace>,
1235    cx: &App,
1236) -> Option<WindowHandle<MultiWorkspace>> {
1237    cx.windows()
1238        .into_iter()
1239        .filter_map(|window| window.downcast::<MultiWorkspace>())
1240        .find(|window| {
1241            window
1242                .read(cx)
1243                .map(|multi_workspace| multi_workspace.workspaces().contains(workspace))
1244                .unwrap_or(false)
1245        })
1246}
1247
1248fn window_for_workspace_async(
1249    workspace: &Entity<Workspace>,
1250    cx: &mut AsyncApp,
1251) -> Option<WindowHandle<MultiWorkspace>> {
1252    let workspace = workspace.clone();
1253    cx.update(|cx| window_for_workspace(&workspace, cx))
1254}
1255
1256fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
1257    cx.update(|cx| {
1258        all_open_workspaces(cx)
1259            .into_iter()
1260            .next()
1261            .map(|workspace| workspace.read(cx).app_state().clone())
1262    })
1263}
1264
1265fn release_in_flight_roots(roots: &[RootPlan], cx: &mut AsyncApp) {
1266    cx.update_global::<ThreadArchiveCleanupCoordinator, _>(|coordinator, _cx| {
1267        let mut in_flight_roots = coordinator.in_flight_roots.lock();
1268        for root in roots {
1269            in_flight_roots.remove(&root.root_path);
1270        }
1271    });
1272}