1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use anyhow::{Context as _, Result, anyhow};
7use gpui::{App, AsyncApp, Entity, Task};
8use project::{
9 LocalProjectFlags, Project, WorktreeId,
10 git_store::{Repository, resolve_git_worktree_to_main_repo, worktrees_directory_for_repo},
11 project_settings::ProjectSettings,
12};
13use settings::Settings;
14use util::ResultExt;
15use workspace::{AppState, MultiWorkspace, Workspace};
16
17use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadId, ThreadMetadataStore};
18
/// The plan for archiving a single git worktree root.
///
/// A thread can have multiple folder paths open, so there may be multiple
/// `RootPlan`s per archival operation. Each one captures everything needed to
/// persist the worktree's git state and then remove it from disk.
///
/// All fields are gathered synchronously by [`build_root_plan`] while the
/// worktree is still loaded in open projects. This is important because
/// workspace removal tears down project and repository entities, making
/// them unavailable for the later async persist/remove steps.
#[derive(Clone)]
pub struct RootPlan {
    /// Absolute path of the git worktree on disk. This is the directory
    /// that [`remove_root`] ultimately deletes.
    pub root_path: PathBuf,
    /// Absolute path to the main git repository this worktree is linked to.
    /// Used both for creating a git ref to prevent GC of WIP commits during
    /// [`persist_worktree_state`], and for `git worktree remove` during
    /// [`remove_root`].
    pub main_repo_path: PathBuf,
    /// Every open `Project` that has this worktree loaded, so they can all
    /// call `remove_worktree` and release it during [`remove_root`].
    /// Multiple projects can reference the same path when the user has the
    /// worktree open in more than one workspace.
    pub affected_projects: Vec<AffectedProject>,
    /// The `Repository` entity for this linked worktree, used to run git
    /// commands (create WIP commits, stage files, reset) during
    /// [`persist_worktree_state`].
    pub worktree_repo: Entity<Repository>,
    /// The branch the worktree was on, so it can be restored later.
    /// `None` if the worktree was in detached HEAD state.
    pub branch_name: Option<String>,
}
51
/// A `Project` that references a worktree being archived, paired with the
/// `WorktreeId` it uses for that worktree.
///
/// The same worktree path can appear in multiple open workspaces/projects
/// (e.g. when the user has two windows open that both include the same
/// linked worktree). Each one needs to call `remove_worktree` and wait for
/// the release during [`remove_root`], otherwise the project would still
/// hold a reference to the directory and `git worktree remove` would fail.
#[derive(Clone)]
pub struct AffectedProject {
    /// The project that currently has the worktree loaded.
    pub project: Entity<Project>,
    /// The id this project assigned to the worktree; used to call
    /// `remove_worktree` / `wait_for_worktree_release` on it.
    pub worktree_id: WorktreeId,
}
65
/// Name of the git ref that pins an archived worktree's WIP commits against
/// garbage collection, keyed by the archived-worktree DB row id.
fn archived_worktree_ref_name(id: i64) -> String {
    format!("refs/archived-worktrees/{id}")
}
69
70/// Builds a [`RootPlan`] for archiving the git worktree at `path`.
71///
72/// This is a synchronous planning step that must run *before* any workspace
73/// removal, because it needs live project and repository entities that are
74/// torn down when a workspace is removed. It does three things:
75///
76/// 1. Finds every `Project` across all open workspaces that has this
77/// worktree loaded (`affected_projects`).
78/// 2. Looks for a `Repository` entity whose snapshot identifies this path
79/// as a linked worktree (`worktree_repo`), which is needed for the git
80/// operations in [`persist_worktree_state`].
81/// 3. Determines the `main_repo_path` — the parent repo that owns this
82/// linked worktree — needed for both git ref creation and
83/// `git worktree remove`.
84///
85/// Returns `None` if the path is not a linked worktree (main worktrees
86/// cannot be archived to disk) or if no open project has it loaded.
87pub fn build_root_plan(
88 path: &Path,
89 workspaces: &[Entity<Workspace>],
90 cx: &App,
91) -> Option<RootPlan> {
92 let path = path.to_path_buf();
93
94 let affected_projects = workspaces
95 .iter()
96 .filter_map(|workspace| {
97 let project = workspace.read(cx).project().clone();
98 let worktree = project
99 .read(cx)
100 .visible_worktrees(cx)
101 .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?;
102 let worktree_id = worktree.read(cx).id();
103 Some(AffectedProject {
104 project,
105 worktree_id,
106 })
107 })
108 .collect::<Vec<_>>();
109
110 if affected_projects.is_empty() {
111 return None;
112 }
113
114 let linked_repo = workspaces
115 .iter()
116 .flat_map(|workspace| {
117 workspace
118 .read(cx)
119 .project()
120 .read(cx)
121 .repositories(cx)
122 .values()
123 .cloned()
124 .collect::<Vec<_>>()
125 })
126 .find_map(|repo| {
127 let snapshot = repo.read(cx).snapshot();
128 (snapshot.is_linked_worktree()
129 && snapshot.work_directory_abs_path.as_ref() == path.as_path())
130 .then_some((snapshot, repo))
131 });
132
133 // Only linked worktrees can be archived to disk via `git worktree remove`.
134 // Main worktrees must be left alone — git refuses to remove them.
135 let (linked_snapshot, repo) = linked_repo?;
136 let main_repo_path = linked_snapshot.original_repo_abs_path.to_path_buf();
137 let branch_name = linked_snapshot
138 .branch
139 .as_ref()
140 .map(|branch| branch.name().to_string());
141 Some(RootPlan {
142 root_path: path,
143 main_repo_path,
144 affected_projects,
145 worktree_repo: repo,
146 branch_name,
147 })
148}
149
150/// Removes a worktree from all affected projects and deletes it from disk
151/// via `git worktree remove`.
152///
153/// This is the destructive counterpart to [`persist_worktree_state`]. It
154/// first detaches the worktree from every [`AffectedProject`], waits for
155/// each project to fully release it, then asks the main repository to
156/// delete the worktree directory. If the git removal fails, the worktree
157/// is re-added to each project via [`rollback_root`].
158pub async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
159 let release_tasks: Vec<_> = root
160 .affected_projects
161 .iter()
162 .map(|affected| {
163 let project = affected.project.clone();
164 let worktree_id = affected.worktree_id;
165 project.update(cx, |project, cx| {
166 let wait = project.wait_for_worktree_release(worktree_id, cx);
167 project.remove_worktree(worktree_id, cx);
168 wait
169 })
170 })
171 .collect();
172
173 if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
174 rollback_root(&root, cx).await;
175 return Err(error);
176 }
177
178 Ok(())
179}
180
/// Second phase of [`remove_root`]: waits for every project to release the
/// worktree, deletes the directory from disk, has git clean up the worktree
/// metadata, then prunes any now-empty parent directories. Any error
/// returned here causes the caller to roll back via [`rollback_root`].
async fn remove_root_after_worktree_removal(
    root: &RootPlan,
    release_tasks: Vec<Task<Result<()>>>,
    cx: &mut AsyncApp,
) -> Result<()> {
    // A failed release is logged but not fatal; we still proceed to the
    // directory deletion below.
    for task in release_tasks {
        if let Err(error) = task.await {
            log::error!("Failed waiting for worktree release: {error:#}");
        }
    }

    // Delete the directory ourselves first, then tell git to clean up the
    // metadata. This avoids a problem where `git worktree remove` can
    // remove the metadata in `.git/worktrees/<name>` but fail to delete
    // the directory (git continues past directory-removal errors), leaving
    // an orphaned folder on disk. By deleting the directory first, we
    // guarantee it's gone, and `git worktree remove --force` with a
    // missing working tree just cleans up the admin entry.
    let root_path = root.root_path.clone();
    cx.background_executor()
        .spawn(async move {
            // NotFound means the directory is already gone, which is the
            // desired end state — treat it as success.
            match std::fs::remove_dir_all(&root_path) {
                Ok(()) => Ok(()),
                Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(()),
                Err(error) => Err(error),
            }
        })
        .await
        .with_context(|| {
            format!(
                "failed to delete worktree directory '{}'",
                root.root_path.display()
            )
        })?;

    let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
    let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
        repo.remove_worktree(root.root_path.clone(), true)
    });
    let result = receiver
        .await
        .map_err(|_| anyhow!("git worktree metadata cleanup was canceled"))?;
    // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
    drop(_temp_project);
    result.context("git worktree metadata cleanup failed")?;

    remove_empty_parent_dirs_up_to_worktrees_base(
        root.root_path.clone(),
        root.main_repo_path.clone(),
        cx,
    )
    .await;

    Ok(())
}
236
237/// After `git worktree remove` deletes the worktree directory, clean up any
238/// empty parent directories between it and the Zed-managed worktrees base
239/// directory (configured via `git.worktree_directory`). The base directory
240/// itself is never removed.
241///
242/// If the base directory is not an ancestor of `root_path`, no parent
243/// directories are removed.
244async fn remove_empty_parent_dirs_up_to_worktrees_base(
245 root_path: PathBuf,
246 main_repo_path: PathBuf,
247 cx: &mut AsyncApp,
248) {
249 let worktrees_base = cx.update(|cx| {
250 let setting = &ProjectSettings::get_global(cx).git.worktree_directory;
251 worktrees_directory_for_repo(&main_repo_path, setting).log_err()
252 });
253
254 if let Some(worktrees_base) = worktrees_base {
255 cx.background_executor()
256 .spawn(async move {
257 remove_empty_ancestors(&root_path, &worktrees_base);
258 })
259 .await;
260 }
261}
262
263/// Removes empty directories between `child_path` and `base_path`.
264///
265/// Walks upward from `child_path`, removing each empty parent directory,
266/// stopping before `base_path` itself is removed. If `base_path` is not
267/// an ancestor of `child_path`, nothing is removed. If any directory is
268/// non-empty (i.e. `std::fs::remove_dir` fails), the walk stops.
269fn remove_empty_ancestors(child_path: &Path, base_path: &Path) {
270 let mut current = child_path;
271 while let Some(parent) = current.parent() {
272 if parent == base_path {
273 break;
274 }
275 if !parent.starts_with(base_path) {
276 break;
277 }
278 match std::fs::remove_dir(parent) {
279 Ok(()) => {
280 log::info!("Removed empty parent directory: {}", parent.display());
281 }
282 Err(err) if err.kind() == std::io::ErrorKind::DirectoryNotEmpty => break,
283 Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
284 // Already removed by a concurrent process; keep walking upward.
285 }
286 Err(err) => {
287 log::error!(
288 "Failed to remove parent directory {}: {err}",
289 parent.display()
290 );
291 break;
292 }
293 }
294 current = parent;
295 }
296}
297
/// Finds a live `Repository` entity for the given path, or creates a temporary
/// `Project::local` to obtain one.
///
/// `Repository` entities can only be obtained through a `Project` because
/// `GitStore` (which creates and manages `Repository` entities) is owned by
/// `Project`. When no open workspace contains the repo we need, we spin up a
/// headless `Project::local` just to get a `Repository` handle. The caller
/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
/// git operations, then drops it.
///
/// Future improvement: decoupling `GitStore` from `Project` so that
/// `Repository` entities can be created standalone would eliminate this
/// temporary-project workaround.
async fn find_or_create_repository(
    repo_path: &Path,
    cx: &mut AsyncApp,
) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
    let repo_path_owned = repo_path.to_path_buf();
    // Prefer a repository that's already loaded in some open workspace.
    let live_repo = cx.update(|cx| {
        all_open_workspaces(cx)
            .into_iter()
            .flat_map(|workspace| {
                workspace
                    .read(cx)
                    .project()
                    .read(cx)
                    .repositories(cx)
                    .values()
                    .cloned()
                    .collect::<Vec<_>>()
            })
            .find(|repo| {
                repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                    == repo_path_owned.as_path()
            })
    });

    if let Some(repo) = live_repo {
        // No temporary project needs to be kept alive in this case.
        return Ok((repo, None));
    }

    // No open workspace has this repo loaded: build a headless project
    // solely to obtain a `Repository` handle for the path.
    let app_state =
        current_app_state(cx).context("no app state available for temporary project")?;
    let temp_project = cx.update(|cx| {
        Project::local(
            app_state.client.clone(),
            app_state.node_runtime.clone(),
            app_state.user_store.clone(),
            app_state.languages.clone(),
            app_state.fs.clone(),
            None,
            LocalProjectFlags::default(),
            cx,
        )
    });

    let repo_path_for_worktree = repo_path.to_path_buf();
    let create_worktree = temp_project.update(cx, |project, cx| {
        project.create_worktree(repo_path_for_worktree, true, cx)
    });
    let _worktree = create_worktree.await?;
    // Wait for the initial scan so the project's git store has discovered
    // the repository before we look it up below.
    let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx));
    initial_scan.await;

    let repo_path_for_find = repo_path.to_path_buf();
    let repo = temp_project
        .update(cx, |project, cx| {
            project
                .repositories(cx)
                .values()
                .find(|repo| {
                    repo.read(cx).snapshot().work_directory_abs_path.as_ref()
                        == repo_path_for_find.as_path()
                })
                .cloned()
        })
        .context("failed to resolve temporary repository handle")?;

    // NOTE(review): the barrier presumably ensures the repository has
    // processed its queued work before we hand out the handle — confirm
    // `Repository::barrier` semantics.
    let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier());
    barrier
        .await
        .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
    Ok((repo, Some(temp_project)))
}
382
383/// Re-adds the worktree to every affected project after a failed
384/// [`remove_root`].
385async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) {
386 for affected in &root.affected_projects {
387 let task = affected.project.update(cx, |project, cx| {
388 project.create_worktree(root.root_path.clone(), true, cx)
389 });
390 task.await.log_err();
391 }
392}
393
394/// Saves the worktree's full git state so it can be restored later.
395///
396/// This creates two detached commits (via [`create_archive_checkpoint`] on
397/// the `GitRepository` trait) that capture the staged and unstaged state
398/// without moving any branch ref. The commits are:
399/// - "WIP staged": a tree matching the current index, parented on HEAD
400/// - "WIP unstaged": a tree with all files (including untracked),
401/// parented on the staged commit
402///
403/// After creating the commits, this function:
404/// 1. Records the commit SHAs, branch name, and paths in a DB record.
405/// 2. Links every thread referencing this worktree to that record.
406/// 3. Creates a git ref on the main repo to prevent GC of the commits.
407///
408/// On success, returns the archived worktree DB row ID for rollback.
409pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Result<i64> {
410 let worktree_repo = root.worktree_repo.clone();
411
412 let original_commit_hash = worktree_repo
413 .update(cx, |repo, _cx| repo.head_sha())
414 .await
415 .map_err(|_| anyhow!("head_sha canceled"))?
416 .context("failed to read original HEAD SHA")?
417 .context("HEAD SHA is None")?;
418
419 // Create two detached WIP commits without moving the branch.
420 let checkpoint_rx = worktree_repo.update(cx, |repo, _cx| repo.create_archive_checkpoint());
421 let (staged_commit_hash, unstaged_commit_hash) = checkpoint_rx
422 .await
423 .map_err(|_| anyhow!("create_archive_checkpoint canceled"))?
424 .context("failed to create archive checkpoint")?;
425
426 // Create DB record
427 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
428 let worktree_path_str = root.root_path.to_string_lossy().to_string();
429 let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string();
430 let branch_name = root.branch_name.clone().or_else(|| {
431 worktree_repo.read_with(cx, |repo, _cx| {
432 repo.snapshot()
433 .branch
434 .as_ref()
435 .map(|branch| branch.name().to_string())
436 })
437 });
438
439 let db_result = store
440 .read_with(cx, |store, cx| {
441 store.create_archived_worktree(
442 worktree_path_str.clone(),
443 main_repo_path_str.clone(),
444 branch_name.clone(),
445 staged_commit_hash.clone(),
446 unstaged_commit_hash.clone(),
447 original_commit_hash.clone(),
448 cx,
449 )
450 })
451 .await
452 .context("failed to create archived worktree DB record");
453 let archived_worktree_id = match db_result {
454 Ok(id) => id,
455 Err(error) => {
456 return Err(error);
457 }
458 };
459
460 // Link all threads on this worktree to the archived record
461 let thread_ids: Vec<ThreadId> = store.read_with(cx, |store, _cx| {
462 store
463 .entries()
464 .filter(|thread| {
465 thread
466 .folder_paths()
467 .paths()
468 .iter()
469 .any(|p| p.as_path() == root.root_path)
470 })
471 .map(|thread| thread.thread_id)
472 .collect()
473 });
474
475 for thread_id in &thread_ids {
476 let link_result = store
477 .read_with(cx, |store, cx| {
478 store.link_thread_to_archived_worktree(*thread_id, archived_worktree_id, cx)
479 })
480 .await;
481 if let Err(error) = link_result {
482 if let Err(delete_error) = store
483 .read_with(cx, |store, cx| {
484 store.delete_archived_worktree(archived_worktree_id, cx)
485 })
486 .await
487 {
488 log::error!(
489 "Failed to delete archived worktree DB record during link rollback: \
490 {delete_error:#}"
491 );
492 }
493 return Err(error.context("failed to link thread to archived worktree"));
494 }
495 }
496
497 // Create git ref on main repo to prevent GC of the detached commits.
498 // This is fatal: without the ref, git gc will eventually collect the
499 // WIP commits and a later restore will silently fail.
500 let ref_name = archived_worktree_ref_name(archived_worktree_id);
501 let (main_repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx)
502 .await
503 .context("could not open main repo to create archive ref")?;
504 let rx = main_repo.update(cx, |repo, _cx| {
505 repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
506 });
507 rx.await
508 .map_err(|_| anyhow!("update_ref canceled"))
509 .and_then(|r| r)
510 .with_context(|| format!("failed to create ref {ref_name} on main repo"))?;
511 drop(_temp_project);
512
513 Ok(archived_worktree_id)
514}
515
516/// Undoes a successful [`persist_worktree_state`] by deleting the git ref
517/// on the main repo and removing the DB record. Since the WIP commits are
518/// detached (they don't move any branch), no git reset is needed — the
519/// commits will be garbage-collected once the ref is removed.
520pub async fn rollback_persist(archived_worktree_id: i64, root: &RootPlan, cx: &mut AsyncApp) {
521 // Delete the git ref on main repo
522 if let Ok((main_repo, _temp_project)) =
523 find_or_create_repository(&root.main_repo_path, cx).await
524 {
525 let ref_name = archived_worktree_ref_name(archived_worktree_id);
526 let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
527 rx.await.ok().and_then(|r| r.log_err());
528 drop(_temp_project);
529 }
530
531 // Delete the DB record
532 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
533 if let Err(error) = store
534 .read_with(cx, |store, cx| {
535 store.delete_archived_worktree(archived_worktree_id, cx)
536 })
537 .await
538 {
539 log::error!("Failed to delete archived worktree DB record during rollback: {error:#}");
540 }
541}
542
/// Restores a previously archived worktree back to disk from its DB record.
///
/// Creates the git worktree at the original commit (the branch never moved
/// during archival since WIP commits are detached), switches to the branch,
/// then uses [`restore_archive_checkpoint`] to reconstruct the staged/
/// unstaged state from the WIP commit trees.
///
/// Returns the path the worktree was restored to. On failure, any worktree
/// *created by this call* is removed again via [`remove_new_worktree_on_error`].
pub async fn restore_worktree_via_git(
    row: &ArchivedGitWorktree,
    cx: &mut AsyncApp,
) -> Result<PathBuf> {
    let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;

    let worktree_path = &row.worktree_path;
    let app_state = current_app_state(cx).context("no app state available")?;
    let already_exists = app_state.fs.metadata(worktree_path).await?.is_some();

    // Track whether *we* created the directory, so error paths only clean
    // up worktrees this call made (never a pre-existing directory).
    let created_new_worktree = if already_exists {
        let is_git_worktree =
            resolve_git_worktree_to_main_repo(app_state.fs.as_ref(), worktree_path)
                .await
                .is_some();

        // The directory exists but isn't recognized as a git worktree —
        // ask git to repair the worktree links.
        if !is_git_worktree {
            let rx = main_repo.update(cx, |repo, _cx| repo.repair_worktrees());
            rx.await
                .map_err(|_| anyhow!("worktree repair was canceled"))?
                .context("failed to repair worktrees")?;
        }
        false
    } else {
        // Create worktree at the original commit — the branch still points
        // here because archival used detached commits.
        let rx = main_repo.update(cx, |repo, _cx| {
            repo.create_worktree_detached(worktree_path.clone(), row.original_commit_hash.clone())
        });
        rx.await
            .map_err(|_| anyhow!("worktree creation was canceled"))?
            .context("failed to create worktree")?;
        true
    };

    let (wt_repo, _temp_wt_project) = match find_or_create_repository(worktree_path, cx).await {
        Ok(result) => result,
        Err(error) => {
            remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx).await;
            return Err(error);
        }
    };

    if let Some(branch_name) = &row.branch_name {
        // Attempt to check out the branch the worktree was previously on.
        let checkout_result = wt_repo
            .update(cx, |repo, _cx| repo.change_branch(branch_name.clone()))
            .await;

        match checkout_result.map_err(|e| anyhow!("{e}")).flatten() {
            Ok(()) => {
                // Branch checkout succeeded. Check whether the branch has moved since
                // we archived the worktree, by comparing HEAD to the expected SHA.
                let head_sha = wt_repo
                    .update(cx, |repo, _cx| repo.head_sha())
                    .await
                    .map_err(|e| anyhow!("{e}"))
                    .and_then(|r| r);

                match head_sha {
                    Ok(Some(sha)) if sha == row.original_commit_hash => {
                        // Branch still points at the original commit; we're all done!
                    }
                    Ok(Some(sha)) => {
                        // The branch has moved. We don't want to restore the worktree to
                        // a different filesystem state, so checkout the original commit
                        // in detached HEAD state.
                        log::info!(
                            "Branch '{branch_name}' has moved since archival (now at {sha}); \
                             restoring worktree in detached HEAD at {}",
                            row.original_commit_hash
                        );
                        let detach_result = main_repo
                            .update(cx, |repo, _cx| {
                                repo.checkout_branch_in_worktree(
                                    row.original_commit_hash.clone(),
                                    row.worktree_path.clone(),
                                    false,
                                )
                            })
                            .await;

                        // Detach failure is non-fatal: the checkpoint restore
                        // below still runs against whatever HEAD we have.
                        if let Err(error) = detach_result.map_err(|e| anyhow!("{e}")).flatten() {
                            log::warn!(
                                "Failed to detach HEAD at {}: {error:#}",
                                row.original_commit_hash
                            );
                        }
                    }
                    Ok(None) => {
                        log::warn!(
                            "head_sha unexpectedly returned None after checking out \"{branch_name}\"; \
                             proceeding in current HEAD state."
                        );
                    }
                    Err(error) => {
                        log::warn!(
                            "Failed to read HEAD after checking out \"{branch_name}\": {error:#}"
                        );
                    }
                }
            }
            Err(checkout_error) => {
                // We weren't able to check out the branch, most likely because it was deleted.
                // This is fine; users will often delete old branches! We'll try to recreate it.
                log::debug!(
                    "change_branch('{branch_name}') failed: {checkout_error:#}, trying create_branch"
                );
                let create_result = wt_repo
                    .update(cx, |repo, _cx| {
                        repo.create_branch(branch_name.clone(), None)
                    })
                    .await;

                if let Err(error) = create_result.map_err(|e| anyhow!("{e}")).flatten() {
                    log::warn!(
                        "Failed to create branch '{branch_name}': {error:#}; \
                         restored worktree will be in detached HEAD state."
                    );
                }
            }
        }
    }

    // Restore the staged/unstaged state from the WIP commit trees.
    // read-tree --reset -u applies the unstaged tree (including deletions)
    // to the working directory, then a bare read-tree sets the index to
    // the staged tree without touching the working directory.
    let restore_rx = wt_repo.update(cx, |repo, _cx| {
        repo.restore_archive_checkpoint(
            row.staged_commit_hash.clone(),
            row.unstaged_commit_hash.clone(),
        )
    });
    if let Err(error) = restore_rx
        .await
        .map_err(|_| anyhow!("restore_archive_checkpoint canceled"))
        .and_then(|r| r)
    {
        remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx).await;
        return Err(error.context("failed to restore archive checkpoint"));
    }

    Ok(worktree_path.clone())
}
694
695async fn remove_new_worktree_on_error(
696 created_new_worktree: bool,
697 main_repo: &Entity<Repository>,
698 worktree_path: &PathBuf,
699 cx: &mut AsyncApp,
700) {
701 if created_new_worktree {
702 let rx = main_repo.update(cx, |repo, _cx| {
703 repo.remove_worktree(worktree_path.clone(), true)
704 });
705 rx.await.ok().and_then(|r| r.log_err());
706 }
707}
708
709/// Deletes the git ref and DB records for a single archived worktree.
710/// Used when an archived worktree is no longer referenced by any thread.
711pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
712 // Delete the git ref from the main repo
713 if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
714 {
715 let ref_name = archived_worktree_ref_name(row.id);
716 let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
717 match rx.await {
718 Ok(Ok(())) => {}
719 Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
720 Err(_) => log::warn!("Archive ref deletion was canceled"),
721 }
722 // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
723 drop(_temp_project);
724 }
725
726 // Delete the DB records
727 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
728 store
729 .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx))
730 .await
731 .log_err();
732}
733
734/// Cleans up all archived worktree data associated with a thread being deleted.
735///
736/// This unlinks the thread from all its archived worktrees and, for any
737/// archived worktree that is no longer referenced by any other thread,
738/// deletes the git ref and DB records.
739pub async fn cleanup_thread_archived_worktrees(thread_id: ThreadId, cx: &mut AsyncApp) {
740 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
741
742 let archived_worktrees = store
743 .read_with(cx, |store, cx| {
744 store.get_archived_worktrees_for_thread(thread_id, cx)
745 })
746 .await;
747 let archived_worktrees = match archived_worktrees {
748 Ok(rows) => rows,
749 Err(error) => {
750 log::error!("Failed to fetch archived worktrees for thread {thread_id:?}: {error:#}");
751 return;
752 }
753 };
754
755 if archived_worktrees.is_empty() {
756 return;
757 }
758
759 if let Err(error) = store
760 .read_with(cx, |store, cx| {
761 store.unlink_thread_from_all_archived_worktrees(thread_id, cx)
762 })
763 .await
764 {
765 log::error!("Failed to unlink thread {thread_id:?} from archived worktrees: {error:#}");
766 return;
767 }
768
769 for row in &archived_worktrees {
770 let still_referenced = store
771 .read_with(cx, |store, cx| {
772 store.is_archived_worktree_referenced(row.id, cx)
773 })
774 .await;
775 match still_referenced {
776 Ok(true) => {}
777 Ok(false) => {
778 cleanup_archived_worktree_record(row, cx).await;
779 }
780 Err(error) => {
781 log::error!(
782 "Failed to check if archived worktree {} is still referenced: {error:#}",
783 row.id
784 );
785 }
786 }
787 }
788}
789
790/// Collects every `Workspace` entity across all open `MultiWorkspace` windows.
791pub fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
792 cx.windows()
793 .into_iter()
794 .filter_map(|window| window.downcast::<MultiWorkspace>())
795 .flat_map(|multi_workspace| {
796 multi_workspace
797 .read(cx)
798 .map(|multi_workspace| multi_workspace.workspaces().cloned().collect::<Vec<_>>())
799 .unwrap_or_default()
800 })
801 .collect()
802}
803
804fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
805 cx.update(|cx| {
806 all_open_workspaces(cx)
807 .into_iter()
808 .next()
809 .map(|workspace| workspace.read(cx).app_state().clone())
810 })
811}
812#[cfg(test)]
813mod tests {
814 use super::*;
815 use fs::{FakeFs, Fs as _};
816 use git::repository::Worktree as GitWorktree;
817 use gpui::TestAppContext;
818 use project::Project;
819 use serde_json::json;
820 use settings::SettingsStore;
821 use tempfile::TempDir;
822 use workspace::MultiWorkspace;
823
    /// Shared setup for the gpui tests below: installs a test settings
    /// store and the theme/editor/release-channel globals that the
    /// workspace machinery reads during these tests.
    fn init_test(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = SettingsStore::test(cx);
            cx.set_global(settings_store);
            theme_settings::init(theme::LoadThemes::JustBase, cx);
            editor::init(cx);
            release_channel::init(semver::Version::new(0, 0, 0), cx);
        });
    }
833
834 #[test]
835 fn test_remove_empty_ancestors_single_empty_parent() {
836 let tmp = TempDir::new().unwrap();
837 let base = tmp.path().join("worktrees");
838 let branch_dir = base.join("my-branch");
839 let child = branch_dir.join("zed");
840
841 std::fs::create_dir_all(&child).unwrap();
842 // Simulate git worktree remove having deleted the child.
843 std::fs::remove_dir(&child).unwrap();
844
845 assert!(branch_dir.exists());
846 remove_empty_ancestors(&child, &base);
847 assert!(!branch_dir.exists(), "empty parent should be removed");
848 assert!(base.exists(), "base directory should be preserved");
849 }
850
851 #[test]
852 fn test_remove_empty_ancestors_nested_empty_parents() {
853 let tmp = TempDir::new().unwrap();
854 let base = tmp.path().join("worktrees");
855 // Branch name with slash creates nested dirs: fix/thing/zed
856 let child = base.join("fix").join("thing").join("zed");
857
858 std::fs::create_dir_all(&child).unwrap();
859 std::fs::remove_dir(&child).unwrap();
860
861 assert!(base.join("fix").join("thing").exists());
862 remove_empty_ancestors(&child, &base);
863 assert!(!base.join("fix").join("thing").exists());
864 assert!(
865 !base.join("fix").exists(),
866 "all empty ancestors should be removed"
867 );
868 assert!(base.exists(), "base directory should be preserved");
869 }
870
871 #[test]
872 fn test_remove_empty_ancestors_stops_at_non_empty_parent() {
873 let tmp = TempDir::new().unwrap();
874 let base = tmp.path().join("worktrees");
875 let branch_dir = base.join("my-branch");
876 let child = branch_dir.join("zed");
877 let sibling = branch_dir.join("other-file.txt");
878
879 std::fs::create_dir_all(&child).unwrap();
880 std::fs::write(&sibling, "content").unwrap();
881 std::fs::remove_dir(&child).unwrap();
882
883 remove_empty_ancestors(&child, &base);
884 assert!(branch_dir.exists(), "non-empty parent should be preserved");
885 assert!(sibling.exists());
886 }
887
888 #[test]
889 fn test_remove_empty_ancestors_not_an_ancestor() {
890 let tmp = TempDir::new().unwrap();
891 let base = tmp.path().join("worktrees");
892 let unrelated = tmp.path().join("other-place").join("branch").join("zed");
893
894 std::fs::create_dir_all(&base).unwrap();
895 std::fs::create_dir_all(&unrelated).unwrap();
896 std::fs::remove_dir(&unrelated).unwrap();
897
898 let parent = unrelated.parent().unwrap();
899 assert!(parent.exists());
900 remove_empty_ancestors(&unrelated, &base);
901 assert!(parent.exists(), "should not remove dirs outside base");
902 }
903
904 #[test]
905 fn test_remove_empty_ancestors_child_is_direct_child_of_base() {
906 let tmp = TempDir::new().unwrap();
907 let base = tmp.path().join("worktrees");
908 let child = base.join("zed");
909
910 std::fs::create_dir_all(&child).unwrap();
911 std::fs::remove_dir(&child).unwrap();
912
913 remove_empty_ancestors(&child, &base);
914 assert!(base.exists(), "base directory should be preserved");
915 }
916
917 #[test]
918 fn test_remove_empty_ancestors_partially_non_empty_chain() {
919 let tmp = TempDir::new().unwrap();
920 let base = tmp.path().join("worktrees");
921 // Structure: base/a/b/c/zed where a/ has another child besides b/
922 let child = base.join("a").join("b").join("c").join("zed");
923 let other_in_a = base.join("a").join("other-branch");
924
925 std::fs::create_dir_all(&child).unwrap();
926 std::fs::create_dir_all(&other_in_a).unwrap();
927 std::fs::remove_dir(&child).unwrap();
928
929 remove_empty_ancestors(&child, &base);
930 assert!(
931 !base.join("a").join("b").join("c").exists(),
932 "c/ should be removed (empty)"
933 );
934 assert!(
935 !base.join("a").join("b").exists(),
936 "b/ should be removed (empty)"
937 );
938 assert!(
939 base.join("a").exists(),
940 "a/ should be preserved (has other-branch sibling)"
941 );
942 assert!(other_in_a.exists());
943 }
944
    /// A path that is the repository's *main* worktree must not be archived:
    /// `build_root_plan` should return `None` so the primary checkout is
    /// never torn down.
    #[gpui::test]
    async fn test_build_root_plan_returns_none_for_main_worktree(cx: &mut TestAppContext) {
        init_test(cx);

        // Fake repository whose single root *is* the main worktree.
        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/project",
            json!({
                ".git": {},
                "src": { "main.rs": "fn main() {}" }
            }),
        )
        .await;
        fs.set_branch_name(Path::new("/project/.git"), Some("main"));

        let project = Project::test(fs.clone(), [Path::new("/project")], cx).await;

        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();

        // Let project scans and window setup settle before planning.
        cx.run_until_parked();

        // The main worktree should NOT produce a root plan.
        workspace.read_with(cx, |_workspace, cx| {
            let plan = build_root_plan(Path::new("/project"), std::slice::from_ref(&workspace), cx);
            assert!(
                plan.is_none(),
                "build_root_plan should return None for a main worktree",
            );
        });
    }
979
    /// A *linked* git worktree should be archivable: `build_root_plan`
    /// returns a plan whose `root_path` is the linked worktree and whose
    /// `main_repo_path` points at the primary checkout, while the main
    /// worktree of the same repository still yields no plan.
    #[gpui::test]
    async fn test_build_root_plan_returns_some_for_linked_worktree(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/project",
            json!({
                ".git": {},
                "src": { "main.rs": "fn main() {}" }
            }),
        )
        .await;
        fs.set_branch_name(Path::new("/project/.git"), Some("main"));
        fs.insert_branches(Path::new("/project/.git"), &["main", "feature"]);

        // Register /linked-worktree as a non-main worktree of /project's
        // repository, checked out on the `feature` branch.
        fs.add_linked_worktree_for_repo(
            Path::new("/project/.git"),
            true,
            GitWorktree {
                path: PathBuf::from("/linked-worktree"),
                ref_name: Some("refs/heads/feature".into()),
                sha: "abc123".into(),
                is_main: false,
                is_bare: false,
            },
        )
        .await;

        // Open both roots in one project and wait for git scans, since
        // build_root_plan reads repository state gathered by the scan.
        let project = Project::test(
            fs.clone(),
            [Path::new("/project"), Path::new("/linked-worktree")],
            cx,
        )
        .await;
        project
            .update(cx, |project, cx| project.git_scans_complete(cx))
            .await;

        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();

        cx.run_until_parked();

        workspace.read_with(cx, |_workspace, cx| {
            // The linked worktree SHOULD produce a root plan.
            let plan = build_root_plan(
                Path::new("/linked-worktree"),
                std::slice::from_ref(&workspace),
                cx,
            );
            assert!(
                plan.is_some(),
                "build_root_plan should return Some for a linked worktree",
            );
            let plan = plan.unwrap();
            assert_eq!(plan.root_path, PathBuf::from("/linked-worktree"));
            assert_eq!(plan.main_repo_path, PathBuf::from("/project"));

            // The main worktree should still return None.
            let main_plan =
                build_root_plan(Path::new("/project"), std::slice::from_ref(&workspace), cx);
            assert!(
                main_plan.is_none(),
                "build_root_plan should return None for the main worktree \
                even when a linked worktree exists",
            );
        });
    }
1052
    /// End-to-end removal: with a `RootPlan` built while the worktree was
    /// still loaded, `remove_root` must delete the linked worktree
    /// directory from the fake filesystem.
    #[gpui::test]
    async fn test_remove_root_deletes_directory_and_git_metadata(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/project",
            json!({
                ".git": {},
                "src": { "main.rs": "fn main() {}" }
            }),
        )
        .await;
        fs.set_branch_name(Path::new("/project/.git"), Some("main"));
        fs.insert_branches(Path::new("/project/.git"), &["main", "feature"]);

        // Register /linked-worktree as a non-main worktree of /project.
        fs.add_linked_worktree_for_repo(
            Path::new("/project/.git"),
            true,
            GitWorktree {
                path: PathBuf::from("/linked-worktree"),
                ref_name: Some("refs/heads/feature".into()),
                sha: "abc123".into(),
                is_main: false,
                is_bare: false,
            },
        )
        .await;

        let project = Project::test(
            fs.clone(),
            [Path::new("/project"), Path::new("/linked-worktree")],
            cx,
        )
        .await;
        project
            .update(cx, |project, cx| project.git_scans_complete(cx))
            .await;

        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();

        cx.run_until_parked();

        // Build the root plan while the worktree is still loaded.
        let root = workspace
            .read_with(cx, |_workspace, cx| {
                build_root_plan(
                    Path::new("/linked-worktree"),
                    std::slice::from_ref(&workspace),
                    cx,
                )
            })
            .expect("should produce a root plan for the linked worktree");

        // Sanity check: the directory exists before removal.
        assert!(fs.is_dir(Path::new("/linked-worktree")).await);

        // Remove the root.
        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
        task.await.expect("remove_root should succeed");

        cx.run_until_parked();

        // The FakeFs directory should be gone (removed by the FakeGitRepository
        // backend's remove_worktree implementation).
        assert!(
            !fs.is_dir(Path::new("/linked-worktree")).await,
            "linked worktree directory should be removed from FakeFs"
        );
    }
1126
    /// `remove_root` must be idempotent with respect to the on-disk
    /// directory: if the linked worktree was deleted externally between
    /// planning and removal, the operation still succeeds.
    #[gpui::test]
    async fn test_remove_root_succeeds_when_directory_already_gone(cx: &mut TestAppContext) {
        init_test(cx);

        let fs = FakeFs::new(cx.executor());
        fs.insert_tree(
            "/project",
            json!({
                ".git": {},
                "src": { "main.rs": "fn main() {}" }
            }),
        )
        .await;
        fs.set_branch_name(Path::new("/project/.git"), Some("main"));
        fs.insert_branches(Path::new("/project/.git"), &["main", "feature"]);

        // Register /linked-worktree as a non-main worktree of /project.
        fs.add_linked_worktree_for_repo(
            Path::new("/project/.git"),
            true,
            GitWorktree {
                path: PathBuf::from("/linked-worktree"),
                ref_name: Some("refs/heads/feature".into()),
                sha: "abc123".into(),
                is_main: false,
                is_bare: false,
            },
        )
        .await;

        let project = Project::test(
            fs.clone(),
            [Path::new("/project"), Path::new("/linked-worktree")],
            cx,
        )
        .await;
        project
            .update(cx, |project, cx| project.git_scans_complete(cx))
            .await;

        let multi_workspace =
            cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace = multi_workspace
            .read_with(cx, |mw, _cx| mw.workspace().clone())
            .unwrap();

        cx.run_until_parked();

        // Build the plan while the worktree is still loaded.
        let root = workspace
            .read_with(cx, |_workspace, cx| {
                build_root_plan(
                    Path::new("/linked-worktree"),
                    std::slice::from_ref(&workspace),
                    cx,
                )
            })
            .expect("should produce a root plan for the linked worktree");

        // Manually remove the worktree directory from FakeFs before calling
        // remove_root, simulating the directory being deleted externally.
        fs.as_ref()
            .remove_dir(
                Path::new("/linked-worktree"),
                fs::RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: false,
                },
            )
            .await
            .unwrap();
        assert!(!fs.as_ref().is_dir(Path::new("/linked-worktree")).await);

        // remove_root should still succeed — the std::fs::remove_dir_all
        // handles NotFound, and git worktree remove handles a missing
        // working tree directory.
        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
        task.await
            .expect("remove_root should succeed even when directory is already gone");
    }
1205
1206 #[test]
1207 fn test_remove_dir_all_deletes_real_directory() {
1208 let tmp = TempDir::new().unwrap();
1209 let worktree_dir = tmp.path().join("linked-worktree");
1210 std::fs::create_dir_all(worktree_dir.join("src")).unwrap();
1211 std::fs::write(worktree_dir.join("src/main.rs"), "fn main() {}").unwrap();
1212 std::fs::write(worktree_dir.join("README.md"), "# Hello").unwrap();
1213
1214 assert!(worktree_dir.is_dir());
1215
1216 // This is the same pattern used in remove_root_after_worktree_removal.
1217 match std::fs::remove_dir_all(&worktree_dir) {
1218 Ok(()) => {}
1219 Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
1220 Err(error) => panic!("unexpected error: {error}"),
1221 }
1222
1223 assert!(
1224 !worktree_dir.exists(),
1225 "worktree directory should be deleted"
1226 );
1227 }
1228
1229 #[test]
1230 fn test_remove_dir_all_handles_not_found() {
1231 let tmp = TempDir::new().unwrap();
1232 let nonexistent = tmp.path().join("does-not-exist");
1233
1234 assert!(!nonexistent.exists());
1235
1236 // Should not panic — NotFound is handled gracefully.
1237 match std::fs::remove_dir_all(&nonexistent) {
1238 Ok(()) => panic!("expected NotFound error"),
1239 Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
1240 Err(error) => panic!("unexpected error: {error}"),
1241 }
1242 }
1243}