1use std::{
2 path::{Path, PathBuf},
3 sync::Arc,
4};
5
6use agent_client_protocol as acp;
7use anyhow::{Context as _, Result, anyhow};
8use git::repository::{AskPassDelegate, CommitOptions, ResetMode};
9use gpui::{App, AsyncApp, Entity, Task};
10use project::{
11 LocalProjectFlags, Project, WorktreeId,
12 git_store::{Repository, resolve_git_worktree_to_main_repo},
13};
14use util::ResultExt;
15use workspace::{AppState, MultiWorkspace, Workspace};
16
17use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadMetadataStore};
18
/// The plan for archiving a single git worktree root.
///
/// A thread can have multiple folder paths open, so there may be multiple
/// `RootPlan`s per archival operation. Each one captures everything needed to
/// persist the worktree's git state and then remove it from disk.
///
/// All fields are gathered synchronously by [`build_root_plan`] while the
/// worktree is still loaded in open projects. This is important because
/// workspace removal tears down project and repository entities, making
/// them unavailable for the later async persist/remove steps.
#[derive(Clone)]
pub struct RootPlan {
    /// Absolute path of the git worktree on disk.
    pub root_path: PathBuf,
    /// Absolute path to the main git repository this worktree is linked to.
    /// Used both for creating a git ref to prevent GC of WIP commits during
    /// [`persist_worktree_state`], and for `git worktree remove` during
    /// [`remove_root`].
    pub main_repo_path: PathBuf,
    /// Every open `Project` that has this worktree loaded, so they can all
    /// call `remove_worktree` and release it during [`remove_root`].
    /// Multiple projects can reference the same path when the user has the
    /// worktree open in more than one workspace.
    pub affected_projects: Vec<AffectedProject>,
    /// The `Repository` entity for this worktree, used to run git commands
    /// (create WIP commits, stage files, reset) during
    /// [`persist_worktree_state`]. `None` when the `GitStore` hasn't created
    /// a `Repository` for this worktree yet — in that case,
    /// `persist_worktree_state` falls back to creating a temporary headless
    /// project to obtain one.
    pub worktree_repo: Option<Entity<Repository>>,
    /// The branch the worktree was on, so it can be restored later.
    /// `None` if the worktree was in detached HEAD state or if no
    /// `Repository` entity was available at planning time (in which case
    /// `persist_worktree_state` reads it from the repo snapshot instead).
    pub branch_name: Option<String>,
}
56
/// A `Project` that references a worktree being archived, paired with the
/// `WorktreeId` it uses for that worktree.
///
/// The same worktree path can appear in multiple open workspaces/projects
/// (e.g. when the user has two windows open that both include the same
/// linked worktree). Each one needs to call `remove_worktree` and wait for
/// the release during [`remove_root`], otherwise the project would still
/// hold a reference to the directory and `git worktree remove` would fail.
#[derive(Clone)]
pub struct AffectedProject {
    /// The project that currently has the worktree loaded.
    pub project: Entity<Project>,
    /// The id this particular project assigned to the worktree; ids are
    /// per-project, so each `AffectedProject` carries its own.
    pub worktree_id: WorktreeId,
}
70
/// Builds the git ref name that pins an archived worktree's WIP commits
/// (e.g. `refs/archived-worktrees/42` for DB row id 42), preventing git
/// from garbage-collecting them after the worktree directory is removed.
fn archived_worktree_ref_name(id: i64) -> String {
    format!("refs/archived-worktrees/{id}")
}
74
/// The result of a successful [`persist_worktree_state`] call.
///
/// Carries exactly the information needed to roll back the persist via
/// [`rollback_persist`]: the DB row ID (to delete the record and the
/// corresponding `refs/archived-worktrees/<id>` git ref) and the staged
/// commit hash (to `git reset` back past both WIP commits).
pub struct PersistOutcome {
    /// Row id of the `ArchivedGitWorktree` DB record created by the persist.
    pub archived_worktree_id: i64,
    /// SHA of WIP commit #1; `<staged>~1` is the original HEAD, so resetting
    /// to it undoes both WIP commits.
    pub staged_commit_hash: String,
}
85
86/// Builds a [`RootPlan`] for archiving the git worktree at `path`.
87///
88/// This is a synchronous planning step that must run *before* any workspace
89/// removal, because it needs live project and repository entities that are
90/// torn down when a workspace is removed. It does three things:
91///
92/// 1. Finds every `Project` across all open workspaces that has this
93/// worktree loaded (`affected_projects`).
94/// 2. Looks for a `Repository` entity whose snapshot identifies this path
95/// as a linked worktree (`worktree_repo`), which is needed for the git
96/// operations in [`persist_worktree_state`].
97/// 3. Determines the `main_repo_path` — the parent repo that owns this
98/// linked worktree — needed for both git ref creation and
99/// `git worktree remove`.
100///
101/// When no `Repository` entity is available (e.g. the `GitStore` hasn't
102/// finished scanning), the function falls back to deriving `main_repo_path`
103/// from the worktree snapshot's `root_repo_common_dir`. In that case
104/// `worktree_repo` is `None` and [`persist_worktree_state`] will create a
105/// temporary headless project to obtain one.
106///
107/// Returns `None` if no open project has this path as a visible worktree.
108pub fn build_root_plan(
109 path: &Path,
110 workspaces: &[Entity<Workspace>],
111 cx: &App,
112) -> Option<RootPlan> {
113 let path = path.to_path_buf();
114
115 let affected_projects = workspaces
116 .iter()
117 .filter_map(|workspace| {
118 let project = workspace.read(cx).project().clone();
119 let worktree = project
120 .read(cx)
121 .visible_worktrees(cx)
122 .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?;
123 let worktree_id = worktree.read(cx).id();
124 Some(AffectedProject {
125 project,
126 worktree_id,
127 })
128 })
129 .collect::<Vec<_>>();
130
131 if affected_projects.is_empty() {
132 return None;
133 }
134
135 let linked_repo = workspaces
136 .iter()
137 .flat_map(|workspace| {
138 workspace
139 .read(cx)
140 .project()
141 .read(cx)
142 .repositories(cx)
143 .values()
144 .cloned()
145 .collect::<Vec<_>>()
146 })
147 .find_map(|repo| {
148 let snapshot = repo.read(cx).snapshot();
149 (snapshot.is_linked_worktree()
150 && snapshot.work_directory_abs_path.as_ref() == path.as_path())
151 .then_some((snapshot, repo))
152 });
153
154 let matching_worktree_snapshot = workspaces.iter().find_map(|workspace| {
155 workspace
156 .read(cx)
157 .project()
158 .read(cx)
159 .visible_worktrees(cx)
160 .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())
161 .map(|worktree| worktree.read(cx).snapshot())
162 });
163
164 let (main_repo_path, worktree_repo, branch_name) =
165 if let Some((linked_snapshot, repo)) = linked_repo {
166 (
167 linked_snapshot.original_repo_abs_path.to_path_buf(),
168 Some(repo),
169 linked_snapshot
170 .branch
171 .as_ref()
172 .map(|branch| branch.name().to_string()),
173 )
174 } else {
175 let main_repo_path = matching_worktree_snapshot
176 .as_ref()?
177 .root_repo_common_dir()
178 .and_then(|dir| dir.parent())?
179 .to_path_buf();
180 (main_repo_path, None, None)
181 };
182
183 Some(RootPlan {
184 root_path: path,
185 main_repo_path,
186 affected_projects,
187 worktree_repo,
188 branch_name,
189 })
190}
191
192/// Returns `true` if any unarchived thread other than `current_session_id`
193/// references `path` in its folder paths. Used to determine whether a
194/// worktree can safely be removed from disk.
195pub fn path_is_referenced_by_other_unarchived_threads(
196 current_session_id: &acp::SessionId,
197 path: &Path,
198 cx: &App,
199) -> bool {
200 ThreadMetadataStore::global(cx)
201 .read(cx)
202 .entries()
203 .filter(|thread| thread.session_id != *current_session_id)
204 .filter(|thread| !thread.archived)
205 .any(|thread| {
206 thread
207 .folder_paths
208 .paths()
209 .iter()
210 .any(|other_path| other_path.as_path() == path)
211 })
212}
213
214/// Removes a worktree from all affected projects and deletes it from disk
215/// via `git worktree remove`.
216///
217/// This is the destructive counterpart to [`persist_worktree_state`]. It
218/// first detaches the worktree from every [`AffectedProject`], waits for
219/// each project to fully release it, then asks the main repository to
220/// delete the worktree directory. If the git removal fails, the worktree
221/// is re-added to each project via [`rollback_root`].
222pub async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
223 let release_tasks: Vec<_> = root
224 .affected_projects
225 .iter()
226 .map(|affected| {
227 let project = affected.project.clone();
228 let worktree_id = affected.worktree_id;
229 project.update(cx, |project, cx| {
230 let wait = project.wait_for_worktree_release(worktree_id, cx);
231 project.remove_worktree(worktree_id, cx);
232 wait
233 })
234 })
235 .collect();
236
237 if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
238 rollback_root(&root, cx).await;
239 return Err(error);
240 }
241
242 Ok(())
243}
244
245async fn remove_root_after_worktree_removal(
246 root: &RootPlan,
247 release_tasks: Vec<Task<Result<()>>>,
248 cx: &mut AsyncApp,
249) -> Result<()> {
250 for task in release_tasks {
251 if let Err(error) = task.await {
252 log::error!("Failed waiting for worktree release: {error:#}");
253 }
254 }
255
256 let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
257 let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
258 repo.remove_worktree(root.root_path.clone(), false)
259 });
260 let result = receiver
261 .await
262 .map_err(|_| anyhow!("git worktree removal was canceled"))?;
263 // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
264 drop(_temp_project);
265 result
266}
267
268/// Finds a live `Repository` entity for the given path, or creates a temporary
269/// `Project::local` to obtain one.
270///
271/// `Repository` entities can only be obtained through a `Project` because
272/// `GitStore` (which creates and manages `Repository` entities) is owned by
273/// `Project`. When no open workspace contains the repo we need, we spin up a
274/// headless `Project::local` just to get a `Repository` handle. The caller
275/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
276/// git operations, then drops it.
277///
278/// Future improvement: decoupling `GitStore` from `Project` so that
279/// `Repository` entities can be created standalone would eliminate this
280/// temporary-project workaround.
281async fn find_or_create_repository(
282 repo_path: &Path,
283 cx: &mut AsyncApp,
284) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
285 let repo_path_owned = repo_path.to_path_buf();
286 let live_repo = cx.update(|cx| {
287 all_open_workspaces(cx)
288 .into_iter()
289 .flat_map(|workspace| {
290 workspace
291 .read(cx)
292 .project()
293 .read(cx)
294 .repositories(cx)
295 .values()
296 .cloned()
297 .collect::<Vec<_>>()
298 })
299 .find(|repo| {
300 repo.read(cx).snapshot().work_directory_abs_path.as_ref()
301 == repo_path_owned.as_path()
302 })
303 });
304
305 if let Some(repo) = live_repo {
306 return Ok((repo, None));
307 }
308
309 let app_state =
310 current_app_state(cx).context("no app state available for temporary project")?;
311 let temp_project = cx.update(|cx| {
312 Project::local(
313 app_state.client.clone(),
314 app_state.node_runtime.clone(),
315 app_state.user_store.clone(),
316 app_state.languages.clone(),
317 app_state.fs.clone(),
318 None,
319 LocalProjectFlags::default(),
320 cx,
321 )
322 });
323
324 let repo_path_for_worktree = repo_path.to_path_buf();
325 let create_worktree = temp_project.update(cx, |project, cx| {
326 project.create_worktree(repo_path_for_worktree, true, cx)
327 });
328 let _worktree = create_worktree.await?;
329 let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx));
330 initial_scan.await;
331
332 let repo_path_for_find = repo_path.to_path_buf();
333 let repo = temp_project
334 .update(cx, |project, cx| {
335 project
336 .repositories(cx)
337 .values()
338 .find(|repo| {
339 repo.read(cx).snapshot().work_directory_abs_path.as_ref()
340 == repo_path_for_find.as_path()
341 })
342 .cloned()
343 })
344 .context("failed to resolve temporary repository handle")?;
345
346 let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier());
347 barrier
348 .await
349 .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
350 Ok((repo, Some(temp_project)))
351}
352
353/// Re-adds the worktree to every affected project after a failed
354/// [`remove_root`].
355async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) {
356 for affected in &root.affected_projects {
357 let task = affected.project.update(cx, |project, cx| {
358 project.create_worktree(root.root_path.clone(), true, cx)
359 });
360 task.await.log_err();
361 }
362}
363
364/// Saves the worktree's full git state so it can be restored later.
365///
366/// This is a multi-step operation:
367/// 1. Records the original HEAD SHA.
368/// 2. Creates WIP commit #1 ("staged") capturing the current index.
369/// 3. Stages everything including untracked files, then creates WIP commit
370/// #2 ("unstaged") capturing the full working directory.
371/// 4. Creates a DB record (`ArchivedGitWorktree`) with all the SHAs, the
372/// branch name, and both paths.
373/// 5. Links every thread that references this worktree to the DB record.
374/// 6. Creates a git ref (`refs/archived-worktrees/<id>`) on the main repo
375/// pointing at the unstaged commit, preventing git from
376/// garbage-collecting the WIP commits after the worktree is deleted.
377///
378/// Each step has rollback logic: if step N fails, steps 1..N-1 are undone.
379/// On success, returns a [`PersistOutcome`] that can be passed to
380/// [`rollback_persist`] if a later step in the archival pipeline fails.
381pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Result<PersistOutcome> {
382 let (worktree_repo, _temp_worktree_project) = match &root.worktree_repo {
383 Some(worktree_repo) => (worktree_repo.clone(), None),
384 None => find_or_create_repository(&root.root_path, cx).await?,
385 };
386
387 // Read original HEAD SHA before creating any WIP commits
388 let original_commit_hash = worktree_repo
389 .update(cx, |repo, _cx| repo.head_sha())
390 .await
391 .map_err(|_| anyhow!("head_sha canceled"))?
392 .context("failed to read original HEAD SHA")?
393 .context("HEAD SHA is None before WIP commits")?;
394
395 // Create WIP commit #1 (staged state)
396 let askpass = AskPassDelegate::new(cx, |_, _, _| {});
397 let commit_rx = worktree_repo.update(cx, |repo, cx| {
398 repo.commit(
399 "WIP staged".into(),
400 None,
401 CommitOptions {
402 allow_empty: true,
403 ..Default::default()
404 },
405 askpass,
406 cx,
407 )
408 });
409 commit_rx
410 .await
411 .map_err(|_| anyhow!("WIP staged commit canceled"))??;
412
413 // Read SHA after staged commit
414 let staged_sha_result = worktree_repo
415 .update(cx, |repo, _cx| repo.head_sha())
416 .await
417 .map_err(|_| anyhow!("head_sha canceled"))
418 .and_then(|r| r.context("failed to read HEAD SHA after staged commit"))
419 .and_then(|opt| opt.context("HEAD SHA is None after staged commit"));
420 let staged_commit_hash = match staged_sha_result {
421 Ok(sha) => sha,
422 Err(error) => {
423 let rx = worktree_repo.update(cx, |repo, cx| {
424 repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
425 });
426 rx.await.ok().and_then(|r| r.log_err());
427 return Err(error);
428 }
429 };
430
431 // Stage all files including untracked
432 let stage_rx = worktree_repo.update(cx, |repo, _cx| repo.stage_all_including_untracked());
433 if let Err(error) = stage_rx
434 .await
435 .map_err(|_| anyhow!("stage all canceled"))
436 .and_then(|inner| inner)
437 {
438 let rx = worktree_repo.update(cx, |repo, cx| {
439 repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
440 });
441 rx.await.ok().and_then(|r| r.log_err());
442 return Err(error.context("failed to stage all files including untracked"));
443 }
444
445 // Create WIP commit #2 (unstaged/untracked state)
446 let askpass = AskPassDelegate::new(cx, |_, _, _| {});
447 let commit_rx = worktree_repo.update(cx, |repo, cx| {
448 repo.commit(
449 "WIP unstaged".into(),
450 None,
451 CommitOptions {
452 allow_empty: true,
453 ..Default::default()
454 },
455 askpass,
456 cx,
457 )
458 });
459 if let Err(error) = commit_rx
460 .await
461 .map_err(|_| anyhow!("WIP unstaged commit canceled"))
462 .and_then(|inner| inner)
463 {
464 let rx = worktree_repo.update(cx, |repo, cx| {
465 repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
466 });
467 rx.await.ok().and_then(|r| r.log_err());
468 return Err(error);
469 }
470
471 // Read HEAD SHA after WIP commits
472 let head_sha_result = worktree_repo
473 .update(cx, |repo, _cx| repo.head_sha())
474 .await
475 .map_err(|_| anyhow!("head_sha canceled"))
476 .and_then(|r| r.context("failed to read HEAD SHA after WIP commits"))
477 .and_then(|opt| opt.context("HEAD SHA is None after WIP commits"));
478 let unstaged_commit_hash = match head_sha_result {
479 Ok(sha) => sha,
480 Err(error) => {
481 let rx = worktree_repo.update(cx, |repo, cx| {
482 repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
483 });
484 rx.await.ok().and_then(|r| r.log_err());
485 return Err(error);
486 }
487 };
488
489 // Create DB record
490 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
491 let worktree_path_str = root.root_path.to_string_lossy().to_string();
492 let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string();
493 let branch_name = root.branch_name.clone().or_else(|| {
494 worktree_repo.read_with(cx, |repo, _cx| {
495 repo.snapshot()
496 .branch
497 .as_ref()
498 .map(|branch| branch.name().to_string())
499 })
500 });
501
502 let db_result = store
503 .read_with(cx, |store, cx| {
504 store.create_archived_worktree(
505 worktree_path_str.clone(),
506 main_repo_path_str.clone(),
507 branch_name.clone(),
508 staged_commit_hash.clone(),
509 unstaged_commit_hash.clone(),
510 original_commit_hash.clone(),
511 cx,
512 )
513 })
514 .await
515 .context("failed to create archived worktree DB record");
516 let archived_worktree_id = match db_result {
517 Ok(id) => id,
518 Err(error) => {
519 let rx = worktree_repo.update(cx, |repo, cx| {
520 repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
521 });
522 rx.await.ok().and_then(|r| r.log_err());
523 return Err(error);
524 }
525 };
526
527 // Link all threads on this worktree to the archived record
528 let session_ids: Vec<acp::SessionId> = store.read_with(cx, |store, _cx| {
529 store
530 .entries()
531 .filter(|thread| {
532 thread
533 .folder_paths
534 .paths()
535 .iter()
536 .any(|p| p.as_path() == root.root_path)
537 })
538 .map(|thread| thread.session_id.clone())
539 .collect()
540 });
541
542 for session_id in &session_ids {
543 let link_result = store
544 .read_with(cx, |store, cx| {
545 store.link_thread_to_archived_worktree(
546 session_id.0.to_string(),
547 archived_worktree_id,
548 cx,
549 )
550 })
551 .await;
552 if let Err(error) = link_result {
553 if let Err(delete_error) = store
554 .read_with(cx, |store, cx| {
555 store.delete_archived_worktree(archived_worktree_id, cx)
556 })
557 .await
558 {
559 log::error!(
560 "Failed to delete archived worktree DB record during link rollback: {delete_error:#}"
561 );
562 }
563 let rx = worktree_repo.update(cx, |repo, cx| {
564 repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
565 });
566 rx.await.ok().and_then(|r| r.log_err());
567 return Err(error.context("failed to link thread to archived worktree"));
568 }
569 }
570
571 // Create git ref on main repo (non-fatal)
572 let ref_name = archived_worktree_ref_name(archived_worktree_id);
573 let main_repo_result = find_or_create_repository(&root.main_repo_path, cx).await;
574 match main_repo_result {
575 Ok((main_repo, _temp_project)) => {
576 let rx = main_repo.update(cx, |repo, _cx| {
577 repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
578 });
579 if let Err(error) = rx
580 .await
581 .map_err(|_| anyhow!("update_ref canceled"))
582 .and_then(|r| r)
583 {
584 log::warn!(
585 "Failed to create ref {} on main repo (non-fatal): {error}",
586 ref_name
587 );
588 }
589 // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
590 drop(_temp_project);
591 }
592 Err(error) => {
593 log::warn!(
594 "Could not find main repo to create ref {} (non-fatal): {error}",
595 ref_name
596 );
597 }
598 }
599
600 Ok(PersistOutcome {
601 archived_worktree_id,
602 staged_commit_hash,
603 })
604}
605
606/// Undoes a successful [`persist_worktree_state`] by resetting the WIP
607/// commits, deleting the git ref on the main repo, and removing the DB
608/// record.
609pub async fn rollback_persist(outcome: &PersistOutcome, root: &RootPlan, cx: &mut AsyncApp) {
610 // Undo WIP commits on the worktree repo
611 if let Some(worktree_repo) = &root.worktree_repo {
612 let rx = worktree_repo.update(cx, |repo, cx| {
613 repo.reset(
614 format!("{}~1", outcome.staged_commit_hash),
615 ResetMode::Mixed,
616 cx,
617 )
618 });
619 rx.await.ok().and_then(|r| r.log_err());
620 }
621
622 // Delete the git ref on main repo
623 if let Ok((main_repo, _temp_project)) =
624 find_or_create_repository(&root.main_repo_path, cx).await
625 {
626 let ref_name = archived_worktree_ref_name(outcome.archived_worktree_id);
627 let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
628 rx.await.ok().and_then(|r| r.log_err());
629 // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
630 drop(_temp_project);
631 }
632
633 // Delete the DB record
634 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
635 if let Err(error) = store
636 .read_with(cx, |store, cx| {
637 store.delete_archived_worktree(outcome.archived_worktree_id, cx)
638 })
639 .await
640 {
641 log::error!("Failed to delete archived worktree DB record during rollback: {error:#}");
642 }
643}
644
/// Restores a previously archived worktree back to disk from its DB record.
///
/// Re-creates the git worktree (or adopts an existing directory), resets
/// past the two WIP commits to recover the original working directory
/// state, verifies HEAD matches the expected commit, and restores the
/// original branch if one was recorded.
///
/// Returns the absolute path of the restored worktree.
pub async fn restore_worktree_via_git(
    row: &ArchivedGitWorktree,
    cx: &mut AsyncApp,
) -> Result<PathBuf> {
    let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;

    // Check if worktree path already exists on disk
    let worktree_path = &row.worktree_path;
    let app_state = current_app_state(cx).context("no app state available")?;
    let already_exists = app_state.fs.metadata(worktree_path).await?.is_some();

    if already_exists {
        let is_git_worktree =
            resolve_git_worktree_to_main_repo(app_state.fs.as_ref(), worktree_path)
                .await
                .is_some();

        if is_git_worktree {
            // Already a git worktree — another thread on the same worktree
            // already restored it. Reuse as-is.
            return Ok(worktree_path.clone());
        }

        // Path exists but isn't a git worktree. Ask git to adopt it.
        let rx = main_repo.update(cx, |repo, _cx| repo.repair_worktrees());
        rx.await
            .map_err(|_| anyhow!("worktree repair was canceled"))?
            .context("failed to repair worktrees")?;
    } else {
        // Create detached worktree at the unstaged commit
        let rx = main_repo.update(cx, |repo, _cx| {
            repo.create_worktree_detached(worktree_path.clone(), row.unstaged_commit_hash.clone())
        });
        rx.await
            .map_err(|_| anyhow!("worktree creation was canceled"))?
            .context("failed to create worktree")?;
    }

    // Get the worktree's repo entity
    let (wt_repo, _temp_wt_project) = find_or_create_repository(worktree_path, cx).await?;

    // Reset past the WIP commits to recover original state.
    // Mixed reset to the staged commit: HEAD moves back past WIP commit #2
    // ("unstaged"), leaving its contents — the untracked/unstaged changes —
    // in the working directory.
    let mixed_reset_ok = {
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.staged_commit_hash.clone(), ResetMode::Mixed, cx)
        });
        match rx.await {
            Ok(Ok(())) => true,
            Ok(Err(error)) => {
                log::error!("Mixed reset to staged commit failed: {error:#}");
                false
            }
            Err(_) => {
                log::error!("Mixed reset to staged commit was canceled");
                false
            }
        }
    };

    // Soft reset to the original commit: HEAD moves back past WIP commit #1
    // ("staged") while keeping its contents in the index, re-creating the
    // originally staged state. Only attempted if the mixed reset succeeded.
    let soft_reset_ok = if mixed_reset_ok {
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.original_commit_hash.clone(), ResetMode::Soft, cx)
        });
        match rx.await {
            Ok(Ok(())) => true,
            Ok(Err(error)) => {
                log::error!("Soft reset to original commit failed: {error:#}");
                false
            }
            Err(_) => {
                log::error!("Soft reset to original commit was canceled");
                false
            }
        }
    } else {
        false
    };

    // If either WIP reset failed, fall back to a mixed reset directly to
    // original_commit_hash so we at least land on the right commit (the
    // staged/unstaged split is lost in that case).
    if !mixed_reset_ok || !soft_reset_ok {
        log::warn!(
            "WIP reset(s) failed (mixed_ok={mixed_reset_ok}, soft_ok={soft_reset_ok}); \
             falling back to mixed reset to original commit {}",
            row.original_commit_hash
        );
        let rx = wt_repo.update(cx, |repo, cx| {
            repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
        });
        match rx.await {
            Ok(Ok(())) => {}
            Ok(Err(error)) => {
                return Err(error.context(format!(
                    "fallback reset to original commit {} also failed",
                    row.original_commit_hash
                )));
            }
            Err(_) => {
                return Err(anyhow!(
                    "fallback reset to original commit {} was canceled",
                    row.original_commit_hash
                ));
            }
        }
    }

    // Verify HEAD is at original_commit_hash
    let current_head = wt_repo
        .update(cx, |repo, _cx| repo.head_sha())
        .await
        .map_err(|_| anyhow!("post-restore head_sha was canceled"))?
        .context("failed to read HEAD after restore")?
        .context("HEAD is None after restore")?;

    if current_head != row.original_commit_hash {
        anyhow::bail!(
            "After restore, HEAD is at {current_head} but expected {}. \
             The worktree may be in an inconsistent state.",
            row.original_commit_hash
        );
    }

    // Restore the branch
    if let Some(branch_name) = &row.branch_name {
        // Try switching to the recorded branch first; only afterwards verify
        // that it actually points at original_commit_hash.
        let rx = wt_repo.update(cx, |repo, _cx| repo.change_branch(branch_name.clone()));
        if matches!(rx.await, Ok(Ok(()))) {
            // Verify the branch actually points at original_commit_hash after switching
            let head_after_switch = wt_repo
                .update(cx, |repo, _cx| repo.head_sha())
                .await
                .ok()
                .and_then(|r| r.ok())
                .flatten();

            if head_after_switch.as_deref() != Some(&row.original_commit_hash) {
                // Branch exists but doesn't point at the right commit: force
                // it onto original_commit_hash via a mixed reset (reset moves
                // the currently checked-out branch ref).
                log::warn!(
                    "Branch '{}' exists but points at {:?}, not {}. Creating fresh branch.",
                    branch_name,
                    head_after_switch,
                    row.original_commit_hash
                );
                let rx = wt_repo.update(cx, |repo, cx| {
                    repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
                });
                rx.await.ok().and_then(|r| r.log_err());
                // NOTE(review): the reset above already repositions the
                // checked-out branch, so this create_branch presumably fails
                // (branch exists) and is swallowed by log_err — confirm
                // whether it is needed at all.
                let rx = wt_repo.update(cx, |repo, _cx| {
                    repo.create_branch(branch_name.clone(), None)
                });
                rx.await.ok().and_then(|r| r.log_err());
            }
        } else {
            // Branch doesn't exist or can't be switched to — create it.
            let rx = wt_repo.update(cx, |repo, _cx| {
                repo.create_branch(branch_name.clone(), None)
            });
            if let Ok(Err(error)) | Err(error) = rx.await.map_err(|e| anyhow::anyhow!("{e}")) {
                log::warn!(
                    "Could not create branch '{}': {error} — \
                     restored worktree is in detached HEAD state.",
                    branch_name
                );
            }
        }
    }

    Ok(worktree_path.clone())
}
823
824/// Deletes the git ref and DB records for a single archived worktree.
825/// Used when an archived worktree is no longer referenced by any thread.
826pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
827 // Delete the git ref from the main repo
828 if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
829 {
830 let ref_name = archived_worktree_ref_name(row.id);
831 let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
832 match rx.await {
833 Ok(Ok(())) => {}
834 Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
835 Err(_) => log::warn!("Archive ref deletion was canceled"),
836 }
837 // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
838 drop(_temp_project);
839 }
840
841 // Delete the DB records
842 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
843 store
844 .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx))
845 .await
846 .log_err();
847}
848
849/// Cleans up all archived worktree data associated with a thread being deleted.
850///
851/// This unlinks the thread from all its archived worktrees and, for any
852/// archived worktree that is no longer referenced by any other thread,
853/// deletes the git ref and DB records.
854pub async fn cleanup_thread_archived_worktrees(session_id: &acp::SessionId, cx: &mut AsyncApp) {
855 let store = cx.update(|cx| ThreadMetadataStore::global(cx));
856
857 let archived_worktrees = store
858 .read_with(cx, |store, cx| {
859 store.get_archived_worktrees_for_thread(session_id.0.to_string(), cx)
860 })
861 .await;
862 let archived_worktrees = match archived_worktrees {
863 Ok(rows) => rows,
864 Err(error) => {
865 log::error!(
866 "Failed to fetch archived worktrees for thread {}: {error:#}",
867 session_id.0
868 );
869 return;
870 }
871 };
872
873 if archived_worktrees.is_empty() {
874 return;
875 }
876
877 if let Err(error) = store
878 .read_with(cx, |store, cx| {
879 store.unlink_thread_from_all_archived_worktrees(session_id.0.to_string(), cx)
880 })
881 .await
882 {
883 log::error!(
884 "Failed to unlink thread {} from archived worktrees: {error:#}",
885 session_id.0
886 );
887 return;
888 }
889
890 for row in &archived_worktrees {
891 let still_referenced = store
892 .read_with(cx, |store, cx| {
893 store.is_archived_worktree_referenced(row.id, cx)
894 })
895 .await;
896 match still_referenced {
897 Ok(true) => {}
898 Ok(false) => {
899 cleanup_archived_worktree_record(row, cx).await;
900 }
901 Err(error) => {
902 log::error!(
903 "Failed to check if archived worktree {} is still referenced: {error:#}",
904 row.id
905 );
906 }
907 }
908 }
909}
910
911/// Collects every `Workspace` entity across all open `MultiWorkspace` windows.
912pub fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
913 cx.windows()
914 .into_iter()
915 .filter_map(|window| window.downcast::<MultiWorkspace>())
916 .flat_map(|multi_workspace| {
917 multi_workspace
918 .read(cx)
919 .map(|multi_workspace| multi_workspace.workspaces().cloned().collect::<Vec<_>>())
920 .unwrap_or_default()
921 })
922 .collect()
923}
924
925fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
926 cx.update(|cx| {
927 all_open_workspaces(cx)
928 .into_iter()
929 .next()
930 .map(|workspace| workspace.read(cx).app_state().clone())
931 })
932}