Detailed changes
@@ -16083,6 +16083,7 @@ dependencies = [
"git",
"gpui",
"language_model",
+ "log",
"menu",
"platform_title_bar",
"pretty_assertions",
@@ -16093,6 +16094,7 @@ dependencies = [
"serde",
"serde_json",
"settings",
+ "smol",
"theme",
"theme_settings",
"ui",
@@ -4761,7 +4761,7 @@ mod tests {
language_model::LanguageModelRegistry::test(cx);
});
- // --- Create a MultiWorkspace window with two workspaces ---
+ // Create a MultiWorkspace window with two workspaces.
let fs = FakeFs::new(cx.executor());
let project_a = Project::test(fs.clone(), [], cx).await;
let project_b = Project::test(fs, [], cx).await;
@@ -4790,7 +4790,7 @@ mod tests {
let cx = &mut VisualTestContext::from_window(multi_workspace.into(), cx);
- // --- Set up workspace A: with an active thread ---
+ // Set up workspace A: with an active thread.
let panel_a = workspace_a.update_in(cx, |workspace, window, cx| {
cx.new(|cx| AgentPanel::new(workspace, None, window, cx))
});
@@ -4816,7 +4816,7 @@ mod tests {
let agent_type_a = panel_a.read_with(cx, |panel, _cx| panel.selected_agent.clone());
- // --- Set up workspace B: ClaudeCode, no active thread ---
+ // Set up workspace B: ClaudeCode, no active thread.
let panel_b = workspace_b.update_in(cx, |workspace, window, cx| {
cx.new(|cx| AgentPanel::new(workspace, None, window, cx))
});
@@ -4827,12 +4827,12 @@ mod tests {
};
});
- // --- Serialize both panels ---
+ // Serialize both panels.
panel_a.update(cx, |panel, cx| panel.serialize(cx));
panel_b.update(cx, |panel, cx| panel.serialize(cx));
cx.run_until_parked();
- // --- Load fresh panels for each workspace and verify independent state ---
+ // Load fresh panels for each workspace and verify independent state.
let async_cx = cx.update(|window, cx| window.to_async(cx));
let loaded_a = AgentPanel::load(workspace_a.downgrade(), async_cx)
.await
@@ -33,6 +33,7 @@ mod thread_history;
mod thread_history_view;
mod thread_import;
pub mod thread_metadata_store;
+pub mod thread_worktree_archive;
mod thread_worktree_picker;
pub mod threads_archive_view;
mod ui;
@@ -2663,6 +2663,13 @@ impl ConversationView {
if let Some(store) = ThreadMetadataStore::try_global(cx) {
store.update(cx, |store, cx| store.delete(session_id.clone(), cx));
}
+
+ let session_id = session_id.clone();
+ cx.spawn(async move |_this, cx| {
+ crate::thread_worktree_archive::cleanup_thread_archived_worktrees(&session_id, cx)
+ .await;
+ })
+ .detach();
}
}
@@ -190,6 +190,7 @@ pub struct ThreadMetadataStore {
reload_task: Option<Shared<Task<()>>>,
session_subscriptions: HashMap<acp::SessionId, Subscription>,
pending_thread_ops_tx: smol::channel::Sender<DbOperation>,
+ in_flight_archives: HashMap<acp::SessionId, (Task<()>, smol::channel::Sender<()>)>,
_db_operations_task: Task<()>,
}
@@ -403,12 +404,53 @@ impl ThreadMetadataStore {
}
}
- pub fn archive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
+ pub fn archive(
+ &mut self,
+ session_id: &acp::SessionId,
+ archive_job: Option<(Task<()>, smol::channel::Sender<()>)>,
+ cx: &mut Context<Self>,
+ ) {
self.update_archived(session_id, true, cx);
+
+ if let Some(job) = archive_job {
+ self.in_flight_archives.insert(session_id.clone(), job);
+ }
}
pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context<Self>) {
self.update_archived(session_id, false, cx);
+ // Removing the entry drops both the Task and the Sender; dropping the Sender signals cancellation to the background task.
+ self.in_flight_archives.remove(session_id);
+ }
+
+ pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) {
+ self.in_flight_archives.remove(session_id);
+ }
+
+ /// Updates a thread's `folder_paths` after an archived worktree has been
+ /// restored to disk. The restored worktree may land at a different path
+ /// than it had before archival, so each `(old_path, new_path)` pair in
+ /// `path_replacements` is applied to the thread's stored folder paths.
+ pub fn update_restored_worktree_paths(
+ &mut self,
+ session_id: &acp::SessionId,
+ path_replacements: &[(PathBuf, PathBuf)],
+ cx: &mut Context<Self>,
+ ) {
+ if let Some(thread) = self.threads.get(session_id).cloned() {
+ let mut paths: Vec<PathBuf> = thread.folder_paths.paths().to_vec();
+ for (old_path, new_path) in path_replacements {
+ if let Some(pos) = paths.iter().position(|p| p == old_path) {
+ paths[pos] = new_path.clone();
+ }
+ }
+ let new_folder_paths = PathList::new(&paths);
+ self.save_internal(ThreadMetadata {
+ folder_paths: new_folder_paths,
+ ..thread
+ });
+ cx.notify();
+ }
}
pub fn create_archived_worktree(
@@ -462,6 +504,30 @@ impl ThreadMetadataStore {
cx.background_spawn(async move { db.delete_archived_worktree(id).await })
}
+ pub fn unlink_thread_from_all_archived_worktrees(
+ &self,
+ session_id: String,
+ cx: &App,
+ ) -> Task<anyhow::Result<()>> {
+ let db = self.db.clone();
+ cx.background_spawn(async move {
+ db.unlink_thread_from_all_archived_worktrees(session_id)
+ .await
+ })
+ }
+
+ pub fn is_archived_worktree_referenced(
+ &self,
+ archived_worktree_id: i64,
+ cx: &App,
+ ) -> Task<anyhow::Result<bool>> {
+ let db = self.db.clone();
+ cx.background_spawn(async move {
+ db.is_archived_worktree_referenced(archived_worktree_id)
+ .await
+ })
+ }
+
fn update_archived(
&mut self,
session_id: &acp::SessionId,
@@ -564,6 +630,7 @@ impl ThreadMetadataStore {
reload_task: None,
session_subscriptions: HashMap::default(),
pending_thread_ops_tx: tx,
+ in_flight_archives: HashMap::default(),
_db_operations_task,
};
let _ = this.reload(cx);
@@ -872,6 +939,31 @@ impl ThreadMetadataDb {
})
.await
}
+
+ pub async fn unlink_thread_from_all_archived_worktrees(
+ &self,
+ session_id: String,
+ ) -> anyhow::Result<()> {
+ self.write(move |conn| {
+ let mut stmt = Statement::prepare(
+ conn,
+ "DELETE FROM thread_archived_worktrees WHERE session_id = ?",
+ )?;
+ stmt.bind(&session_id, 1)?;
+ stmt.exec()
+ })
+ .await
+ }
+
+ pub async fn is_archived_worktree_referenced(
+ &self,
+ archived_worktree_id: i64,
+ ) -> anyhow::Result<bool> {
+ self.select_row_bound::<i64, i64>(
+ "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1",
+ )?(archived_worktree_id)
+ .map(|count| count.unwrap_or(0) > 0)
+ }
}
impl Column for ThreadMetadata {
@@ -1812,10 +1904,11 @@ mod tests {
cx.update(|cx| {
let store = ThreadMetadataStore::global(cx);
store.update(cx, |store, cx| {
- store.archive(&acp::SessionId::new("session-1"), cx);
+ store.archive(&acp::SessionId::new("session-1"), None, cx);
});
});
+ // Thread 1 should now be archived.
cx.run_until_parked();
cx.update(|cx| {
@@ -1889,7 +1982,7 @@ mod tests {
cx.update(|cx| {
let store = ThreadMetadataStore::global(cx);
store.update(cx, |store, cx| {
- store.archive(&acp::SessionId::new("session-2"), cx);
+ store.archive(&acp::SessionId::new("session-2"), None, cx);
});
});
@@ -1989,7 +2082,7 @@ mod tests {
cx.update(|cx| {
let store = ThreadMetadataStore::global(cx);
store.update(cx, |store, cx| {
- store.archive(&acp::SessionId::new("session-1"), cx);
+ store.archive(&acp::SessionId::new("session-1"), None, cx);
});
});
@@ -2037,7 +2130,7 @@ mod tests {
cx.update(|cx| {
let store = ThreadMetadataStore::global(cx);
store.update(cx, |store, cx| {
- store.archive(&acp::SessionId::new("nonexistent"), cx);
+ store.archive(&acp::SessionId::new("nonexistent"), None, cx);
});
});
@@ -2066,7 +2159,7 @@ mod tests {
let store = ThreadMetadataStore::global(cx);
store.update(cx, |store, cx| {
store.save(metadata.clone(), cx);
- store.archive(&session_id, cx);
+ store.archive(&session_id, None, cx);
});
});
@@ -2226,6 +2319,97 @@ mod tests {
assert_eq!(wt1[0].id, wt2[0].id);
}
+ #[gpui::test]
+ async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) {
+ init_test(cx);
+ let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+
+ let original_paths = PathList::new(&[
+ Path::new("/projects/worktree-a"),
+ Path::new("/projects/worktree-b"),
+ Path::new("/other/unrelated"),
+ ]);
+ let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths);
+
+ store.update(cx, |store, cx| {
+ store.save_manually(meta, cx);
+ });
+
+ let replacements = vec![
+ (
+ PathBuf::from("/projects/worktree-a"),
+ PathBuf::from("/restored/worktree-a"),
+ ),
+ (
+ PathBuf::from("/projects/worktree-b"),
+ PathBuf::from("/restored/worktree-b"),
+ ),
+ ];
+
+ store.update(cx, |store, cx| {
+ store.update_restored_worktree_paths(
+ &acp::SessionId::new("session-multi"),
+ &replacements,
+ cx,
+ );
+ });
+
+ let entry = store.read_with(cx, |store, _cx| {
+ store.entry(&acp::SessionId::new("session-multi")).cloned()
+ });
+ let entry = entry.unwrap();
+ let paths = entry.folder_paths.paths();
+ assert_eq!(paths.len(), 3);
+ assert!(paths.contains(&PathBuf::from("/restored/worktree-a")));
+ assert!(paths.contains(&PathBuf::from("/restored/worktree-b")));
+ assert!(paths.contains(&PathBuf::from("/other/unrelated")));
+ }
+
+ #[gpui::test]
+ async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) {
+ init_test(cx);
+ let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+
+ let original_paths =
+ PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]);
+ let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths);
+
+ store.update(cx, |store, cx| {
+ store.save_manually(meta, cx);
+ });
+
+ let replacements = vec![
+ (
+ PathBuf::from("/projects/worktree-a"),
+ PathBuf::from("/new/worktree-a"),
+ ),
+ (
+ PathBuf::from("/nonexistent/path"),
+ PathBuf::from("/should/not/appear"),
+ ),
+ ];
+
+ store.update(cx, |store, cx| {
+ store.update_restored_worktree_paths(
+ &acp::SessionId::new("session-partial"),
+ &replacements,
+ cx,
+ );
+ });
+
+ let entry = store.read_with(cx, |store, _cx| {
+ store
+ .entry(&acp::SessionId::new("session-partial"))
+ .cloned()
+ });
+ let entry = entry.unwrap();
+ let paths = entry.folder_paths.paths();
+ assert_eq!(paths.len(), 2);
+ assert!(paths.contains(&PathBuf::from("/new/worktree-a")));
+ assert!(paths.contains(&PathBuf::from("/other/path")));
+ assert!(!paths.contains(&PathBuf::from("/should/not/appear")));
+ }
+
#[gpui::test]
async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) {
init_test(cx);
@@ -0,0 +1,932 @@
+use std::{
+ path::{Path, PathBuf},
+ sync::Arc,
+};
+
+use agent_client_protocol as acp;
+use anyhow::{Context as _, Result, anyhow};
+use git::repository::{AskPassDelegate, CommitOptions, ResetMode};
+use gpui::{App, AsyncApp, Entity, Task};
+use project::{
+ LocalProjectFlags, Project, WorktreeId,
+ git_store::{Repository, resolve_git_worktree_to_main_repo},
+};
+use util::ResultExt;
+use workspace::{AppState, MultiWorkspace, Workspace};
+
+use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadMetadataStore};
+
+/// The plan for archiving a single git worktree root.
+///
+/// A thread can have multiple folder paths open, so there may be multiple
+/// `RootPlan`s per archival operation. Each one captures everything needed to
+/// persist the worktree's git state and then remove it from disk.
+///
+/// All fields are gathered synchronously by [`build_root_plan`] while the
+/// worktree is still loaded in open projects. This is important because
+/// workspace removal tears down project and repository entities, making
+/// them unavailable for the later async persist/remove steps.
+#[derive(Clone)]
+pub struct RootPlan {
+ /// Absolute path of the git worktree on disk.
+ pub root_path: PathBuf,
+ /// Absolute path to the main git repository this worktree is linked to.
+ /// Used both for creating a git ref to prevent GC of WIP commits during
+ /// [`persist_worktree_state`], and for `git worktree remove` during
+ /// [`remove_root`].
+ pub main_repo_path: PathBuf,
+ /// Every open `Project` that has this worktree loaded, so they can all
+ /// call `remove_worktree` and release it during [`remove_root`].
+ /// Multiple projects can reference the same path when the user has the
+ /// worktree open in more than one workspace.
+ pub affected_projects: Vec<AffectedProject>,
+ /// The `Repository` entity for this worktree, used to run git commands
+ /// (create WIP commits, stage files, reset) during
+ /// [`persist_worktree_state`]. `None` when the `GitStore` hasn't created
+ /// a `Repository` for this worktree yet — in that case,
+ /// `persist_worktree_state` falls back to creating a temporary headless
+ /// project to obtain one.
+ pub worktree_repo: Option<Entity<Repository>>,
+ /// The branch the worktree was on, so it can be restored later.
+ /// `None` if the worktree was in detached HEAD state or if no
+ /// `Repository` entity was available at planning time (in which case
+ /// `persist_worktree_state` reads it from the repo snapshot instead).
+ pub branch_name: Option<String>,
+}
+
+/// A `Project` that references a worktree being archived, paired with the
+/// `WorktreeId` it uses for that worktree.
+///
+/// The same worktree path can appear in multiple open workspaces/projects
+/// (e.g. when the user has two windows open that both include the same
+/// linked worktree). Each one needs to call `remove_worktree` and wait for
+/// the release during [`remove_root`], otherwise the project would still
+/// hold a reference to the directory and `git worktree remove` would fail.
+#[derive(Clone)]
+pub struct AffectedProject {
+ pub project: Entity<Project>,
+ pub worktree_id: WorktreeId,
+}
+
+fn archived_worktree_ref_name(id: i64) -> String {
+ format!("refs/archived-worktrees/{}", id)
+}
+
+/// The result of a successful [`persist_worktree_state`] call.
+///
+/// Carries exactly the information needed to roll back the persist via
+/// [`rollback_persist`]: the DB row ID (to delete the record and the
+/// corresponding `refs/archived-worktrees/<id>` git ref) and the staged
+/// commit hash (to `git reset` back past both WIP commits).
+pub struct PersistOutcome {
+ pub archived_worktree_id: i64,
+ pub staged_commit_hash: String,
+}
+
+/// Builds a [`RootPlan`] for archiving the git worktree at `path`.
+///
+/// This is a synchronous planning step that must run *before* any workspace
+/// removal, because it needs live project and repository entities that are
+/// torn down when a workspace is removed. It does three things:
+///
+/// 1. Finds every `Project` across all open workspaces that has this
+/// worktree loaded (`affected_projects`).
+/// 2. Looks for a `Repository` entity whose snapshot identifies this path
+/// as a linked worktree (`worktree_repo`), which is needed for the git
+/// operations in [`persist_worktree_state`].
+/// 3. Determines the `main_repo_path` — the parent repo that owns this
+/// linked worktree — needed for both git ref creation and
+/// `git worktree remove`.
+///
+/// When no `Repository` entity is available (e.g. the `GitStore` hasn't
+/// finished scanning), the function falls back to deriving `main_repo_path`
+/// from the worktree snapshot's `root_repo_common_dir`. In that case
+/// `worktree_repo` is `None` and [`persist_worktree_state`] will create a
+/// temporary headless project to obtain one.
+///
+/// Returns `None` if no open project has this path as a visible worktree.
+pub fn build_root_plan(
+ path: &Path,
+ workspaces: &[Entity<Workspace>],
+ cx: &App,
+) -> Option<RootPlan> {
+ let path = path.to_path_buf();
+
+ let affected_projects = workspaces
+ .iter()
+ .filter_map(|workspace| {
+ let project = workspace.read(cx).project().clone();
+ let worktree = project
+ .read(cx)
+ .visible_worktrees(cx)
+ .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?;
+ let worktree_id = worktree.read(cx).id();
+ Some(AffectedProject {
+ project,
+ worktree_id,
+ })
+ })
+ .collect::<Vec<_>>();
+
+ if affected_projects.is_empty() {
+ return None;
+ }
+
+ let linked_repo = workspaces
+ .iter()
+ .flat_map(|workspace| {
+ workspace
+ .read(cx)
+ .project()
+ .read(cx)
+ .repositories(cx)
+ .values()
+ .cloned()
+ .collect::<Vec<_>>()
+ })
+ .find_map(|repo| {
+ let snapshot = repo.read(cx).snapshot();
+ (snapshot.is_linked_worktree()
+ && snapshot.work_directory_abs_path.as_ref() == path.as_path())
+ .then_some((snapshot, repo))
+ });
+
+ let matching_worktree_snapshot = workspaces.iter().find_map(|workspace| {
+ workspace
+ .read(cx)
+ .project()
+ .read(cx)
+ .visible_worktrees(cx)
+ .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())
+ .map(|worktree| worktree.read(cx).snapshot())
+ });
+
+ let (main_repo_path, worktree_repo, branch_name) =
+ if let Some((linked_snapshot, repo)) = linked_repo {
+ (
+ linked_snapshot.original_repo_abs_path.to_path_buf(),
+ Some(repo),
+ linked_snapshot
+ .branch
+ .as_ref()
+ .map(|branch| branch.name().to_string()),
+ )
+ } else {
+ let main_repo_path = matching_worktree_snapshot
+ .as_ref()?
+ .root_repo_common_dir()
+ .and_then(|dir| dir.parent())?
+ .to_path_buf();
+ (main_repo_path, None, None)
+ };
+
+ Some(RootPlan {
+ root_path: path,
+ main_repo_path,
+ affected_projects,
+ worktree_repo,
+ branch_name,
+ })
+}
+
+/// Returns `true` if any unarchived thread other than `current_session_id`
+/// references `path` in its folder paths. Used to determine whether a
+/// worktree can safely be removed from disk.
+pub fn path_is_referenced_by_other_unarchived_threads(
+ current_session_id: &acp::SessionId,
+ path: &Path,
+ cx: &App,
+) -> bool {
+ ThreadMetadataStore::global(cx)
+ .read(cx)
+ .entries()
+ .filter(|thread| thread.session_id != *current_session_id)
+ .filter(|thread| !thread.archived)
+ .any(|thread| {
+ thread
+ .folder_paths
+ .paths()
+ .iter()
+ .any(|other_path| other_path.as_path() == path)
+ })
+}
+
+/// Removes a worktree from all affected projects and deletes it from disk
+/// via `git worktree remove`.
+///
+/// This is the destructive counterpart to [`persist_worktree_state`]. It
+/// first detaches the worktree from every [`AffectedProject`], waits for
+/// each project to fully release it, then asks the main repository to
+/// delete the worktree directory. If the git removal fails, the worktree
+/// is re-added to each project via [`rollback_root`].
+pub async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
+ let release_tasks: Vec<_> = root
+ .affected_projects
+ .iter()
+ .map(|affected| {
+ let project = affected.project.clone();
+ let worktree_id = affected.worktree_id;
+ project.update(cx, |project, cx| {
+ let wait = project.wait_for_worktree_release(worktree_id, cx);
+ project.remove_worktree(worktree_id, cx);
+ wait
+ })
+ })
+ .collect();
+
+ if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
+ rollback_root(&root, cx).await;
+ return Err(error);
+ }
+
+ Ok(())
+}
+
+async fn remove_root_after_worktree_removal(
+ root: &RootPlan,
+ release_tasks: Vec<Task<Result<()>>>,
+ cx: &mut AsyncApp,
+) -> Result<()> {
+ for task in release_tasks {
+ if let Err(error) = task.await {
+ log::error!("Failed waiting for worktree release: {error:#}");
+ }
+ }
+
+ let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
+ let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
+ repo.remove_worktree(root.root_path.clone(), false)
+ });
+ let result = receiver
+ .await
+ .map_err(|_| anyhow!("git worktree removal was canceled"))?;
+ // Explicit drop placed after the awaits above: the headless project must stay alive for the whole git operation.
+ drop(_temp_project);
+ result
+}
+
+/// Finds a live `Repository` entity for the given path, or creates a temporary
+/// `Project::local` to obtain one.
+///
+/// `Repository` entities can only be obtained through a `Project` because
+/// `GitStore` (which creates and manages `Repository` entities) is owned by
+/// `Project`. When no open workspace contains the repo we need, we spin up a
+/// headless `Project::local` just to get a `Repository` handle. The caller
+/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
+/// git operations, then drops it.
+///
+/// Future improvement: decoupling `GitStore` from `Project` so that
+/// `Repository` entities can be created standalone would eliminate this
+/// temporary-project workaround.
+async fn find_or_create_repository(
+ repo_path: &Path,
+ cx: &mut AsyncApp,
+) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
+ let repo_path_owned = repo_path.to_path_buf();
+ let live_repo = cx.update(|cx| {
+ all_open_workspaces(cx)
+ .into_iter()
+ .flat_map(|workspace| {
+ workspace
+ .read(cx)
+ .project()
+ .read(cx)
+ .repositories(cx)
+ .values()
+ .cloned()
+ .collect::<Vec<_>>()
+ })
+ .find(|repo| {
+ repo.read(cx).snapshot().work_directory_abs_path.as_ref()
+ == repo_path_owned.as_path()
+ })
+ });
+
+ if let Some(repo) = live_repo {
+ return Ok((repo, None));
+ }
+
+ let app_state =
+ current_app_state(cx).context("no app state available for temporary project")?;
+ let temp_project = cx.update(|cx| {
+ Project::local(
+ app_state.client.clone(),
+ app_state.node_runtime.clone(),
+ app_state.user_store.clone(),
+ app_state.languages.clone(),
+ app_state.fs.clone(),
+ None,
+ LocalProjectFlags::default(),
+ cx,
+ )
+ });
+
+ let repo_path_for_worktree = repo_path.to_path_buf();
+ let create_worktree = temp_project.update(cx, |project, cx| {
+ project.create_worktree(repo_path_for_worktree, true, cx)
+ });
+ let _worktree = create_worktree.await?;
+ let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx));
+ initial_scan.await;
+
+ let repo_path_for_find = repo_path.to_path_buf();
+ let repo = temp_project
+ .update(cx, |project, cx| {
+ project
+ .repositories(cx)
+ .values()
+ .find(|repo| {
+ repo.read(cx).snapshot().work_directory_abs_path.as_ref()
+ == repo_path_for_find.as_path()
+ })
+ .cloned()
+ })
+ .context("failed to resolve temporary repository handle")?;
+
+ let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier());
+ barrier
+ .await
+ .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
+ Ok((repo, Some(temp_project)))
+}
+
+/// Re-adds the worktree to every affected project after a failed
+/// [`remove_root`].
+async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) {
+ for affected in &root.affected_projects {
+ let task = affected.project.update(cx, |project, cx| {
+ project.create_worktree(root.root_path.clone(), true, cx)
+ });
+ task.await.log_err();
+ }
+}
+
+/// Saves the worktree's full git state so it can be restored later.
+///
+/// This is a multi-step operation:
+/// 1. Records the original HEAD SHA.
+/// 2. Creates WIP commit #1 ("staged") capturing the current index.
+/// 3. Stages everything including untracked files, then creates WIP commit
+/// #2 ("unstaged") capturing the full working directory.
+/// 4. Creates a DB record (`ArchivedGitWorktree`) with all the SHAs, the
+/// branch name, and both paths.
+/// 5. Links every thread that references this worktree to the DB record.
+/// 6. Creates a git ref (`refs/archived-worktrees/<id>`) on the main repo
+/// pointing at the unstaged commit, preventing git from
+/// garbage-collecting the WIP commits after the worktree is deleted.
+///
+/// Each step has rollback logic: if step N fails, steps 1..N-1 are undone.
+/// On success, returns a [`PersistOutcome`] that can be passed to
+/// [`rollback_persist`] if a later step in the archival pipeline fails.
+pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Result<PersistOutcome> {
+ let (worktree_repo, _temp_worktree_project) = match &root.worktree_repo {
+ Some(worktree_repo) => (worktree_repo.clone(), None),
+ None => find_or_create_repository(&root.root_path, cx).await?,
+ };
+
+ // Read original HEAD SHA before creating any WIP commits
+ let original_commit_hash = worktree_repo
+ .update(cx, |repo, _cx| repo.head_sha())
+ .await
+ .map_err(|_| anyhow!("head_sha canceled"))?
+ .context("failed to read original HEAD SHA")?
+ .context("HEAD SHA is None before WIP commits")?;
+
+ // Create WIP commit #1 (staged state)
+ let askpass = AskPassDelegate::new(cx, |_, _, _| {});
+ let commit_rx = worktree_repo.update(cx, |repo, cx| {
+ repo.commit(
+ "WIP staged".into(),
+ None,
+ CommitOptions {
+ allow_empty: true,
+ ..Default::default()
+ },
+ askpass,
+ cx,
+ )
+ });
+ commit_rx
+ .await
+ .map_err(|_| anyhow!("WIP staged commit canceled"))??;
+
+ // Read SHA after staged commit
+ let staged_sha_result = worktree_repo
+ .update(cx, |repo, _cx| repo.head_sha())
+ .await
+ .map_err(|_| anyhow!("head_sha canceled"))
+ .and_then(|r| r.context("failed to read HEAD SHA after staged commit"))
+ .and_then(|opt| opt.context("HEAD SHA is None after staged commit"));
+ let staged_commit_hash = match staged_sha_result {
+ Ok(sha) => sha,
+ Err(error) => {
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ return Err(error);
+ }
+ };
+
+ // Stage all files including untracked
+ let stage_rx = worktree_repo.update(cx, |repo, _cx| repo.stage_all_including_untracked());
+ if let Err(error) = stage_rx
+ .await
+ .map_err(|_| anyhow!("stage all canceled"))
+ .and_then(|inner| inner)
+ {
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ return Err(error.context("failed to stage all files including untracked"));
+ }
+
+ // Create WIP commit #2 (unstaged/untracked state)
+ let askpass = AskPassDelegate::new(cx, |_, _, _| {});
+ let commit_rx = worktree_repo.update(cx, |repo, cx| {
+ repo.commit(
+ "WIP unstaged".into(),
+ None,
+ CommitOptions {
+ allow_empty: true,
+ ..Default::default()
+ },
+ askpass,
+ cx,
+ )
+ });
+ if let Err(error) = commit_rx
+ .await
+ .map_err(|_| anyhow!("WIP unstaged commit canceled"))
+ .and_then(|inner| inner)
+ {
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset("HEAD~1".to_string(), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ return Err(error);
+ }
+
+ // Read HEAD SHA after WIP commits
+ let head_sha_result = worktree_repo
+ .update(cx, |repo, _cx| repo.head_sha())
+ .await
+ .map_err(|_| anyhow!("head_sha canceled"))
+ .and_then(|r| r.context("failed to read HEAD SHA after WIP commits"))
+ .and_then(|opt| opt.context("HEAD SHA is None after WIP commits"));
+ let unstaged_commit_hash = match head_sha_result {
+ Ok(sha) => sha,
+ Err(error) => {
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ return Err(error);
+ }
+ };
+
+ // Create DB record
+ let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+ let worktree_path_str = root.root_path.to_string_lossy().to_string();
+ let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string();
+ let branch_name = root.branch_name.clone().or_else(|| {
+ worktree_repo.read_with(cx, |repo, _cx| {
+ repo.snapshot()
+ .branch
+ .as_ref()
+ .map(|branch| branch.name().to_string())
+ })
+ });
+
+ let db_result = store
+ .read_with(cx, |store, cx| {
+ store.create_archived_worktree(
+ worktree_path_str.clone(),
+ main_repo_path_str.clone(),
+ branch_name.clone(),
+ staged_commit_hash.clone(),
+ unstaged_commit_hash.clone(),
+ original_commit_hash.clone(),
+ cx,
+ )
+ })
+ .await
+ .context("failed to create archived worktree DB record");
+ let archived_worktree_id = match db_result {
+ Ok(id) => id,
+ Err(error) => {
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ return Err(error);
+ }
+ };
+
+ // Link all threads on this worktree to the archived record
+ let session_ids: Vec<acp::SessionId> = store.read_with(cx, |store, _cx| {
+ store
+ .entries()
+ .filter(|thread| {
+ thread
+ .folder_paths
+ .paths()
+ .iter()
+ .any(|p| p.as_path() == root.root_path)
+ })
+ .map(|thread| thread.session_id.clone())
+ .collect()
+ });
+
+ for session_id in &session_ids {
+ let link_result = store
+ .read_with(cx, |store, cx| {
+ store.link_thread_to_archived_worktree(
+ session_id.0.to_string(),
+ archived_worktree_id,
+ cx,
+ )
+ })
+ .await;
+ if let Err(error) = link_result {
+ if let Err(delete_error) = store
+ .read_with(cx, |store, cx| {
+ store.delete_archived_worktree(archived_worktree_id, cx)
+ })
+ .await
+ {
+ log::error!(
+ "Failed to delete archived worktree DB record during link rollback: {delete_error:#}"
+ );
+ }
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset(format!("{}~1", staged_commit_hash), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ return Err(error.context("failed to link thread to archived worktree"));
+ }
+ }
+
+ // Create git ref on main repo (non-fatal)
+ let ref_name = archived_worktree_ref_name(archived_worktree_id);
+ let main_repo_result = find_or_create_repository(&root.main_repo_path, cx).await;
+ match main_repo_result {
+ Ok((main_repo, _temp_project)) => {
+ let rx = main_repo.update(cx, |repo, _cx| {
+ repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
+ });
+ if let Err(error) = rx
+ .await
+ .map_err(|_| anyhow!("update_ref canceled"))
+ .and_then(|r| r)
+ {
+ log::warn!(
+ "Failed to create ref {} on main repo (non-fatal): {error}",
+ ref_name
+ );
+ }
+ // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
+ drop(_temp_project);
+ }
+ Err(error) => {
+ log::warn!(
+ "Could not find main repo to create ref {} (non-fatal): {error}",
+ ref_name
+ );
+ }
+ }
+
+ Ok(PersistOutcome {
+ archived_worktree_id,
+ staged_commit_hash,
+ })
+}
+
+/// Undoes a successful [`persist_worktree_state`] by resetting the WIP
+/// commits, deleting the git ref on the main repo, and removing the DB
+/// record.
+pub async fn rollback_persist(outcome: &PersistOutcome, root: &RootPlan, cx: &mut AsyncApp) {
+ // Undo WIP commits on the worktree repo
+ if let Some(worktree_repo) = &root.worktree_repo {
+ let rx = worktree_repo.update(cx, |repo, cx| {
+ repo.reset(
+ format!("{}~1", outcome.staged_commit_hash),
+ ResetMode::Mixed,
+ cx,
+ )
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ }
+
+ // Delete the git ref on main repo
+ if let Ok((main_repo, _temp_project)) =
+ find_or_create_repository(&root.main_repo_path, cx).await
+ {
+ let ref_name = archived_worktree_ref_name(outcome.archived_worktree_id);
+ let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
+ rx.await.ok().and_then(|r| r.log_err());
+ // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
+ drop(_temp_project);
+ }
+
+ // Delete the DB record
+ let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+ if let Err(error) = store
+ .read_with(cx, |store, cx| {
+ store.delete_archived_worktree(outcome.archived_worktree_id, cx)
+ })
+ .await
+ {
+ log::error!("Failed to delete archived worktree DB record during rollback: {error:#}");
+ }
+}
+
+/// Restores a previously archived worktree back to disk from its DB record.
+///
+/// Re-creates the git worktree (or adopts an existing directory), resets
+/// past the two WIP commits to recover the original working directory
+/// state, verifies HEAD matches the expected commit, and restores the
+/// original branch if one was recorded.
+///
+/// Returns the restored worktree's on-disk path. Worktree creation/repair
+/// and the final HEAD verification are hard failures; the branch
+/// restoration at the end is best-effort and only logged.
+pub async fn restore_worktree_via_git(
+ row: &ArchivedGitWorktree,
+ cx: &mut AsyncApp,
+) -> Result<PathBuf> {
+ let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;
+
+ // Check if worktree path already exists on disk
+ let worktree_path = &row.worktree_path;
+ let app_state = current_app_state(cx).context("no app state available")?;
+ let already_exists = app_state.fs.metadata(worktree_path).await?.is_some();
+
+ if already_exists {
+ let is_git_worktree =
+ resolve_git_worktree_to_main_repo(app_state.fs.as_ref(), worktree_path)
+ .await
+ .is_some();
+
+ if is_git_worktree {
+ // Already a git worktree — another thread on the same worktree
+ // already restored it. Reuse as-is.
+ return Ok(worktree_path.clone());
+ }
+
+ // Path exists but isn't a git worktree. Ask git to adopt it.
+ let rx = main_repo.update(cx, |repo, _cx| repo.repair_worktrees());
+ rx.await
+ .map_err(|_| anyhow!("worktree repair was canceled"))?
+ .context("failed to repair worktrees")?;
+ } else {
+ // Create detached worktree at the unstaged commit
+ let rx = main_repo.update(cx, |repo, _cx| {
+ repo.create_worktree_detached(worktree_path.clone(), row.unstaged_commit_hash.clone())
+ });
+ rx.await
+ .map_err(|_| anyhow!("worktree creation was canceled"))?
+ .context("failed to create worktree")?;
+ }
+
+ // Get the worktree's repo entity
+ let (wt_repo, _temp_wt_project) = find_or_create_repository(worktree_path, cx).await?;
+
+ // Reset past the WIP commits to recover original state
+ let mixed_reset_ok = {
+ let rx = wt_repo.update(cx, |repo, cx| {
+ repo.reset(row.staged_commit_hash.clone(), ResetMode::Mixed, cx)
+ });
+ match rx.await {
+ Ok(Ok(())) => true,
+ Ok(Err(error)) => {
+ log::error!("Mixed reset to staged commit failed: {error:#}");
+ false
+ }
+ Err(_) => {
+ log::error!("Mixed reset to staged commit was canceled");
+ false
+ }
+ }
+ };
+
+ let soft_reset_ok = if mixed_reset_ok {
+ let rx = wt_repo.update(cx, |repo, cx| {
+ repo.reset(row.original_commit_hash.clone(), ResetMode::Soft, cx)
+ });
+ match rx.await {
+ Ok(Ok(())) => true,
+ Ok(Err(error)) => {
+ log::error!("Soft reset to original commit failed: {error:#}");
+ false
+ }
+ Err(_) => {
+ log::error!("Soft reset to original commit was canceled");
+ false
+ }
+ }
+ } else {
+ false
+ };
+
+ // If either WIP reset failed, fall back to a mixed reset directly to
+ // original_commit_hash so we at least land on the right commit.
+ if !mixed_reset_ok || !soft_reset_ok {
+ log::warn!(
+ "WIP reset(s) failed (mixed_ok={mixed_reset_ok}, soft_ok={soft_reset_ok}); \
+ falling back to mixed reset to original commit {}",
+ row.original_commit_hash
+ );
+ let rx = wt_repo.update(cx, |repo, cx| {
+ repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
+ });
+ match rx.await {
+ Ok(Ok(())) => {}
+ Ok(Err(error)) => {
+ return Err(error.context(format!(
+ "fallback reset to original commit {} also failed",
+ row.original_commit_hash
+ )));
+ }
+ Err(_) => {
+ return Err(anyhow!(
+ "fallback reset to original commit {} was canceled",
+ row.original_commit_hash
+ ));
+ }
+ }
+ }
+
+ // Verify HEAD is at original_commit_hash
+ let current_head = wt_repo
+ .update(cx, |repo, _cx| repo.head_sha())
+ .await
+ .map_err(|_| anyhow!("post-restore head_sha was canceled"))?
+ .context("failed to read HEAD after restore")?
+ .context("HEAD is None after restore")?;
+
+ if current_head != row.original_commit_hash {
+ anyhow::bail!(
+ "After restore, HEAD is at {current_head} but expected {}. \
+ The worktree may be in an inconsistent state.",
+ row.original_commit_hash
+ );
+ }
+
+ // Restore the branch
+ if let Some(branch_name) = &row.branch_name {
+ // Check if the branch exists and points at original_commit_hash.
+ // If it does, switch to it. If not, create a new branch there.
+ let rx = wt_repo.update(cx, |repo, _cx| repo.change_branch(branch_name.clone()));
+ if matches!(rx.await, Ok(Ok(()))) {
+ // Verify the branch actually points at original_commit_hash after switching
+ let head_after_switch = wt_repo
+ .update(cx, |repo, _cx| repo.head_sha())
+ .await
+ .ok()
+ .and_then(|r| r.ok())
+ .flatten();
+
+ if head_after_switch.as_deref() != Some(&row.original_commit_hash) {
+ // Branch exists but doesn't point at the right commit.
+ // Switch back to detached HEAD at original_commit_hash.
+ log::warn!(
+ "Branch '{}' exists but points at {:?}, not {}. Creating fresh branch.",
+ branch_name,
+ head_after_switch,
+ row.original_commit_hash
+ );
+ let rx = wt_repo.update(cx, |repo, cx| {
+ repo.reset(row.original_commit_hash.clone(), ResetMode::Mixed, cx)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ // Re-create the branch at the current commit. NOTE(review): the
+ // old branch is never actually deleted here, so create_branch may
+ // fail on the still-existing name — the failure is only logged and
+ // the worktree stays on detached HEAD. Confirm this is intended.
+ let rx = wt_repo.update(cx, |repo, _cx| {
+ repo.create_branch(branch_name.clone(), None)
+ });
+ rx.await.ok().and_then(|r| r.log_err());
+ }
+ } else {
+ // Branch doesn't exist or can't be switched to — create it.
+ // NOTE(review): assumes create_branch(name, None) creates the branch
+ // at the current HEAD — confirm against the GitRepository API.
+ let rx = wt_repo.update(cx, |repo, _cx| {
+ repo.create_branch(branch_name.clone(), None)
+ });
+ if let Ok(Err(error)) | Err(error) = rx.await.map_err(|e| anyhow::anyhow!("{e}")) {
+ log::warn!(
+ "Could not create branch '{}': {error} — \
+ restored worktree is in detached HEAD state.",
+ branch_name
+ );
+ }
+ }
+ }
+
+ Ok(worktree_path.clone())
+}
+
+/// Deletes the git ref and DB records for a single archived worktree.
+/// Used when an archived worktree is no longer referenced by any thread.
+///
+/// Best-effort: both the git-ref deletion and the DB deletion log their
+/// failures rather than returning them, so callers never see an error.
+pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
+ // Delete the git ref from the main repo
+ if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
+ {
+ let ref_name = archived_worktree_ref_name(row.id);
+ let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
+ match rx.await {
+ Ok(Ok(())) => {}
+ Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
+ Err(_) => log::warn!("Archive ref deletion was canceled"),
+ }
+ // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
+ drop(_temp_project);
+ }
+
+ // Delete the DB records
+ let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+ store
+ .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx))
+ .await
+ .log_err();
+}
+
+/// Cleans up all archived worktree data associated with a thread being deleted.
+///
+/// This unlinks the thread from all its archived worktrees and, for any
+/// archived worktree that is no longer referenced by any other thread,
+/// deletes the git ref and DB records.
+///
+/// Best-effort: DB failures are logged and the cleanup simply stops early
+/// (or skips the affected row), leaving any remaining records in place.
+pub async fn cleanup_thread_archived_worktrees(session_id: &acp::SessionId, cx: &mut AsyncApp) {
+ let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+
+ let archived_worktrees = store
+ .read_with(cx, |store, cx| {
+ store.get_archived_worktrees_for_thread(session_id.0.to_string(), cx)
+ })
+ .await;
+ let archived_worktrees = match archived_worktrees {
+ Ok(rows) => rows,
+ Err(error) => {
+ log::error!(
+ "Failed to fetch archived worktrees for thread {}: {error:#}",
+ session_id.0
+ );
+ return;
+ }
+ };
+
+ if archived_worktrees.is_empty() {
+ return;
+ }
+
+ // Unlink first so the per-row reference counts below reflect the world
+ // without this thread.
+ if let Err(error) = store
+ .read_with(cx, |store, cx| {
+ store.unlink_thread_from_all_archived_worktrees(session_id.0.to_string(), cx)
+ })
+ .await
+ {
+ log::error!(
+ "Failed to unlink thread {} from archived worktrees: {error:#}",
+ session_id.0
+ );
+ return;
+ }
+
+ // NOTE(review): the "still referenced" check and the cleanup below are
+ // not atomic — a concurrent re-link between them could race with the
+ // record deletion. Confirm this window is acceptable.
+ for row in &archived_worktrees {
+ let still_referenced = store
+ .read_with(cx, |store, cx| {
+ store.is_archived_worktree_referenced(row.id, cx)
+ })
+ .await;
+ match still_referenced {
+ Ok(true) => {}
+ Ok(false) => {
+ cleanup_archived_worktree_record(row, cx).await;
+ }
+ Err(error) => {
+ log::error!(
+ "Failed to check if archived worktree {} is still referenced: {error:#}",
+ row.id
+ );
+ }
+ }
+ }
+}
+
+/// Collects every `Workspace` entity across all open `MultiWorkspace` windows.
+///
+/// Windows that are not `MultiWorkspace`, or whose state can no longer be
+/// read, are silently skipped (`unwrap_or_default`).
+pub fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
+ cx.windows()
+ .into_iter()
+ .filter_map(|window| window.downcast::<MultiWorkspace>())
+ .flat_map(|multi_workspace| {
+ multi_workspace
+ .read(cx)
+ .map(|multi_workspace| multi_workspace.workspaces().cloned().collect::<Vec<_>>())
+ .unwrap_or_default()
+ })
+ .collect()
+}
+
+/// Returns the `AppState` of the first open workspace, or `None` when no
+/// `MultiWorkspace` windows are open. Which workspace is "first" is
+/// arbitrary; all workspaces share the same `AppState`, so any will do.
+fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
+ cx.update(|cx| {
+ all_open_workspaces(cx)
+ .into_iter()
+ .next()
+ .map(|workspace| workspace.read(cx).app_state().clone())
+ })
+}
@@ -603,6 +603,9 @@ impl ThreadsArchiveView {
.wait_for_connection()
});
cx.spawn(async move |_this, cx| {
+ crate::thread_worktree_archive::cleanup_thread_archived_worktrees(&session_id, cx)
+ .await;
+
let state = task.await?;
let task = cx.update(|cx| {
if let Some(list) = state.connection.session_list(cx) {
@@ -1380,6 +1380,39 @@ impl GitRepository for FakeGitRepository {
async { Ok(()) }.boxed()
}
+ /// Fake equivalent of `git add -A`: snapshots every on-disk file
+ /// (skipping `.git`) into the index, and drops index entries for files
+ /// no longer on disk, so deletions are staged too.
+ fn stage_all_including_untracked(&self) -> BoxFuture<'_, Result<()>> {
+ // Assumes dot_git_path is `<workdir>/.git`, so parent() is the workdir.
+ let workdir_path = self.dot_git_path.parent().unwrap();
+ // NOTE(review): files whose content is not valid UTF-8 are skipped by
+ // the filter_map below, so they're absent from fs_paths and the retain
+ // in the closure will *remove* them from the index as if deleted —
+ // confirm that's acceptable for the fake.
+ let git_files: Vec<(RepoPath, String)> = self
+ .fs
+ .files()
+ .iter()
+ .filter_map(|path| {
+ let repo_path = path.strip_prefix(workdir_path).ok()?;
+ if repo_path.starts_with(".git") {
+ return None;
+ }
+ let content = self
+ .fs
+ .read_file_sync(path)
+ .ok()
+ .and_then(|bytes| String::from_utf8(bytes).ok())?;
+ let rel_path = RelPath::new(repo_path, PathStyle::local()).ok()?;
+ Some((RepoPath::from_rel_path(&rel_path), content))
+ })
+ .collect();
+
+ self.with_state_async(true, move |state| {
+ let fs_paths: HashSet<RepoPath> = git_files.iter().map(|(p, _)| p.clone()).collect();
+ for (path, content) in git_files {
+ state.index_contents.insert(path, content);
+ }
+ state
+ .index_contents
+ .retain(|path, _| fs_paths.contains(path));
+ Ok(())
+ })
+ }
+
fn set_trusted(&self, trusted: bool) {
self.is_trusted
.store(trusted, std::sync::atomic::Ordering::Release);
@@ -959,6 +959,8 @@ pub trait GitRepository: Send + Sync {
fn repair_worktrees(&self) -> BoxFuture<'_, Result<()>>;
+ fn stage_all_including_untracked(&self) -> BoxFuture<'_, Result<()>>;
+
fn set_trusted(&self, trusted: bool);
fn is_trusted(&self) -> bool;
}
@@ -2269,6 +2271,18 @@ impl GitRepository for RealGitRepository {
.boxed()
}
+ /// Stages everything — tracked and untracked — by running
+ /// `git --no-optional-locks add -A` on the background executor.
+ fn stage_all_including_untracked(&self) -> BoxFuture<'_, Result<()>> {
+ let git_binary = self.git_binary();
+ self.executor
+ .spawn(async move {
+ let args: Vec<OsString> =
+ vec!["--no-optional-locks".into(), "add".into(), "-A".into()];
+ git_binary?.run(&args).await?;
+ Ok(())
+ })
+ .boxed()
+ }
+
fn push(
&self,
branch_name: String,
@@ -6126,15 +6126,16 @@ impl Repository {
})
}
- pub fn commit_exists(&mut self, sha: String) -> oneshot::Receiver<Result<bool>> {
+ /// Queues a repository job that stages all changes, including untracked
+ /// files (`git add -A`). Local repositories only; remote repositories
+ /// report an error through the receiver.
+ /// NOTE(review): this hunk replaces `commit_exists` wholesale — confirm
+ /// there are no remaining callers of `commit_exists` in the codebase.
+ pub fn stage_all_including_untracked(&mut self) -> oneshot::Receiver<Result<()>> {
self.send_job(None, move |repo, _cx| async move {
match repo {
RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
- let results = backend.revparse_batch(vec![sha]).await?;
- Ok(results.into_iter().next().flatten().is_some())
+ backend.stage_all_including_untracked().await
}
RepositoryState::Remote(_) => {
- anyhow::bail!("commit_exists is not supported for remote repositories")
+ anyhow::bail!(
+ "stage_all_including_untracked is not supported for remote repositories"
+ )
}
}
})
@@ -4758,6 +4758,44 @@ impl Project {
})
}
+ /// Returns a task that resolves when the given worktree's `Entity` is
+ /// fully dropped (all strong references released), not merely when
+ /// `remove_worktree` is called. `remove_worktree` drops the store's
+ /// reference and emits `WorktreeRemoved`, but other code may still
+ /// hold a strong handle — the worktree isn't safe to delete from
+ /// disk until every handle is gone.
+ ///
+ /// We use `observe_release` on the specific entity rather than
+ /// listening for `WorktreeReleased` events because it's simpler at
+ /// the call site (one awaitable task, no subscription / channel /
+ /// ID filtering).
+ ///
+ /// Resolves immediately if the worktree is already gone; errors only
+ /// if the release observer is dropped before it ever fires.
+ pub fn wait_for_worktree_release(
+ &mut self,
+ worktree_id: WorktreeId,
+ cx: &mut Context<Self>,
+ ) -> Task<Result<()>> {
+ let Some(worktree) = self.worktree_for_id(worktree_id, cx) else {
+ return Task::ready(Ok(()));
+ };
+
+ let (released_tx, released_rx) = futures::channel::oneshot::channel();
+ // Arc<Mutex<Option<Sender>>> lets the release callback consume the
+ // oneshot sender at most once.
+ let released_tx = std::sync::Arc::new(Mutex::new(Some(released_tx)));
+ let release_subscription =
+ cx.observe_release(&worktree, move |_project, _released_worktree, _cx| {
+ if let Some(released_tx) = released_tx.lock().take() {
+ let _ = released_tx.send(());
+ }
+ });
+
+ cx.spawn(async move |_project, _cx| {
+ // Hold the subscription for the task's lifetime so the observer
+ // stays registered while we await the release signal.
+ let _release_subscription = release_subscription;
+ released_rx
+ .await
+ .map_err(|_| anyhow!("worktree release observer dropped before release"))?;
+ Ok(())
+ })
+ }
+
pub fn remove_worktree(&mut self, id_to_remove: WorktreeId, cx: &mut Context<Self>) {
self.worktree_store.update(cx, |worktree_store, cx| {
worktree_store.remove_worktree(id_to_remove, cx);
@@ -27,6 +27,7 @@ editor.workspace = true
fs.workspace = true
git.workspace = true
gpui.workspace = true
+log.workspace = true
menu.workspace = true
platform_title_bar.workspace = true
project.workspace = true
@@ -35,6 +36,7 @@ remote.workspace = true
serde.workspace = true
serde_json.workspace = true
settings.workspace = true
+smol.workspace = true
theme.workspace = true
theme_settings.workspace = true
ui.workspace = true
@@ -5,6 +5,7 @@ use action_log::DiffStats;
use agent_client_protocol::{self as acp};
use agent_settings::AgentSettings;
use agent_ui::thread_metadata_store::{ThreadMetadata, ThreadMetadataStore};
+use agent_ui::thread_worktree_archive;
use agent_ui::threads_archive_view::{
ThreadsArchiveView, ThreadsArchiveViewEvent, format_history_entry_timestamp,
};
@@ -16,7 +17,7 @@ use chrono::{DateTime, Utc};
use editor::Editor;
use gpui::{
Action as _, AnyElement, App, Context, Entity, FocusHandle, Focusable, KeyContext, ListState,
- Pixels, Render, SharedString, WeakEntity, Window, WindowHandle, linear_color_stop,
+ Pixels, Render, SharedString, Task, WeakEntity, Window, WindowHandle, linear_color_stop,
linear_gradient, list, prelude::*, px,
};
use menu::{
@@ -33,6 +34,7 @@ use serde::{Deserialize, Serialize};
use settings::Settings as _;
use std::collections::{HashMap, HashSet};
use std::mem;
+use std::path::PathBuf;
use std::rc::Rc;
use theme::ActiveTheme;
use ui::{
@@ -45,8 +47,8 @@ use util::path_list::{PathList, SerializedPathList};
use workspace::{
AddFolderToProject, CloseWindow, FocusWorkspaceSidebar, MultiWorkspace, MultiWorkspaceEvent,
NextProject, NextThread, Open, PreviousProject, PreviousThread, ShowFewerThreads,
- ShowMoreThreads, Sidebar as WorkspaceSidebar, SidebarSide, ToggleWorkspaceSidebar, Workspace,
- sidebar_side_context_menu,
+ ShowMoreThreads, Sidebar as WorkspaceSidebar, SidebarSide, Toast, ToggleWorkspaceSidebar,
+ Workspace, notifications::NotificationId, sidebar_side_context_menu,
};
use zed_actions::OpenRecent;
@@ -108,6 +110,11 @@ enum SidebarView {
Archive(Entity<ThreadsArchiveView>),
}
+enum ArchiveWorktreeOutcome {
+ Success,
+ Cancelled,
+}
+
#[derive(Clone, Debug)]
enum ActiveEntry {
Thread {
@@ -908,7 +915,7 @@ impl Sidebar {
}
};
- // === Main code path: one query per group via main_worktree_paths ===
+ // Main code path: one query per group via main_worktree_paths.
// The main_worktree_paths column is set on all new threads and
// points to the group's canonical paths regardless of which
// linked worktree the thread was opened in.
@@ -2201,31 +2208,126 @@ impl Sidebar {
window: &mut Window,
cx: &mut Context<Self>,
) {
- ThreadMetadataStore::global(cx)
- .update(cx, |store, cx| store.unarchive(&metadata.session_id, cx));
+ let session_id = metadata.session_id.clone();
+
+ ThreadMetadataStore::global(cx).update(cx, |store, cx| store.unarchive(&session_id, cx));
+
+ if metadata.folder_paths.paths().is_empty() {
+ let active_workspace = self
+ .multi_workspace
+ .upgrade()
+ .map(|w| w.read(cx).workspace().clone());
- if !metadata.folder_paths.paths().is_empty() {
- let path_list = metadata.folder_paths.clone();
- if let Some(workspace) = self.find_current_workspace_for_path_list(&path_list, cx) {
+ if let Some(workspace) = active_workspace {
self.activate_thread_locally(&metadata, &workspace, false, window, cx);
- } else if let Some((target_window, workspace)) =
- self.find_open_workspace_for_path_list(&path_list, cx)
- {
- self.activate_thread_in_other_window(metadata, workspace, target_window, cx);
- } else {
- self.open_workspace_and_activate_thread(metadata, path_list, window, cx);
}
return;
}
- let active_workspace = self
- .multi_workspace
- .upgrade()
- .map(|w| w.read(cx).workspace().clone());
+ let store = ThreadMetadataStore::global(cx);
+ let task = store
+ .read(cx)
+ .get_archived_worktrees_for_thread(session_id.0.to_string(), cx);
+ let path_list = metadata.folder_paths.clone();
- if let Some(workspace) = active_workspace {
- self.activate_thread_locally(&metadata, &workspace, false, window, cx);
- }
+ cx.spawn_in(window, async move |this, cx| {
+ let archived_worktrees = task.await?;
+
+ // No archived worktrees means the thread wasn't associated with a
+ // linked worktree that got deleted, so we just need to find (or
+ // open) a workspace that matches the thread's folder paths.
+ if archived_worktrees.is_empty() {
+ this.update_in(cx, |this, window, cx| {
+ if let Some(workspace) =
+ this.find_current_workspace_for_path_list(&path_list, cx)
+ {
+ this.activate_thread_locally(&metadata, &workspace, false, window, cx);
+ } else if let Some((target_window, workspace)) =
+ this.find_open_workspace_for_path_list(&path_list, cx)
+ {
+ this.activate_thread_in_other_window(
+ metadata,
+ workspace,
+ target_window,
+ cx,
+ );
+ } else {
+ this.open_workspace_and_activate_thread(metadata, path_list, window, cx);
+ }
+ })?;
+ return anyhow::Ok(());
+ }
+
+ // Restore each archived worktree back to disk via git. If the
+ // worktree already exists (e.g. a previous unarchive of a different
+ // thread on the same worktree already restored it), it's reused
+ // as-is. We track (old_path, restored_path) pairs so we can update
+ // the thread's folder_paths afterward.
+ let mut path_replacements: Vec<(PathBuf, PathBuf)> = Vec::new();
+ for row in &archived_worktrees {
+ match thread_worktree_archive::restore_worktree_via_git(row, &mut *cx).await {
+ Ok(restored_path) => {
+ // The worktree is on disk now; clean up the DB record
+ // and git ref we created during archival.
+ thread_worktree_archive::cleanup_archived_worktree_record(row, &mut *cx)
+ .await;
+ path_replacements.push((row.worktree_path.clone(), restored_path));
+ }
+ Err(error) => {
+ log::error!("Failed to restore worktree: {error:#}");
+ this.update_in(cx, |this, _window, cx| {
+ if let Some(multi_workspace) = this.multi_workspace.upgrade() {
+ let workspace = multi_workspace.read(cx).workspace().clone();
+ workspace.update(cx, |workspace, cx| {
+ struct RestoreWorktreeErrorToast;
+ workspace.show_toast(
+ Toast::new(
+ NotificationId::unique::<RestoreWorktreeErrorToast>(),
+ format!("Failed to restore worktree: {error:#}"),
+ )
+ .autohide(),
+ cx,
+ );
+ });
+ }
+ })
+ .ok();
+ return anyhow::Ok(());
+ }
+ }
+ }
+
+ if !path_replacements.is_empty() {
+ // Update the thread's stored folder_paths: swap each old
+ // worktree path for the restored path (which may differ if
+ // the worktree was restored to a new location).
+ cx.update(|_window, cx| {
+ store.update(cx, |store, cx| {
+ store.update_restored_worktree_paths(&session_id, &path_replacements, cx);
+ });
+ })?;
+
+ // Re-read the metadata (now with updated paths) and open
+ // the workspace so the user lands in the restored worktree.
+ let updated_metadata =
+ cx.update(|_window, cx| store.read(cx).entry(&session_id).cloned())?;
+
+ if let Some(updated_metadata) = updated_metadata {
+ let new_paths = updated_metadata.folder_paths.clone();
+ this.update_in(cx, |this, window, cx| {
+ this.open_workspace_and_activate_thread(
+ updated_metadata,
+ new_paths,
+ window,
+ cx,
+ );
+ })?;
+ }
+ }
+
+ anyhow::Ok(())
+ })
+ .detach_and_log_err(cx);
}
fn expand_selected_entry(
@@ -2374,10 +2476,52 @@ impl Sidebar {
window: &mut Window,
cx: &mut Context<Self>,
) {
- let thread_folder_paths = ThreadMetadataStore::global(cx)
+ let metadata = ThreadMetadataStore::global(cx)
.read(cx)
.entry(session_id)
- .map(|m| m.folder_paths.clone());
+ .cloned();
+ let thread_folder_paths = metadata.as_ref().map(|m| m.folder_paths.clone());
+
+ // Compute which linked worktree roots should be archived from disk if
+ // this thread is archived. This must happen before we remove any
+ // workspace from the MultiWorkspace, because `build_root_plan` needs
+ // the currently open workspaces in order to find the affected projects
+ // and repository handles for each linked worktree.
+ let roots_to_archive = metadata
+ .as_ref()
+ .map(|metadata| {
+ let mut workspaces = self
+ .multi_workspace
+ .upgrade()
+ .map(|multi_workspace| {
+ multi_workspace
+ .read(cx)
+ .workspaces()
+ .cloned()
+ .collect::<Vec<_>>()
+ })
+ .unwrap_or_default();
+ for workspace in thread_worktree_archive::all_open_workspaces(cx) {
+ if !workspaces.contains(&workspace) {
+ workspaces.push(workspace);
+ }
+ }
+ metadata
+ .folder_paths
+ .ordered_paths()
+ .filter_map(|path| {
+ thread_worktree_archive::build_root_plan(path, &workspaces, cx)
+ })
+ .filter(|plan| {
+ !thread_worktree_archive::path_is_referenced_by_other_unarchived_threads(
+ session_id,
+ &plan.root_path,
+ cx,
+ )
+ })
+ .collect::<Vec<_>>()
+ })
+ .unwrap_or_default();
// Find the neighbor thread in the sidebar (by display position).
// Look below first, then above, for the nearest thread that isn't
@@ -2466,10 +2610,13 @@ impl Sidebar {
let removed = remove_task.await?;
if removed {
this.update_in(cx, |this, window, cx| {
+ let in_flight =
+ this.start_archive_worktree_task(&session_id, roots_to_archive, cx);
this.archive_and_activate(
&session_id,
neighbor_metadata.as_ref(),
thread_folder_paths.as_ref(),
+ in_flight,
window,
cx,
);
@@ -2481,10 +2628,12 @@ impl Sidebar {
} else {
// Simple case: no workspace removal needed.
let neighbor_metadata = neighbor.map(|(metadata, _)| metadata);
+ let in_flight = self.start_archive_worktree_task(session_id, roots_to_archive, cx);
self.archive_and_activate(
session_id,
neighbor_metadata.as_ref(),
thread_folder_paths.as_ref(),
+ in_flight,
window,
cx,
);
@@ -2492,16 +2641,32 @@ impl Sidebar {
}
/// Archive a thread and activate the nearest neighbor or a draft.
+ ///
+ /// IMPORTANT: when activating a neighbor or creating a fallback draft,
+ /// this method also activates the target workspace in the MultiWorkspace.
+ /// This is critical because `rebuild_contents` derives the active
+ /// workspace from `mw.workspace()`. If the linked worktree workspace is
+ /// still active after archiving its last thread, `rebuild_contents` sees
+ /// the threadless linked worktree as active and emits a spurious
+ /// "+ New Thread" entry with the worktree chip — keeping the worktree
+ /// alive and preventing disk cleanup.
+ ///
+ /// When `in_flight_archive` is present, it is the background task that
+ /// persists the linked worktree's git state and deletes it from disk.
+ /// We attach it to the metadata store at the same time we mark the thread
+ /// archived so failures can automatically unarchive the thread and user-
+ /// initiated unarchive can cancel the task.
fn archive_and_activate(
&mut self,
session_id: &acp::SessionId,
neighbor: Option<&ThreadMetadata>,
thread_folder_paths: Option<&PathList>,
+ in_flight_archive: Option<(Task<()>, smol::channel::Sender<()>)>,
window: &mut Window,
cx: &mut Context<Self>,
) {
ThreadMetadataStore::global(cx).update(cx, |store, cx| {
- store.archive(session_id, cx);
+ store.archive(session_id, in_flight_archive, cx);
});
let is_active = self
@@ -2537,22 +2702,39 @@ impl Sidebar {
}
// Try to activate the neighbor thread. If its workspace is open,
- // tell the panel to load it. `rebuild_contents` will reconcile
- // `active_entry` once the thread finishes loading.
+ // tell the panel to load it and activate that workspace.
+ // `rebuild_contents` will reconcile `active_entry` once the thread
+ // finishes loading.
if let Some(metadata) = neighbor {
if let Some(workspace) = self
.multi_workspace
.upgrade()
.and_then(|mw| mw.read(cx).workspace_for_paths(&metadata.folder_paths, cx))
{
+ self.activate_workspace(&workspace, window, cx);
Self::load_agent_thread_in_workspace(&workspace, metadata, true, window, cx);
return;
}
}
// No neighbor or its workspace isn't open — fall back to a new
- // draft on the active workspace so the user has something to work with.
- if let Some(workspace) = self.active_entry_workspace().cloned() {
+ // draft. Use the group workspace (main project) rather than the
+ // active entry workspace, which may be a linked worktree that is
+ // about to be cleaned up.
+ let fallback_workspace = thread_folder_paths
+ .and_then(|folder_paths| {
+ let mw = self.multi_workspace.upgrade()?;
+ let mw = mw.read(cx);
+ // Find the group's main workspace (whose root paths match
+ // the project group key, not the thread's folder paths).
+ let thread_workspace = mw.workspace_for_paths(folder_paths, cx)?;
+ let group_key = thread_workspace.read(cx).project_group_key(cx);
+ mw.workspace_for_paths(group_key.path_list(), cx)
+ })
+ .or_else(|| self.active_entry_workspace().cloned());
+
+ if let Some(workspace) = fallback_workspace {
+ self.activate_workspace(&workspace, window, cx);
if let Some(panel) = workspace.read(cx).panel::<AgentPanel>(cx) {
panel.update(cx, |panel, cx| {
panel.new_thread(&NewThread, window, cx);
@@ -2561,6 +2743,113 @@ impl Sidebar {
}
}
+ /// Spawns the background task that persists and then deletes this
+ /// thread's linked-worktree roots. Returns `None` when there is nothing
+ /// to archive. The returned `Sender` is a cancellation handle: dropping
+ /// it cancels the task (see `archive_worktree_roots`). On failure the
+ /// task unarchives the thread; on success it clears the in-flight
+ /// archive record from the metadata store.
+ fn start_archive_worktree_task(
+ &self,
+ session_id: &acp::SessionId,
+ roots: Vec<thread_worktree_archive::RootPlan>,
+ cx: &mut Context<Self>,
+ ) -> Option<(Task<()>, smol::channel::Sender<()>)> {
+ if roots.is_empty() {
+ return None;
+ }
+
+ let (cancel_tx, cancel_rx) = smol::channel::bounded::<()>(1);
+ let session_id = session_id.clone();
+ let task = cx.spawn(async move |_this, cx| {
+ match Self::archive_worktree_roots(roots, cancel_rx, cx).await {
+ Ok(ArchiveWorktreeOutcome::Success) => {
+ cx.update(|cx| {
+ ThreadMetadataStore::global(cx).update(cx, |store, _cx| {
+ store.cleanup_completed_archive(&session_id);
+ });
+ });
+ }
+ Ok(ArchiveWorktreeOutcome::Cancelled) => {}
+ Err(error) => {
+ log::error!("Failed to archive worktree: {error:#}");
+ // Archiving the worktree failed — revert the thread to
+ // unarchived so the user doesn't lose access to it.
+ cx.update(|cx| {
+ ThreadMetadataStore::global(cx).update(cx, |store, cx| {
+ store.unarchive(&session_id, cx);
+ });
+ });
+ }
+ }
+ });
+
+ Some((task, cancel_tx))
+ }
+
+ /// Persists each root's git state, then removes it from disk. On
+ /// failure — or on cancellation, checked before each expensive step —
+ /// every already-persisted root is rolled back in reverse order.
+ ///
+ /// NOTE(review): cancellation is detected via `cancel_rx.is_closed()`,
+ /// i.e. by *dropping* the cancel sender; sending `()` on the channel
+ /// does not cancel. Confirm callers cancel by drop.
+ async fn archive_worktree_roots(
+ roots: Vec<thread_worktree_archive::RootPlan>,
+ cancel_rx: smol::channel::Receiver<()>,
+ cx: &mut gpui::AsyncApp,
+ ) -> anyhow::Result<ArchiveWorktreeOutcome> {
+ let mut completed_persists: Vec<(
+ thread_worktree_archive::PersistOutcome,
+ thread_worktree_archive::RootPlan,
+ )> = Vec::new();
+
+ for root in &roots {
+ if cancel_rx.is_closed() {
+ for (outcome, completed_root) in completed_persists.iter().rev() {
+ thread_worktree_archive::rollback_persist(outcome, completed_root, cx).await;
+ }
+ return Ok(ArchiveWorktreeOutcome::Cancelled);
+ }
+
+ if root.worktree_repo.is_some() {
+ match thread_worktree_archive::persist_worktree_state(root, cx).await {
+ Ok(outcome) => {
+ completed_persists.push((outcome, root.clone()));
+ }
+ Err(error) => {
+ for (outcome, completed_root) in completed_persists.iter().rev() {
+ thread_worktree_archive::rollback_persist(outcome, completed_root, cx)
+ .await;
+ }
+ return Err(error);
+ }
+ }
+ }
+
+ if cancel_rx.is_closed() {
+ for (outcome, completed_root) in completed_persists.iter().rev() {
+ thread_worktree_archive::rollback_persist(outcome, completed_root, cx).await;
+ }
+ return Ok(ArchiveWorktreeOutcome::Cancelled);
+ }
+
+ if let Err(error) = thread_worktree_archive::remove_root(root.clone(), cx).await {
+ // The current root may have just been persisted above; roll it
+ // back first (and pop it) so the remaining rollbacks still run
+ // in strict reverse order.
+ if let Some((outcome, completed_root)) = completed_persists.last() {
+ if completed_root.root_path == root.root_path {
+ thread_worktree_archive::rollback_persist(outcome, completed_root, cx)
+ .await;
+ completed_persists.pop();
+ }
+ }
+ for (outcome, completed_root) in completed_persists.iter().rev() {
+ thread_worktree_archive::rollback_persist(outcome, completed_root, cx).await;
+ }
+ return Err(error);
+ }
+ }
+
+ Ok(ArchiveWorktreeOutcome::Success)
+ }
+
+ /// Makes `workspace` the active workspace in the owning `MultiWorkspace`.
+ /// No-op if the MultiWorkspace window has already been dropped.
+ fn activate_workspace(
+ &self,
+ workspace: &Entity<Workspace>,
+ window: &mut Window,
+ cx: &mut Context<Self>,
+ ) {
+ if let Some(multi_workspace) = self.multi_workspace.upgrade() {
+ multi_workspace.update(cx, |mw, cx| {
+ mw.activate(workspace.clone(), window, cx);
+ });
+ }
+ }
+
fn remove_selected_thread(
&mut self,
_: &RemoveSelectedThread,
@@ -6,7 +6,7 @@ use agent_ui::{
thread_metadata_store::ThreadMetadata,
};
use chrono::DateTime;
-use fs::FakeFs;
+use fs::{FakeFs, Fs};
use gpui::TestAppContext;
use pretty_assertions::assert_eq;
use project::AgentId;
@@ -4306,6 +4306,14 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
sidebar.update_in(cx, |sidebar: &mut Sidebar, window, cx| {
sidebar.archive_thread(&wt_thread_id, window, cx);
});
+
+ // archive_thread spawns a chain of tasks:
+ // 1. cx.spawn_in for workspace removal (awaits mw.remove())
+ // 2. start_archive_worktree_task spawns cx.spawn for git persist + disk removal
+ // 3. persist/remove do background_spawn work internally
+ // Each layer needs run_until_parked to drive to completion.
+ cx.run_until_parked();
+ cx.run_until_parked();
cx.run_until_parked();
// The linked worktree workspace should have been removed.
@@ -4315,6 +4323,12 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
"linked worktree workspace should be removed after archiving its last thread"
);
+ // The linked worktree checkout directory should also be removed from disk.
+ assert!(
+ !fs.is_dir(Path::new("/wt-feature-a")).await,
+ "linked worktree directory should be removed from disk after archiving its last thread"
+ );
+
// The main thread should still be visible.
let entries = visible_entries_as_strings(&sidebar, cx);
assert!(
@@ -5003,7 +5017,7 @@ async fn test_archived_threads_excluded_from_sidebar_entries(cx: &mut TestAppCon
cx.update(|_, cx| {
ThreadMetadataStore::global(cx).update(cx, |store, cx| {
- store.archive(&archived_thread_session_id, cx)
+ store.archive(&archived_thread_session_id, None, cx)
})
});
cx.run_until_parked();
@@ -5037,6 +5051,436 @@ async fn test_archived_threads_excluded_from_sidebar_entries(cx: &mut TestAppCon
});
}
+#[gpui::test]
+async fn test_archive_last_thread_on_linked_worktree_does_not_create_new_thread_on_worktree(
+ cx: &mut TestAppContext,
+) {
+ // When a linked worktree has a single thread and that thread is archived,
+ // the sidebar must NOT create a new thread on the same worktree (which
+ // would prevent the worktree from being cleaned up on disk). Instead,
+ // archive_thread switches to a sibling thread on the main workspace (or
+ // creates a draft there) before archiving the metadata.
+ agent_ui::test_support::init_test(cx);
+ cx.update(|cx| {
+ ThreadStore::init_global(cx);
+ ThreadMetadataStore::init_global(cx);
+ language_model::LanguageModelRegistry::test(cx);
+ prompt_store::init(cx);
+ });
+
+ let fs = FakeFs::new(cx.executor());
+
+ fs.insert_tree(
+ "/project",
+ serde_json::json!({
+ ".git": {},
+ "src": {},
+ }),
+ )
+ .await;
+
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ false,
+ git::repository::Worktree {
+ path: std::path::PathBuf::from("/wt-ochre-drift"),
+ ref_name: Some("refs/heads/ochre-drift".into()),
+ sha: "aaa".into(),
+ is_main: false,
+ },
+ )
+ .await;
+
+ cx.update(|cx| <dyn fs::Fs>::set_global(fs.clone(), cx));
+
+ let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await;
+ let worktree_project =
+ project::Project::test(fs.clone(), ["/wt-ochre-drift".as_ref()], cx).await;
+
+ main_project
+ .update(cx, |p, cx| p.git_scans_complete(cx))
+ .await;
+ worktree_project
+ .update(cx, |p, cx| p.git_scans_complete(cx))
+ .await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx));
+
+ let sidebar = setup_sidebar(&multi_workspace, cx);
+
+ let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.test_add_workspace(worktree_project.clone(), window, cx)
+ });
+
+ // Set up both workspaces with agent panels.
+ let main_workspace =
+ multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ let _main_panel = add_agent_panel(&main_workspace, cx);
+ let worktree_panel = add_agent_panel(&worktree_workspace, cx);
+
+ // Activate the linked worktree workspace so the sidebar tracks it.
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.activate(worktree_workspace.clone(), window, cx);
+ });
+
+ // Open a thread in the linked worktree panel and send a message
+ // so it becomes the active thread.
+ let connection = StubAgentConnection::new();
+ open_thread_with_connection(&worktree_panel, connection.clone(), cx);
+ send_message(&worktree_panel, cx);
+
+ let worktree_thread_id = active_session_id(&worktree_panel, cx);
+
+ // Give the thread a response chunk so it has content.
+ cx.update(|_, cx| {
+ connection.send_update(
+ worktree_thread_id.clone(),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("done".into())),
+ cx,
+ );
+ });
+
+ // Save the worktree thread's metadata.
+ save_thread_metadata(
+ worktree_thread_id.clone(),
+ "Ochre Drift Thread".into(),
+ chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
+ None,
+ &worktree_project,
+ cx,
+ );
+
+ // Also save a thread on the main project so there's a sibling in the
+ // group that can be selected after archiving.
+ save_thread_metadata(
+ acp::SessionId::new(Arc::from("main-project-thread")),
+ "Main Project Thread".into(),
+ chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
+ None,
+ &main_project,
+ cx,
+ );
+
+ cx.run_until_parked();
+
+ // Verify the linked worktree thread appears with its chip. The live
+ // thread title comes from the sent message text, not the metadata
+ // title we saved, so the assertions match on the worktree chip.
+ let entries_before = visible_entries_as_strings(&sidebar, cx);
+ assert!(
+ entries_before
+ .iter()
+ .any(|s| s.contains("{wt-ochre-drift}")),
+ "expected worktree thread with chip before archiving, got: {entries_before:?}"
+ );
+ assert!(
+ entries_before
+ .iter()
+ .any(|s| s.contains("Main Project Thread")),
+ "expected main project thread before archiving, got: {entries_before:?}"
+ );
+
+ // Confirm the worktree thread is the active entry.
+ sidebar.read_with(cx, |s, _| {
+ assert_active_thread(
+ s,
+ &worktree_thread_id,
+ "worktree thread should be active before archiving",
+ );
+ });
+
+ // Archive the worktree thread — it's the only thread using ochre-drift.
+ sidebar.update_in(cx, |sidebar, window, cx| {
+ sidebar.archive_thread(&worktree_thread_id, window, cx);
+ });
+
+ cx.run_until_parked();
+
+ // The archived thread should no longer appear in the sidebar.
+ let entries_after = visible_entries_as_strings(&sidebar, cx);
+ assert!(
+ !entries_after
+ .iter()
+ .any(|s| s.contains("Ochre Drift Thread")),
+ "archived thread should be hidden, got: {entries_after:?}"
+ );
+
+ // No "+ New Thread" entry should appear with the ochre-drift worktree
+ // chip — that would keep the worktree alive and prevent cleanup.
+ assert!(
+ !entries_after.iter().any(|s| s.contains("{wt-ochre-drift}")),
+ "no entry should reference the archived worktree, got: {entries_after:?}"
+ );
+
+ // The main project thread should still be visible.
+ assert!(
+ entries_after
+ .iter()
+ .any(|s| s.contains("Main Project Thread")),
+ "main project thread should still be visible, got: {entries_after:?}"
+ );
+}
+
+#[gpui::test]
+async fn test_archive_last_thread_on_linked_worktree_with_no_siblings_creates_draft_on_main(
+ cx: &mut TestAppContext,
+) {
+ // When a linked worktree thread is the ONLY thread in the project group
+ // (no threads on the main repo either), archiving it should create a
+ // draft on the main workspace, not the linked worktree workspace.
+ agent_ui::test_support::init_test(cx);
+ cx.update(|cx| {
+ ThreadStore::init_global(cx);
+ ThreadMetadataStore::init_global(cx);
+ language_model::LanguageModelRegistry::test(cx);
+ prompt_store::init(cx);
+ });
+
+ let fs = FakeFs::new(cx.executor());
+
+ fs.insert_tree(
+ "/project",
+ serde_json::json!({
+ ".git": {},
+ "src": {},
+ }),
+ )
+ .await;
+
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ false,
+ git::repository::Worktree {
+ path: std::path::PathBuf::from("/wt-ochre-drift"),
+ ref_name: Some("refs/heads/ochre-drift".into()),
+ sha: "aaa".into(),
+ is_main: false,
+ },
+ )
+ .await;
+
+ cx.update(|cx| <dyn fs::Fs>::set_global(fs.clone(), cx));
+
+ let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await;
+ let worktree_project =
+ project::Project::test(fs.clone(), ["/wt-ochre-drift".as_ref()], cx).await;
+
+ main_project
+ .update(cx, |p, cx| p.git_scans_complete(cx))
+ .await;
+ worktree_project
+ .update(cx, |p, cx| p.git_scans_complete(cx))
+ .await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx));
+
+ let sidebar = setup_sidebar(&multi_workspace, cx);
+
+ let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.test_add_workspace(worktree_project.clone(), window, cx)
+ });
+
+ let main_workspace =
+ multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ let _main_panel = add_agent_panel(&main_workspace, cx);
+ let worktree_panel = add_agent_panel(&worktree_workspace, cx);
+
+ // Activate the linked worktree workspace so the sidebar tracks it.
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.activate(worktree_workspace.clone(), window, cx);
+ });
+
+ // Open a thread on the linked worktree — this is the ONLY thread.
+ let connection = StubAgentConnection::new();
+ open_thread_with_connection(&worktree_panel, connection.clone(), cx);
+ send_message(&worktree_panel, cx);
+
+ let worktree_thread_id = active_session_id(&worktree_panel, cx);
+
+ cx.update(|_, cx| {
+ connection.send_update(
+ worktree_thread_id.clone(),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("done".into())),
+ cx,
+ );
+ });
+
+ save_thread_metadata(
+ worktree_thread_id.clone(),
+ "Ochre Drift Thread".into(),
+ chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
+ None,
+ &worktree_project,
+ cx,
+ );
+
+ cx.run_until_parked();
+
+ // Archive it — there are no other threads in the group.
+ sidebar.update_in(cx, |sidebar, window, cx| {
+ sidebar.archive_thread(&worktree_thread_id, window, cx);
+ });
+
+ cx.run_until_parked();
+
+ let entries_after = visible_entries_as_strings(&sidebar, cx);
+
+ // No entry should reference the linked worktree.
+ assert!(
+ !entries_after.iter().any(|s| s.contains("{wt-ochre-drift}")),
+ "no entry should reference the archived worktree, got: {entries_after:?}"
+ );
+
+ // The active entry should be a draft on the main workspace.
+ sidebar.read_with(cx, |s, _| {
+ assert_active_draft(
+ s,
+ &main_workspace,
+ "active entry should be a draft on the main workspace",
+ );
+ });
+}
+
+#[gpui::test]
+async fn test_archive_thread_on_linked_worktree_selects_sibling_thread(cx: &mut TestAppContext) {
+ // When a linked worktree thread is archived but the group has other
+ // threads (e.g. on the main project), archive_thread should select
+ // the nearest sibling.
+ agent_ui::test_support::init_test(cx);
+ cx.update(|cx| {
+ ThreadStore::init_global(cx);
+ ThreadMetadataStore::init_global(cx);
+ language_model::LanguageModelRegistry::test(cx);
+ prompt_store::init(cx);
+ });
+
+ let fs = FakeFs::new(cx.executor());
+
+ fs.insert_tree(
+ "/project",
+ serde_json::json!({
+ ".git": {},
+ "src": {},
+ }),
+ )
+ .await;
+
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ false,
+ git::repository::Worktree {
+ path: std::path::PathBuf::from("/wt-ochre-drift"),
+ ref_name: Some("refs/heads/ochre-drift".into()),
+ sha: "aaa".into(),
+ is_main: false,
+ },
+ )
+ .await;
+
+ cx.update(|cx| <dyn fs::Fs>::set_global(fs.clone(), cx));
+
+ let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await;
+ let worktree_project =
+ project::Project::test(fs.clone(), ["/wt-ochre-drift".as_ref()], cx).await;
+
+ main_project
+ .update(cx, |p, cx| p.git_scans_complete(cx))
+ .await;
+ worktree_project
+ .update(cx, |p, cx| p.git_scans_complete(cx))
+ .await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx));
+
+ let sidebar = setup_sidebar(&multi_workspace, cx);
+
+ let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.test_add_workspace(worktree_project.clone(), window, cx)
+ });
+
+ let main_workspace =
+ multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ let _main_panel = add_agent_panel(&main_workspace, cx);
+ let worktree_panel = add_agent_panel(&worktree_workspace, cx);
+
+ // Activate the linked worktree workspace so the sidebar tracks it.
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.activate(worktree_workspace.clone(), window, cx);
+ });
+
+ // Open a thread on the linked worktree.
+ let connection = StubAgentConnection::new();
+ open_thread_with_connection(&worktree_panel, connection.clone(), cx);
+ send_message(&worktree_panel, cx);
+
+ let worktree_thread_id = active_session_id(&worktree_panel, cx);
+
+ cx.update(|_, cx| {
+ connection.send_update(
+ worktree_thread_id.clone(),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("done".into())),
+ cx,
+ );
+ });
+
+ save_thread_metadata(
+ worktree_thread_id.clone(),
+ "Ochre Drift Thread".into(),
+ chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
+ None,
+ &worktree_project,
+ cx,
+ );
+
+ // Save a sibling thread on the main project.
+ let main_thread_id = acp::SessionId::new(Arc::from("main-project-thread"));
+ save_thread_metadata(
+ main_thread_id,
+ "Main Project Thread".into(),
+ chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
+ None,
+ &main_project,
+ cx,
+ );
+
+ cx.run_until_parked();
+
+ // Confirm the worktree thread is active.
+ sidebar.read_with(cx, |s, _| {
+ assert_active_thread(
+ s,
+ &worktree_thread_id,
+ "worktree thread should be active before archiving",
+ );
+ });
+
+ // Archive the worktree thread.
+ sidebar.update_in(cx, |sidebar, window, cx| {
+ sidebar.archive_thread(&worktree_thread_id, window, cx);
+ });
+
+ cx.run_until_parked();
+
+ // After archiving, no entry should reference the linked worktree.
+ // NOTE(review): the test name promises sibling *selection*, but nothing
+ let entries_after = visible_entries_as_strings(&sidebar, cx);
+ assert!(
+ !entries_after.iter().any(|s| s.contains("{wt-ochre-drift}")),
+ "no entry should reference the archived worktree, got: {entries_after:?}"
+ );
+
+ // The main project thread should still be visible.
+ assert!(
+ entries_after
+ .iter()
+ .any(|s| s.contains("Main Project Thread")),
+ "main project thread should still be visible, got: {entries_after:?}"
+ );
+}
+
#[gpui::test]
async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestAppContext) {
// When a linked worktree is opened as its own workspace and the user
@@ -3322,6 +3322,18 @@ impl Workspace {
state.task.clone().unwrap()
}
+ /// Prompts the user to save or discard each dirty item, returning
+ /// `true` if they confirmed (saved/discarded everything) or `false`
+ /// if they cancelled. Delegates to `save_all_internal` with
+ /// `SaveIntent::Close`; used before removing worktree roots.
+ pub fn prompt_to_save_or_discard_dirty_items(
+ &mut self,
+ window: &mut Window,
+ cx: &mut Context<Self>,
+ ) -> Task<Result<bool>> {
+ self.save_all_internal(SaveIntent::Close, window, cx)
+ }
+
fn save_all_internal(
&mut self,
mut save_intent: SaveIntent,