Detailed changes
@@ -564,6 +564,7 @@ fn collect_importable_threads(
title: session.title,
updated_at: session.updated_at.unwrap_or_else(|| Utc::now()),
created_at: session.created_at,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
remote_connection: remote_connection.clone(),
archived: true,
@@ -25,7 +25,7 @@ pub use project::WorktreePaths;
use project::{AgentId, linked_worktree_short_name};
use remote::{RemoteConnectionOptions, same_remote_connection_identity};
use ui::{App, Context, SharedString, ThreadItemWorktreeInfo, WorktreeKind};
-use util::ResultExt as _;
+use util::{ResultExt as _, debug_panic};
use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
use crate::DEFAULT_THREAD_TITLE;
@@ -125,6 +125,7 @@ fn migrate_thread_metadata(cx: &mut App) -> Task<anyhow::Result<()>> {
},
updated_at: entry.updated_at,
created_at: entry.created_at,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&entry.folder_paths),
remote_connection: None,
archived: true,
@@ -294,6 +295,9 @@ pub struct ThreadMetadata {
pub title: Option<SharedString>,
pub updated_at: DateTime<Utc>,
pub created_at: Option<DateTime<Utc>>,
+ /// When the user last interacted with the thread by sending a message
+ /// (including queueing one). Not updated when a queued message fires.
+ pub interacted_at: Option<DateTime<Utc>>,
pub worktree_paths: WorktreePaths,
pub remote_connection: Option<RemoteConnectionOptions>,
pub archived: bool,
@@ -618,20 +622,7 @@ impl ThreadMetadataStore {
this.threads_by_session.clear();
for row in rows {
- if let Some(sid) = &row.session_id {
- this.threads_by_session.insert(sid.clone(), row.thread_id);
- }
- this.threads_by_paths
- .entry(row.folder_paths().clone())
- .or_default()
- .insert(row.thread_id);
- if !row.main_worktree_paths().is_empty() {
- this.threads_by_main_paths
- .entry(row.main_worktree_paths().clone())
- .or_default()
- .insert(row.thread_id);
- }
- this.threads.insert(row.thread_id, row);
+ this.cache_thread_metadata(row);
}
cx.notify();
@@ -656,6 +647,11 @@ impl ThreadMetadataStore {
}
fn save_internal(&mut self, metadata: ThreadMetadata) {
+ if metadata.session_id.is_none() {
+ debug_panic!("cannot store thread metadata without a session_id");
+ return;
+ };
+
if let Some(thread) = self.threads.get(&metadata.thread_id) {
if thread.folder_paths() != metadata.folder_paths() {
if let Some(thread_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) {
@@ -674,10 +670,20 @@ impl ThreadMetadataStore {
}
}
- if let Some(sid) = &metadata.session_id {
- self.threads_by_session
- .insert(sid.clone(), metadata.thread_id);
- }
+ self.cache_thread_metadata(metadata.clone());
+ self.pending_thread_ops_tx
+ .try_send(DbOperation::Upsert(metadata))
+ .log_err();
+ }
+
+ fn cache_thread_metadata(&mut self, metadata: ThreadMetadata) {
+ let Some(session_id) = metadata.session_id.as_ref() else {
+ debug_panic!("cannot store thread metadata without a session_id");
+ return;
+ };
+
+ self.threads_by_session
+ .insert(session_id.clone(), metadata.thread_id);
self.threads.insert(metadata.thread_id, metadata.clone());
@@ -692,10 +698,6 @@ impl ThreadMetadataStore {
.or_default()
.insert(metadata.thread_id);
}
-
- self.pending_thread_ops_tx
- .try_send(DbOperation::Upsert(metadata))
- .log_err();
}
pub fn update_working_directories(
@@ -752,6 +754,21 @@ impl ThreadMetadataStore {
}
}
+ pub fn update_interacted_at(
+ &mut self,
+ thread_id: &ThreadId,
+ time: DateTime<Utc>,
+ cx: &mut Context<Self>,
+ ) {
+ if let Some(thread) = self.threads.get(thread_id) {
+ self.save_internal(ThreadMetadata {
+ interacted_at: Some(time),
+ ..thread.clone()
+ });
+ cx.notify();
+ };
+ }
+
pub fn archive(
&mut self,
thread_id: ThreadId,
@@ -1154,6 +1171,8 @@ impl ThreadMetadataStore {
.and_then(|t| t.created_at)
.unwrap_or_else(|| updated_at);
+ let interacted_at = existing_thread.and_then(|t| t.interacted_at);
+
let agent_id = thread_ref.connection().agent_id();
// Preserve project-dependent fields for archived threads.
@@ -1189,6 +1208,7 @@ impl ThreadMetadataStore {
agent_id,
title,
created_at: Some(created_at),
+ interacted_at,
updated_at,
worktree_paths,
remote_connection,
@@ -1279,6 +1299,22 @@ impl Domain for ThreadMetadataDb {
DROP TABLE sidebar_threads;
ALTER TABLE sidebar_threads_v2 RENAME TO sidebar_threads;
),
+ sql!(
+ DELETE FROM thread_archived_worktrees
+ WHERE thread_id IN (
+ SELECT thread_id FROM sidebar_threads WHERE session_id IS NULL
+ );
+
+ DELETE FROM sidebar_threads WHERE session_id IS NULL;
+
+ DELETE FROM archived_git_worktrees
+ WHERE id NOT IN (
+ SELECT archived_worktree_id FROM thread_archived_worktrees
+ );
+ ),
+ sql!(
+ ALTER TABLE sidebar_threads ADD COLUMN interacted_at TEXT;
+ ),
];
}
@@ -1289,14 +1325,16 @@ impl ThreadMetadataDb {
pub fn list_ids(&self) -> anyhow::Result<Vec<ThreadId>> {
self.select::<ThreadId>(
"SELECT thread_id FROM sidebar_threads \
+ WHERE session_id IS NOT NULL \
ORDER BY updated_at DESC",
)?()
}
const LIST_QUERY: &str = "SELECT thread_id, session_id, agent_id, title, updated_at, \
- created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, \
+ created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, \
main_worktree_paths_order, remote_connection \
FROM sidebar_threads \
+ WHERE session_id IS NOT NULL \
ORDER BY updated_at DESC";
/// List all sidebar thread metadata, ordered by updated_at descending.
@@ -1308,6 +1346,11 @@ impl ThreadMetadataDb {
/// Upsert metadata for a thread.
pub async fn save(&self, row: ThreadMetadata) -> anyhow::Result<()> {
+ anyhow::ensure!(
+ row.session_id.is_some(),
+ "refusing to persist thread metadata without a session_id"
+ );
+
let session_id = row.session_id.as_ref().map(|s| s.0.clone());
let agent_id = if row.agent_id.as_ref() == ZED_AGENT_ID.as_ref() {
None
@@ -1321,6 +1364,7 @@ impl ThreadMetadataDb {
.unwrap_or_default();
let updated_at = row.updated_at.to_rfc3339();
let created_at = row.created_at.map(|dt| dt.to_rfc3339());
+ let interacted_at = row.interacted_at.map(|dt| dt.to_rfc3339());
let serialized = row.folder_paths().serialize();
let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() {
(None, None)
@@ -1344,14 +1388,15 @@ impl ThreadMetadataDb {
let archived = row.archived;
self.write(move |conn| {
- let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
- VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) \
+ let sql = "INSERT INTO sidebar_threads(thread_id, session_id, agent_id, title, updated_at, created_at, interacted_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \
+ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) \
ON CONFLICT(thread_id) DO UPDATE SET \
session_id = excluded.session_id, \
agent_id = excluded.agent_id, \
title = excluded.title, \
updated_at = excluded.updated_at, \
created_at = excluded.created_at, \
+ interacted_at = excluded.interacted_at, \
folder_paths = excluded.folder_paths, \
folder_paths_order = excluded.folder_paths_order, \
archived = excluded.archived, \
@@ -1365,6 +1410,7 @@ impl ThreadMetadataDb {
i = stmt.bind(&title, i)?;
i = stmt.bind(&updated_at, i)?;
i = stmt.bind(&created_at, i)?;
+ i = stmt.bind(&interacted_at, i)?;
i = stmt.bind(&folder_paths, i)?;
i = stmt.bind(&folder_paths_order, i)?;
i = stmt.bind(&archived, i)?;
@@ -1516,6 +1562,7 @@ impl Column for ThreadMetadata {
let (title, next): (String, i32) = Column::column(statement, next)?;
let (updated_at_str, next): (String, i32) = Column::column(statement, next)?;
let (created_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
+ let (interacted_at_str, next): (Option<String>, i32) = Column::column(statement, next)?;
let (folder_paths_str, next): (Option<String>, i32) = Column::column(statement, next)?;
let (folder_paths_order_str, next): (Option<String>, i32) =
Column::column(statement, next)?;
@@ -1538,6 +1585,12 @@ impl Column for ThreadMetadata {
.transpose()?
.map(|dt| dt.with_timezone(&Utc));
+ let interacted_at = interacted_at_str
+ .as_deref()
+ .map(DateTime::parse_from_rfc3339)
+ .transpose()?
+ .map(|dt| dt.with_timezone(&Utc));
+
let folder_paths = folder_paths_str
.map(|paths| {
PathList::deserialize(&util::path_list::SerializedPathList {
@@ -1579,6 +1632,7 @@ impl Column for ThreadMetadata {
},
updated_at,
created_at,
+ interacted_at,
worktree_paths,
remote_connection,
archived,
@@ -1668,6 +1722,7 @@ mod tests {
},
updated_at,
created_at: Some(updated_at),
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&folder_paths),
remote_connection: None,
}
@@ -1849,6 +1904,7 @@ mod tests {
title: Some("First Thread".into()),
updated_at: updated_time,
created_at: Some(updated_time),
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&second_paths),
remote_connection: None,
archived: false,
@@ -1932,6 +1988,7 @@ mod tests {
title: Some("Existing Metadata".into()),
updated_at: now - chrono::Duration::seconds(10),
created_at: Some(now - chrono::Duration::seconds(10)),
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&project_a_paths),
remote_connection: None,
archived: false,
@@ -2052,6 +2109,7 @@ mod tests {
title: Some("Existing Metadata".into()),
updated_at: existing_updated_at,
created_at: Some(existing_updated_at),
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&project_paths),
remote_connection: None,
archived: false,
@@ -2725,6 +2783,7 @@ mod tests {
title: Some("Local Linked".into()),
updated_at: now,
created_at: Some(now),
+ interacted_at: None,
worktree_paths: linked_worktree_paths.clone(),
remote_connection: None,
};
@@ -2737,6 +2796,7 @@ mod tests {
title: Some("Remote Linked".into()),
updated_at: now - chrono::Duration::seconds(1),
created_at: Some(now - chrono::Duration::seconds(1)),
+ interacted_at: None,
worktree_paths: linked_worktree_paths,
remote_connection: Some(remote_a.clone()),
};
@@ -10,6 +10,7 @@ use project::{
git_store::{Repository, resolve_git_worktree_to_main_repo, worktrees_directory_for_repo},
project_settings::ProjectSettings,
};
+use remote::{RemoteConnectionOptions, same_remote_connection_identity};
use settings::Settings;
use util::ResultExt;
use workspace::{AppState, MultiWorkspace, Workspace};
@@ -47,6 +48,10 @@ pub struct RootPlan {
/// The branch the worktree was on, so it can be restored later.
/// `None` if the worktree was in detached HEAD state.
pub branch_name: Option<String>,
+ /// Remote connection options for the project that owns this worktree,
+ /// used to create temporary remote projects when the main repo isn't
+ /// loaded in any open workspace.
+ pub remote_connection: Option<RemoteConnectionOptions>,
}
/// A `Project` that references a worktree being archived, paired with the
@@ -67,6 +72,16 @@ fn archived_worktree_ref_name(id: i64) -> String {
format!("refs/archived-worktrees/{}", id)
}
+/// Resolves the Zed-managed worktrees base directory for a given repo.
+///
+/// This intentionally reads the *global* `git.worktree_directory` setting
+/// rather than any project-local override, because Zed always uses the
+/// global value when creating worktrees and the archive check must match.
+fn worktrees_base_for_repo(main_repo_path: &Path, cx: &App) -> Option<PathBuf> {
+ let setting = &ProjectSettings::get_global(cx).git.worktree_directory;
+ worktrees_directory_for_repo(main_repo_path, setting).log_err()
+}
+
/// Builds a [`RootPlan`] for archiving the git worktree at `path`.
///
/// This is a synchronous planning step that must run *before* any workspace
@@ -86,15 +101,26 @@ fn archived_worktree_ref_name(id: i64) -> String {
/// cannot be archived to disk) or if no open project has it loaded.
pub fn build_root_plan(
path: &Path,
+ remote_connection: Option<&RemoteConnectionOptions>,
workspaces: &[Entity<Workspace>],
cx: &App,
) -> Option<RootPlan> {
let path = path.to_path_buf();
+ let matches_target_connection = |project: &Entity<Project>, cx: &App| {
+ same_remote_connection_identity(
+ project.read(cx).remote_connection_options(cx).as_ref(),
+ remote_connection,
+ )
+ };
+
let affected_projects = workspaces
.iter()
.filter_map(|workspace| {
let project = workspace.read(cx).project().clone();
+ if !matches_target_connection(&project, cx) {
+ return None;
+ }
let worktree = project
.read(cx)
.visible_worktrees(cx)
@@ -113,6 +139,7 @@ pub fn build_root_plan(
let linked_repo = workspaces
.iter()
+ .filter(|workspace| matches_target_connection(workspace.read(cx).project(), cx))
.flat_map(|workspace| {
workspace
.read(cx)
@@ -134,16 +161,27 @@ pub fn build_root_plan(
// Main worktrees must be left alone — git refuses to remove them.
let (linked_snapshot, repo) = linked_repo?;
let main_repo_path = linked_snapshot.original_repo_abs_path.to_path_buf();
+
+ // Only archive worktrees that live inside the Zed-managed worktrees
+ // directory (configured via `git.worktree_directory`). Worktrees the
+ // user created outside that directory should be left untouched.
+ let worktrees_base = worktrees_base_for_repo(&main_repo_path, cx)?;
+ if !path.starts_with(&worktrees_base) {
+ return None;
+ }
+
let branch_name = linked_snapshot
.branch
.as_ref()
.map(|branch| branch.name().to_string());
+
Some(RootPlan {
root_path: path,
main_repo_path,
affected_projects,
worktree_repo: repo,
branch_name,
+ remote_connection: remote_connection.cloned(),
})
}
@@ -189,47 +227,36 @@ async fn remove_root_after_worktree_removal(
}
}
- // Delete the directory ourselves first, then tell git to clean up the
- // metadata. This avoids a problem where `git worktree remove` can
- // remove the metadata in `.git/worktrees/<name>` but fail to delete
- // the directory (git continues past directory-removal errors), leaving
- // an orphaned folder on disk. By deleting the directory first, we
- // guarantee it's gone, and `git worktree remove --force` with a
- // missing working tree just cleans up the admin entry.
- let root_path = root.root_path.clone();
- cx.background_executor()
- .spawn(async move {
- match std::fs::remove_dir_all(&root_path) {
- Ok(()) => Ok(()),
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(()),
- Err(error) => Err(error),
- }
- })
- .await
- .with_context(|| {
- format!(
- "failed to delete worktree directory '{}'",
- root.root_path.display()
- )
- })?;
+ let (repo, project) =
+ find_or_create_repository(&root.main_repo_path, root.remote_connection.as_ref(), cx)
+ .await?;
- let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
+ // `Repository::remove_worktree` with `force = true` deletes the working
+ // directory before running `git worktree remove --force`, so there's no
+ // need to touch the filesystem here. For remote projects, that cleanup
+ // runs on the headless server via the `GitRemoveWorktree` RPC, which is
+ // the only code path with access to the remote machine's filesystem.
let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
repo.remove_worktree(root.root_path.clone(), true)
});
let result = receiver
.await
.map_err(|_| anyhow!("git worktree metadata cleanup was canceled"))?;
- // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
- drop(_temp_project);
+ // `project` may be a live workspace project, or a temporary one created
+ // by `find_or_create_repository`. In the temporary case we must keep it
+ // alive until the repo has finished removing the worktree.
+ drop(project);
result.context("git worktree metadata cleanup failed")?;
- remove_empty_parent_dirs_up_to_worktrees_base(
- root.root_path.clone(),
- root.main_repo_path.clone(),
- cx,
- )
- .await;
+ // Empty-parent cleanup uses local std::fs — skip for remote projects.
+ if root.remote_connection.is_none() {
+ remove_empty_parent_dirs_up_to_worktrees_base(
+ root.root_path.clone(),
+ root.main_repo_path.clone(),
+ cx,
+ )
+ .await;
+ }
Ok(())
}
@@ -246,10 +273,7 @@ async fn remove_empty_parent_dirs_up_to_worktrees_base(
main_repo_path: PathBuf,
cx: &mut AsyncApp,
) {
- let worktrees_base = cx.update(|cx| {
- let setting = &ProjectSettings::get_global(cx).git.worktree_directory;
- worktrees_directory_for_repo(&main_repo_path, setting).log_err()
- });
+ let worktrees_base = cx.update(|cx| worktrees_base_for_repo(&main_repo_path, cx));
if let Some(worktrees_base) = worktrees_base {
cx.background_executor()
@@ -296,13 +320,17 @@ fn remove_empty_ancestors(child_path: &Path, base_path: &Path) {
}
/// Finds a live `Repository` entity for the given path, or creates a temporary
-/// `Project::local` to obtain one.
+/// project to obtain one.
///
/// `Repository` entities can only be obtained through a `Project` because
/// `GitStore` (which creates and manages `Repository` entities) is owned by
/// `Project`. When no open workspace contains the repo we need, we spin up a
-/// headless `Project::local` just to get a `Repository` handle. The caller
-/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
+ /// headless project just to get a `Repository` handle. For local paths this is
+ /// a `Project::local`; for remote paths we build a `Project::remote` through
+ /// the connection pool (reusing the existing SSH transport). This requires
+ /// the caller to pass the matching `RemoteConnectionOptions`, so that we only
+ /// match (or fall back to creating) projects that share the same remote
+ /// identity. The caller keeps the returned `Entity<Project>` alive for the
/// git operations, then drops it.
///
/// Future improvement: decoupling `GitStore` from `Project` so that
@@ -310,46 +338,91 @@ fn remove_empty_ancestors(child_path: &Path, base_path: &Path) {
/// temporary-project workaround.
async fn find_or_create_repository(
repo_path: &Path,
+ remote_connection: Option<&RemoteConnectionOptions>,
cx: &mut AsyncApp,
-) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
+) -> Result<(Entity<Repository>, Entity<Project>)> {
let repo_path_owned = repo_path.to_path_buf();
+ let remote_connection_owned = remote_connection.cloned();
+
+ // First, try to find a live repository in any open workspace whose
+ // remote connection matches (so a local `/project` and a remote
+ // `/project` are not confused).
let live_repo = cx.update(|cx| {
all_open_workspaces(cx)
.into_iter()
- .flat_map(|workspace| {
- workspace
- .read(cx)
- .project()
- .read(cx)
- .repositories(cx)
- .values()
- .cloned()
- .collect::<Vec<_>>()
- })
- .find(|repo| {
- repo.read(cx).snapshot().work_directory_abs_path.as_ref()
- == repo_path_owned.as_path()
+ .filter_map(|workspace| {
+ let project = workspace.read(cx).project().clone();
+ let project_connection = project.read(cx).remote_connection_options(cx);
+ if !same_remote_connection_identity(
+ project_connection.as_ref(),
+ remote_connection_owned.as_ref(),
+ ) {
+ return None;
+ }
+ Some((
+ project
+ .read(cx)
+ .repositories(cx)
+ .values()
+ .find(|repo| {
+ repo.read(cx).snapshot().work_directory_abs_path.as_ref()
+ == repo_path_owned.as_path()
+ })
+ .cloned()?,
+ project.clone(),
+ ))
})
+ .next()
});
- if let Some(repo) = live_repo {
- return Ok((repo, None));
+ if let Some((repo, project)) = live_repo {
+ return Ok((repo, project));
}
let app_state =
current_app_state(cx).context("no app state available for temporary project")?;
- let temp_project = cx.update(|cx| {
- Project::local(
- app_state.client.clone(),
- app_state.node_runtime.clone(),
- app_state.user_store.clone(),
- app_state.languages.clone(),
- app_state.fs.clone(),
- None,
- LocalProjectFlags::default(),
- cx,
- )
- });
+
+ // For remote paths, create a fresh RemoteClient through the connection
+ // pool (reusing the existing SSH transport) and build a temporary
+ // remote project. Each RemoteClient gets its own server-side headless
+ // project, so there are no RPC routing conflicts with other projects.
+ let temp_project = if let Some(connection) = remote_connection_owned {
+ let remote_client = cx
+ .update(|cx| {
+ if !remote::has_active_connection(&connection, cx) {
+ anyhow::bail!("cannot open repository on disconnected remote machine");
+ }
+ Ok(remote_connection::connect_reusing_pool(connection, cx))
+ })?
+ .await?
+ .context("remote connection was canceled")?;
+
+ cx.update(|cx| {
+ Project::remote(
+ remote_client,
+ app_state.client.clone(),
+ app_state.node_runtime.clone(),
+ app_state.user_store.clone(),
+ app_state.languages.clone(),
+ app_state.fs.clone(),
+ false,
+ cx,
+ )
+ })
+ } else {
+ cx.update(|cx| {
+ Project::local(
+ app_state.client.clone(),
+ app_state.node_runtime.clone(),
+ app_state.user_store.clone(),
+ app_state.languages.clone(),
+ app_state.fs.clone(),
+ None,
+ LocalProjectFlags::default(),
+ cx,
+ )
+ })
+ };
let repo_path_for_worktree = repo_path.to_path_buf();
let create_worktree = temp_project.update(cx, |project, cx| {
@@ -377,7 +450,7 @@ async fn find_or_create_repository(
barrier
.await
.map_err(|_| anyhow!("temporary repository barrier canceled"))?;
- Ok((repo, Some(temp_project)))
+ Ok((repo, temp_project))
}
/// Re-adds the worktree to every affected project after a failed
@@ -498,9 +571,10 @@ pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Resul
// This is fatal: without the ref, git gc will eventually collect the
// WIP commits and a later restore will silently fail.
let ref_name = archived_worktree_ref_name(archived_worktree_id);
- let (main_repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx)
- .await
- .context("could not open main repo to create archive ref")?;
+ let (main_repo, _temp_project) =
+ find_or_create_repository(&root.main_repo_path, root.remote_connection.as_ref(), cx)
+ .await
+ .context("could not open main repo to create archive ref")?;
let rx = main_repo.update(cx, |repo, _cx| {
repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
});
@@ -508,6 +582,8 @@ pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Resul
.map_err(|_| anyhow!("update_ref canceled"))
.and_then(|r| r)
.with_context(|| format!("failed to create ref {ref_name} on main repo"))?;
+ // See note in `remove_root_after_worktree_removal`: this may be a live
+ // or temporary project; dropping only matters in the temporary case.
drop(_temp_project);
Ok(archived_worktree_id)
@@ -520,11 +596,14 @@ pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Resul
pub async fn rollback_persist(archived_worktree_id: i64, root: &RootPlan, cx: &mut AsyncApp) {
// Delete the git ref on main repo
if let Ok((main_repo, _temp_project)) =
- find_or_create_repository(&root.main_repo_path, cx).await
+ find_or_create_repository(&root.main_repo_path, root.remote_connection.as_ref(), cx).await
{
let ref_name = archived_worktree_ref_name(archived_worktree_id);
let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
rx.await.ok().and_then(|r| r.log_err());
+ // See note in `remove_root_after_worktree_removal`: this may be a
+ // live or temporary project; dropping only matters in the temporary
+ // case.
drop(_temp_project);
}
@@ -548,9 +627,11 @@ pub async fn rollback_persist(archived_worktree_id: i64, root: &RootPlan, cx: &m
/// unstaged state from the WIP commit trees.
pub async fn restore_worktree_via_git(
row: &ArchivedGitWorktree,
+ remote_connection: Option<&RemoteConnectionOptions>,
cx: &mut AsyncApp,
) -> Result<PathBuf> {
- let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;
+ let (main_repo, _temp_project) =
+ find_or_create_repository(&row.main_repo_path, remote_connection, cx).await?;
let worktree_path = &row.worktree_path;
let app_state = current_app_state(cx).context("no app state available")?;
@@ -581,13 +662,15 @@ pub async fn restore_worktree_via_git(
true
};
- let (wt_repo, _temp_wt_project) = match find_or_create_repository(worktree_path, cx).await {
- Ok(result) => result,
- Err(error) => {
- remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx).await;
- return Err(error);
- }
- };
+ let (wt_repo, _temp_wt_project) =
+ match find_or_create_repository(worktree_path, remote_connection, cx).await {
+ Ok(result) => result,
+ Err(error) => {
+ remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx)
+ .await;
+ return Err(error);
+ }
+ };
if let Some(branch_name) = &row.branch_name {
// Attempt to check out the branch the worktree was previously on.
@@ -708,9 +791,14 @@ async fn remove_new_worktree_on_error(
/// Deletes the git ref and DB records for a single archived worktree.
/// Used when an archived worktree is no longer referenced by any thread.
-pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
+pub async fn cleanup_archived_worktree_record(
+ row: &ArchivedGitWorktree,
+ remote_connection: Option<&RemoteConnectionOptions>,
+ cx: &mut AsyncApp,
+) {
// Delete the git ref from the main repo
- if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
+ if let Ok((main_repo, _temp_project)) =
+ find_or_create_repository(&row.main_repo_path, remote_connection, cx).await
{
let ref_name = archived_worktree_ref_name(row.id);
let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
@@ -719,7 +807,9 @@ pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mu
Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
Err(_) => log::warn!("Archive ref deletion was canceled"),
}
- // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
+ // See note in `remove_root_after_worktree_removal`: this may be a
+ // live or temporary project; dropping only matters in the temporary
+ // case.
drop(_temp_project);
}
@@ -738,6 +828,11 @@ pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mu
/// deletes the git ref and DB records.
pub async fn cleanup_thread_archived_worktrees(thread_id: ThreadId, cx: &mut AsyncApp) {
let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+ let remote_connection = store.read_with(cx, |store, _cx| {
+ store
+ .entry(thread_id)
+ .and_then(|t| t.remote_connection.clone())
+ });
let archived_worktrees = store
.read_with(cx, |store, cx| {
@@ -775,7 +870,7 @@ pub async fn cleanup_thread_archived_worktrees(thread_id: ThreadId, cx: &mut Asy
match still_referenced {
Ok(true) => {}
Ok(false) => {
- cleanup_archived_worktree_record(row, cx).await;
+ cleanup_archived_worktree_record(row, remote_connection.as_ref(), cx).await;
}
Err(error) => {
log::error!(
@@ -814,7 +909,7 @@ mod tests {
use super::*;
use fs::{FakeFs, Fs as _};
use git::repository::Worktree as GitWorktree;
- use gpui::TestAppContext;
+ use gpui::{BorrowAppContext, TestAppContext};
use project::Project;
use serde_json::json;
use settings::SettingsStore;
@@ -969,7 +1064,12 @@ mod tests {
// The main worktree should NOT produce a root plan.
workspace.read_with(cx, |_workspace, cx| {
- let plan = build_root_plan(Path::new("/project"), std::slice::from_ref(&workspace), cx);
+ let plan = build_root_plan(
+ Path::new("/project"),
+ None,
+ std::slice::from_ref(&workspace),
+ cx,
+ );
assert!(
plan.is_none(),
"build_root_plan should return None for a main worktree",
@@ -997,7 +1097,7 @@ mod tests {
Path::new("/project/.git"),
true,
GitWorktree {
- path: PathBuf::from("/linked-worktree"),
+ path: PathBuf::from("/worktrees/project/feature/project"),
ref_name: Some("refs/heads/feature".into()),
sha: "abc123".into(),
is_main: false,
@@ -1008,7 +1108,10 @@ mod tests {
let project = Project::test(
fs.clone(),
- [Path::new("/project"), Path::new("/linked-worktree")],
+ [
+ Path::new("/project"),
+ Path::new("/worktrees/project/feature/project"),
+ ],
cx,
)
.await;
@@ -1027,7 +1130,8 @@ mod tests {
workspace.read_with(cx, |_workspace, cx| {
// The linked worktree SHOULD produce a root plan.
let plan = build_root_plan(
- Path::new("/linked-worktree"),
+ Path::new("/worktrees/project/feature/project"),
+ None,
std::slice::from_ref(&workspace),
cx,
);
@@ -1036,12 +1140,19 @@ mod tests {
"build_root_plan should return Some for a linked worktree",
);
let plan = plan.unwrap();
- assert_eq!(plan.root_path, PathBuf::from("/linked-worktree"));
+ assert_eq!(
+ plan.root_path,
+ PathBuf::from("/worktrees/project/feature/project")
+ );
assert_eq!(plan.main_repo_path, PathBuf::from("/project"));
// The main worktree should still return None.
- let main_plan =
- build_root_plan(Path::new("/project"), std::slice::from_ref(&workspace), cx);
+ let main_plan = build_root_plan(
+ Path::new("/project"),
+ None,
+ std::slice::from_ref(&workspace),
+ cx,
+ );
assert!(
main_plan.is_none(),
"build_root_plan should return None for the main worktree \
@@ -1050,6 +1161,179 @@ mod tests {
});
}
+ #[gpui::test]
+ async fn test_build_root_plan_returns_none_for_external_linked_worktree(
+ cx: &mut TestAppContext,
+ ) {
+ init_test(cx);
+
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ "/project",
+ json!({
+ ".git": {},
+ "src": { "main.rs": "fn main() {}" }
+ }),
+ )
+ .await;
+ fs.set_branch_name(Path::new("/project/.git"), Some("main"));
+ fs.insert_branches(Path::new("/project/.git"), &["main", "feature"]);
+
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ true,
+ GitWorktree {
+ path: PathBuf::from("/external-worktree"),
+ ref_name: Some("refs/heads/feature".into()),
+ sha: "abc123".into(),
+ is_main: false,
+ is_bare: false,
+ },
+ )
+ .await;
+
+ let project = Project::test(
+ fs.clone(),
+ [Path::new("/project"), Path::new("/external-worktree")],
+ cx,
+ )
+ .await;
+ project
+ .update(cx, |project, cx| project.git_scans_complete(cx))
+ .await;
+
+ let multi_workspace =
+ cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+ let workspace = multi_workspace
+ .read_with(cx, |mw, _cx| mw.workspace().clone())
+ .unwrap();
+
+ cx.run_until_parked();
+
+ workspace.read_with(cx, |_workspace, cx| {
+ let plan = build_root_plan(
+ Path::new("/external-worktree"),
+ None,
+ std::slice::from_ref(&workspace),
+ cx,
+ );
+ assert!(
+ plan.is_none(),
+ "build_root_plan should return None for a linked worktree \
+ outside the Zed-managed worktrees directory",
+ );
+ });
+ }
+
+ #[gpui::test]
+ async fn test_build_root_plan_with_custom_worktree_directory(cx: &mut TestAppContext) {
+ init_test(cx);
+
+ // Override the worktree_directory setting to a non-default location.
+ // With main repo at /project and setting "../custom-worktrees", the
+ // resolved base is /custom-worktrees/project.
+ cx.update(|cx| {
+ cx.update_global::<SettingsStore, _>(|store, cx| {
+ store.update_user_settings(cx, |s| {
+ s.git.get_or_insert(Default::default()).worktree_directory =
+ Some("../custom-worktrees".to_string());
+ });
+ });
+ });
+
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ "/project",
+ json!({
+ ".git": {},
+ "src": { "main.rs": "fn main() {}" }
+ }),
+ )
+ .await;
+ fs.set_branch_name(Path::new("/project/.git"), Some("main"));
+ fs.insert_branches(Path::new("/project/.git"), &["main", "feature", "feature2"]);
+
+ // Worktree inside the custom managed directory.
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ true,
+ GitWorktree {
+ path: PathBuf::from("/custom-worktrees/project/feature/project"),
+ ref_name: Some("refs/heads/feature".into()),
+ sha: "abc123".into(),
+ is_main: false,
+ is_bare: false,
+ },
+ )
+ .await;
+
+ // Worktree outside the custom managed directory (at the default
+ // `../worktrees` location, which is not what the setting says).
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ true,
+ GitWorktree {
+ path: PathBuf::from("/worktrees/project/feature2/project"),
+ ref_name: Some("refs/heads/feature2".into()),
+ sha: "def456".into(),
+ is_main: false,
+ is_bare: false,
+ },
+ )
+ .await;
+
+ let project = Project::test(
+ fs.clone(),
+ [
+ Path::new("/project"),
+ Path::new("/custom-worktrees/project/feature/project"),
+ Path::new("/worktrees/project/feature2/project"),
+ ],
+ cx,
+ )
+ .await;
+ project
+ .update(cx, |project, cx| project.git_scans_complete(cx))
+ .await;
+
+ let multi_workspace =
+ cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+ let workspace = multi_workspace
+ .read_with(cx, |mw, _cx| mw.workspace().clone())
+ .unwrap();
+
+ cx.run_until_parked();
+
+ workspace.read_with(cx, |_workspace, cx| {
+ // Worktree inside the custom managed directory SHOULD be archivable.
+ let plan = build_root_plan(
+ Path::new("/custom-worktrees/project/feature/project"),
+ None,
+ std::slice::from_ref(&workspace),
+ cx,
+ );
+ assert!(
+ plan.is_some(),
+ "build_root_plan should return Some for a worktree inside \
+ the custom worktree_directory",
+ );
+
+ // Worktree at the default location SHOULD NOT be archivable
+ // because the setting points elsewhere.
+ let plan = build_root_plan(
+ Path::new("/worktrees/project/feature2/project"),
+ None,
+ std::slice::from_ref(&workspace),
+ cx,
+ );
+ assert!(
+ plan.is_none(),
+ "build_root_plan should return None for a worktree outside \
+ the custom worktree_directory, even if it would match the default",
+ );
+ });
+ }
+
#[gpui::test]
async fn test_remove_root_deletes_directory_and_git_metadata(cx: &mut TestAppContext) {
init_test(cx);
@@ -1070,7 +1354,7 @@ mod tests {
Path::new("/project/.git"),
true,
GitWorktree {
- path: PathBuf::from("/linked-worktree"),
+ path: PathBuf::from("/worktrees/project/feature/project"),
ref_name: Some("refs/heads/feature".into()),
sha: "abc123".into(),
is_main: false,
@@ -1081,7 +1365,10 @@ mod tests {
let project = Project::test(
fs.clone(),
- [Path::new("/project"), Path::new("/linked-worktree")],
+ [
+ Path::new("/project"),
+ Path::new("/worktrees/project/feature/project"),
+ ],
cx,
)
.await;
@@ -1101,14 +1388,18 @@ mod tests {
let root = workspace
.read_with(cx, |_workspace, cx| {
build_root_plan(
- Path::new("/linked-worktree"),
+ Path::new("/worktrees/project/feature/project"),
+ None,
std::slice::from_ref(&workspace),
cx,
)
})
.expect("should produce a root plan for the linked worktree");
- assert!(fs.is_dir(Path::new("/linked-worktree")).await);
+ assert!(
+ fs.is_dir(Path::new("/worktrees/project/feature/project"))
+ .await
+ );
// Remove the root.
let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
@@ -1116,10 +1407,10 @@ mod tests {
cx.run_until_parked();
- // The FakeFs directory should be gone (removed by the FakeGitRepository
- // backend's remove_worktree implementation).
+ // The FakeFs directory should be gone.
assert!(
- !fs.is_dir(Path::new("/linked-worktree")).await,
+ !fs.is_dir(Path::new("/worktrees/project/feature/project"))
+ .await,
"linked worktree directory should be removed from FakeFs"
);
}
@@ -1144,7 +1435,7 @@ mod tests {
Path::new("/project/.git"),
true,
GitWorktree {
- path: PathBuf::from("/linked-worktree"),
+ path: PathBuf::from("/worktrees/project/feature/project"),
ref_name: Some("refs/heads/feature".into()),
sha: "abc123".into(),
is_main: false,
@@ -1155,7 +1446,10 @@ mod tests {
let project = Project::test(
fs.clone(),
- [Path::new("/project"), Path::new("/linked-worktree")],
+ [
+ Path::new("/project"),
+ Path::new("/worktrees/project/feature/project"),
+ ],
cx,
)
.await;
@@ -1174,7 +1468,8 @@ mod tests {
let root = workspace
.read_with(cx, |_workspace, cx| {
build_root_plan(
- Path::new("/linked-worktree"),
+ Path::new("/worktrees/project/feature/project"),
+ None,
std::slice::from_ref(&workspace),
cx,
)
@@ -1185,7 +1480,7 @@ mod tests {
// remove_root, simulating the directory being deleted externally.
fs.as_ref()
.remove_dir(
- Path::new("/linked-worktree"),
+ Path::new("/worktrees/project/feature/project"),
fs::RemoveOptions {
recursive: true,
ignore_if_not_exists: false,
@@ -1193,51 +1488,127 @@ mod tests {
)
.await
.unwrap();
- assert!(!fs.as_ref().is_dir(Path::new("/linked-worktree")).await);
+ assert!(
+ !fs.as_ref()
+ .is_dir(Path::new("/worktrees/project/feature/project"))
+ .await
+ );
- // remove_root should still succeed — the std::fs::remove_dir_all
- // handles NotFound, and git worktree remove handles a missing
- // working tree directory.
+ // remove_root should still succeed — fs.remove_dir with
+ // ignore_if_not_exists handles NotFound, and git worktree remove
+ // handles a missing working tree directory.
let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
task.await
.expect("remove_root should succeed even when directory is already gone");
}
- #[test]
- fn test_remove_dir_all_deletes_real_directory() {
- let tmp = TempDir::new().unwrap();
- let worktree_dir = tmp.path().join("linked-worktree");
- std::fs::create_dir_all(worktree_dir.join("src")).unwrap();
- std::fs::write(worktree_dir.join("src/main.rs"), "fn main() {}").unwrap();
- std::fs::write(worktree_dir.join("README.md"), "# Hello").unwrap();
-
- assert!(worktree_dir.is_dir());
-
- // This is the same pattern used in remove_root_after_worktree_removal.
- match std::fs::remove_dir_all(&worktree_dir) {
- Ok(()) => {}
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
- Err(error) => panic!("unexpected error: {error}"),
- }
+ #[gpui::test]
+ async fn test_remove_root_returns_error_and_rolls_back_on_remove_dir_failure(
+ cx: &mut TestAppContext,
+ ) {
+ init_test(cx);
+
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ "/project",
+ json!({
+ ".git": {},
+ "src": { "main.rs": "fn main() {}" }
+ }),
+ )
+ .await;
+ fs.set_branch_name(Path::new("/project/.git"), Some("main"));
+ fs.insert_branches(Path::new("/project/.git"), &["main", "feature"]);
+ fs.add_linked_worktree_for_repo(
+ Path::new("/project/.git"),
+ true,
+ GitWorktree {
+ path: PathBuf::from("/worktrees/project/feature/project"),
+ ref_name: Some("refs/heads/feature".into()),
+ sha: "abc123".into(),
+ is_main: false,
+ is_bare: false,
+ },
+ )
+ .await;
+
+ let project = Project::test(
+ fs.clone(),
+ [
+ Path::new("/project"),
+ Path::new("/worktrees/project/feature/project"),
+ ],
+ cx,
+ )
+ .await;
+ project
+ .update(cx, |project, cx| project.git_scans_complete(cx))
+ .await;
+
+ let multi_workspace =
+ cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+ let workspace = multi_workspace
+ .read_with(cx, |mw, _cx| mw.workspace().clone())
+ .unwrap();
+
+ cx.run_until_parked();
+
+ let root = workspace
+ .read_with(cx, |_workspace, cx| {
+ build_root_plan(
+ Path::new("/worktrees/project/feature/project"),
+ None,
+ std::slice::from_ref(&workspace),
+ cx,
+ )
+ })
+ .expect("should produce a root plan for the linked worktree");
+
+ // Replace the worktree directory with a file so that fs.remove_dir
+ // fails with a "not a directory" error.
+ let worktree_path = Path::new("/worktrees/project/feature/project");
+ fs.remove_dir(
+ worktree_path,
+ fs::RemoveOptions {
+ recursive: true,
+ ignore_if_not_exists: false,
+ },
+ )
+ .await
+ .unwrap();
+ fs.create_file(worktree_path, fs::CreateOptions::default())
+ .await
+ .unwrap();
assert!(
- !worktree_dir.exists(),
- "worktree directory should be deleted"
+ fs.is_file(worktree_path).await,
+ "path should now be a file, not a directory"
);
- }
- #[test]
- fn test_remove_dir_all_handles_not_found() {
- let tmp = TempDir::new().unwrap();
- let nonexistent = tmp.path().join("does-not-exist");
+ let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
+ let result = task.await;
- assert!(!nonexistent.exists());
+ assert!(
+ result.is_err(),
+ "remove_root should return an error when fs.remove_dir fails"
+ );
+ let error_message = format!("{:#}", result.unwrap_err());
+ assert!(
+ error_message.contains("failed to delete worktree directory"),
+ "error should mention the directory deletion failure, got: {error_message}"
+ );
- // Should not panic — NotFound is handled gracefully.
- match std::fs::remove_dir_all(&nonexistent) {
- Ok(()) => panic!("expected NotFound error"),
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => {}
- Err(error) => panic!("unexpected error: {error}"),
- }
+ cx.run_until_parked();
+
+ // After rollback, the worktree should be re-added to the project.
+ let has_worktree = project.read_with(cx, |project, cx| {
+ project
+ .worktrees(cx)
+ .any(|wt| wt.read(cx).abs_path().as_ref() == worktree_path)
+ });
+ assert!(
+ has_worktree,
+ "rollback should have re-added the worktree to the project"
+ );
}
}
@@ -439,6 +439,10 @@ impl Server {
.add_request_handler(forward_mutating_project_request::<proto::GitCreateWorktree>)
.add_request_handler(disallow_guest_request::<proto::GitRemoveWorktree>)
.add_request_handler(disallow_guest_request::<proto::GitRenameWorktree>)
+ .add_request_handler(forward_mutating_project_request::<proto::GitEditRef>)
+ .add_request_handler(forward_mutating_project_request::<proto::GitRepairWorktrees>)
+ .add_request_handler(disallow_guest_request::<proto::GitCreateArchiveCheckpoint>)
+ .add_request_handler(disallow_guest_request::<proto::GitRestoreArchiveCheckpoint>)
.add_request_handler(forward_mutating_project_request::<proto::CheckForPushedCommits>)
.add_request_handler(forward_mutating_project_request::<proto::ToggleLspLogs>)
.add_message_handler(broadcast_project_message_from_host::<proto::LanguageServerLog>)
@@ -10,8 +10,8 @@ use git::{
repository::{
AskPassDelegate, Branch, CommitDataReader, CommitDetails, CommitOptions,
CreateWorktreeTarget, FetchOptions, GRAPH_CHUNK_SIZE, GitRepository,
- GitRepositoryCheckpoint, InitialGraphCommitData, LogOrder, LogSource, PushOptions, Remote,
- RepoPath, ResetMode, SearchCommitArgs, Worktree,
+ GitRepositoryCheckpoint, InitialGraphCommitData, LogOrder, LogSource, PushOptions, RefEdit,
+ Remote, RepoPath, ResetMode, SearchCommitArgs, Worktree,
},
stash::GitStash,
status::{
@@ -109,6 +109,20 @@ impl FakeGitRepository {
.boxed()
}
+ fn edit_ref(&self, edit: RefEdit) -> BoxFuture<'_, Result<()>> {
+ self.with_state_async(true, move |state| {
+ match edit {
+ RefEdit::Update { ref_name, commit } => {
+ state.refs.insert(ref_name, commit);
+ }
+ RefEdit::Delete { ref_name } => {
+ state.refs.remove(&ref_name);
+ }
+ }
+ Ok(())
+ })
+ }
+
/// Scans `.git/worktrees/*/gitdir` to find the admin entry directory for a
/// worktree at the given checkout path. Used when the working tree directory
/// has already been deleted and we can't read its `.git` pointer file.
@@ -1437,17 +1451,11 @@ impl GitRepository for FakeGitRepository {
}
fn update_ref(&self, ref_name: String, commit: String) -> BoxFuture<'_, Result<()>> {
- self.with_state_async(true, move |state| {
- state.refs.insert(ref_name, commit);
- Ok(())
- })
+ self.edit_ref(RefEdit::Update { ref_name, commit })
}
fn delete_ref(&self, ref_name: String) -> BoxFuture<'_, Result<()>> {
- self.with_state_async(true, move |state| {
- state.refs.remove(&ref_name);
- Ok(())
- })
+ self.edit_ref(RefEdit::Delete { ref_name })
}
fn repair_worktrees(&self) -> BoxFuture<'_, Result<()>> {
@@ -1043,6 +1043,25 @@ pub struct RealGitRepository {
is_trusted: Arc<AtomicBool>,
}
+#[derive(Debug)]
+pub enum RefEdit {
+ Update { ref_name: String, commit: String },
+ Delete { ref_name: String },
+}
+
+impl RefEdit {
+ fn into_args(self) -> Vec<OsString> {
+ match self {
+ Self::Update { ref_name, commit } => {
+ vec!["update-ref".into(), ref_name.into(), commit.into()]
+ }
+ Self::Delete { ref_name } => {
+ vec!["update-ref".into(), "-d".into(), ref_name.into()]
+ }
+ }
+ }
+}
+
impl RealGitRepository {
pub fn new(
dotgit_path: &Path,
@@ -1089,6 +1108,17 @@ impl RealGitRepository {
))
}
+ fn edit_ref(&self, edit: RefEdit) -> BoxFuture<'_, Result<()>> {
+ let git_binary = self.git_binary();
+ self.executor
+ .spawn(async move {
+ let args = edit.into_args();
+ git_binary?.run(&args).await?;
+ Ok(())
+ })
+ .boxed()
+ }
+
async fn any_git_binary_help_output(&self) -> SharedString {
if let Some(output) = self.any_git_binary_help_output.lock().clone() {
return output;
@@ -2316,25 +2346,11 @@ impl GitRepository for RealGitRepository {
}
fn update_ref(&self, ref_name: String, commit: String) -> BoxFuture<'_, Result<()>> {
- let git_binary = self.git_binary();
- self.executor
- .spawn(async move {
- let args: Vec<OsString> = vec!["update-ref".into(), ref_name.into(), commit.into()];
- git_binary?.run(&args).await?;
- Ok(())
- })
- .boxed()
+ self.edit_ref(RefEdit::Update { ref_name, commit })
}
fn delete_ref(&self, ref_name: String) -> BoxFuture<'_, Result<()>> {
- let git_binary = self.git_binary();
- self.executor
- .spawn(async move {
- let args: Vec<OsString> = vec!["update-ref".into(), "-d".into(), ref_name.into()];
- git_binary?.run(&args).await?;
- Ok(())
- })
- .boxed()
+ self.edit_ref(RefEdit::Delete { ref_name })
}
fn repair_worktrees(&self) -> BoxFuture<'_, Result<()>> {
@@ -17,7 +17,7 @@ use buffer_diff::{BufferDiff, BufferDiffEvent};
use client::ProjectId;
use collections::HashMap;
pub use conflict_set::{ConflictRegion, ConflictSet, ConflictSetSnapshot, ConflictSetUpdate};
-use fs::Fs;
+use fs::{Fs, RemoveOptions};
use futures::{
FutureExt, StreamExt,
channel::{
@@ -563,7 +563,9 @@ impl GitStore {
client.add_entity_request_handler(Self::handle_reset);
client.add_entity_request_handler(Self::handle_show);
client.add_entity_request_handler(Self::handle_create_checkpoint);
+ client.add_entity_request_handler(Self::handle_create_archive_checkpoint);
client.add_entity_request_handler(Self::handle_restore_checkpoint);
+ client.add_entity_request_handler(Self::handle_restore_archive_checkpoint);
client.add_entity_request_handler(Self::handle_compare_checkpoints);
client.add_entity_request_handler(Self::handle_diff_checkpoints);
client.add_entity_request_handler(Self::handle_load_commit_diff);
@@ -589,6 +591,8 @@ impl GitStore {
client.add_entity_request_handler(Self::handle_remove_worktree);
client.add_entity_request_handler(Self::handle_rename_worktree);
client.add_entity_request_handler(Self::handle_get_head_sha);
+ client.add_entity_request_handler(Self::handle_edit_ref);
+ client.add_entity_request_handler(Self::handle_repair_worktrees);
}
pub fn is_local(&self) -> bool {
@@ -2519,6 +2523,46 @@ impl GitStore {
Ok(proto::GitGetHeadShaResponse { sha: head_sha })
}
+ async fn handle_edit_ref(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::GitEditRef>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::Ack> {
+ let repository_id = RepositoryId::from_proto(envelope.payload.repository_id);
+ let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?;
+ let ref_name = envelope.payload.ref_name;
+ let commit = match envelope.payload.action {
+ Some(proto::git_edit_ref::Action::UpdateToCommit(sha)) => Some(sha),
+ Some(proto::git_edit_ref::Action::Delete(_)) => None,
+ None => anyhow::bail!("GitEditRef missing action"),
+ };
+
+ repository_handle
+ .update(&mut cx, |repository_handle, _| {
+ repository_handle.edit_ref(ref_name, commit)
+ })
+ .await??;
+
+ Ok(proto::Ack {})
+ }
+
+ async fn handle_repair_worktrees(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::GitRepairWorktrees>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::Ack> {
+ let repository_id = RepositoryId::from_proto(envelope.payload.repository_id);
+ let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?;
+
+ repository_handle
+ .update(&mut cx, |repository_handle, _| {
+ repository_handle.repair_worktrees()
+ })
+ .await??;
+
+ Ok(proto::Ack {})
+ }
+
async fn handle_get_branches(
this: Entity<Self>,
envelope: TypedEnvelope<proto::GitGetBranches>,
@@ -2705,6 +2749,26 @@ impl GitStore {
})
}
+ async fn handle_create_archive_checkpoint(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::GitCreateArchiveCheckpoint>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::GitCreateArchiveCheckpointResponse> {
+ let repository_id = RepositoryId::from_proto(envelope.payload.repository_id);
+ let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?;
+
+ let (staged_commit_sha, unstaged_commit_sha) = repository_handle
+ .update(&mut cx, |repository, _| {
+ repository.create_archive_checkpoint()
+ })
+ .await??;
+
+ Ok(proto::GitCreateArchiveCheckpointResponse {
+ staged_commit_sha,
+ unstaged_commit_sha,
+ })
+ }
+
async fn handle_restore_checkpoint(
this: Entity<Self>,
envelope: TypedEnvelope<proto::GitRestoreCheckpoint>,
@@ -2726,6 +2790,25 @@ impl GitStore {
Ok(proto::Ack {})
}
+ async fn handle_restore_archive_checkpoint(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::GitRestoreArchiveCheckpoint>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::Ack> {
+ let repository_id = RepositoryId::from_proto(envelope.payload.repository_id);
+ let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?;
+ let staged_commit_sha = envelope.payload.staged_commit_sha;
+ let unstaged_commit_sha = envelope.payload.unstaged_commit_sha;
+
+ repository_handle
+ .update(&mut cx, |repository, _| {
+ repository.restore_archive_checkpoint(staged_commit_sha, unstaged_commit_sha)
+ })
+ .await??;
+
+ Ok(proto::Ack {})
+ }
+
async fn handle_compare_checkpoints(
this: Entity<Self>,
envelope: TypedEnvelope<proto::GitCompareCheckpoints>,
@@ -6147,59 +6230,86 @@ impl Repository {
})
}
- pub fn update_ref(
+ fn edit_ref(
&mut self,
ref_name: String,
- commit: String,
+ commit: Option<String>,
) -> oneshot::Receiver<Result<()>> {
+ let id = self.id;
self.send_job(None, move |repo, _cx| async move {
match repo {
- RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
- backend.update_ref(ref_name, commit).await
- }
- RepositoryState::Remote(_) => {
- anyhow::bail!("update_ref is not supported for remote repositories")
+ RepositoryState::Local(LocalRepositoryState { backend, .. }) => match commit {
+ Some(commit) => backend.update_ref(ref_name, commit).await,
+ None => backend.delete_ref(ref_name).await,
+ },
+ RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
+ let action = match commit {
+ Some(sha) => proto::git_edit_ref::Action::UpdateToCommit(sha),
+ None => {
+ proto::git_edit_ref::Action::Delete(proto::git_edit_ref::DeleteRef {})
+ }
+ };
+ client
+ .request(proto::GitEditRef {
+ project_id: project_id.0,
+ repository_id: id.to_proto(),
+ ref_name,
+ action: Some(action),
+ })
+ .await?;
+ Ok(())
}
}
})
}
+ pub fn update_ref(
+ &mut self,
+ ref_name: String,
+ commit: String,
+ ) -> oneshot::Receiver<Result<()>> {
+ self.edit_ref(ref_name, Some(commit))
+ }
+
pub fn delete_ref(&mut self, ref_name: String) -> oneshot::Receiver<Result<()>> {
- self.send_job(None, move |repo, _cx| async move {
- match repo {
- RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
- backend.delete_ref(ref_name).await
- }
- RepositoryState::Remote(_) => {
- anyhow::bail!("delete_ref is not supported for remote repositories")
- }
- }
- })
+ self.edit_ref(ref_name, None)
}
pub fn repair_worktrees(&mut self) -> oneshot::Receiver<Result<()>> {
+ let id = self.id;
self.send_job(None, move |repo, _cx| async move {
match repo {
RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
backend.repair_worktrees().await
}
- RepositoryState::Remote(_) => {
- anyhow::bail!("repair_worktrees is not supported for remote repositories")
+ RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
+ client
+ .request(proto::GitRepairWorktrees {
+ project_id: project_id.0,
+ repository_id: id.to_proto(),
+ })
+ .await?;
+ Ok(())
}
}
})
}
pub fn create_archive_checkpoint(&mut self) -> oneshot::Receiver<Result<(String, String)>> {
+ let id = self.id;
self.send_job(None, move |repo, _cx| async move {
match repo {
RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
backend.create_archive_checkpoint().await
}
- RepositoryState::Remote(_) => {
- anyhow::bail!(
- "create_archive_checkpoint is not supported for remote repositories"
- )
+ RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
+ let response = client
+ .request(proto::GitCreateArchiveCheckpoint {
+ project_id: project_id.0,
+ repository_id: id.to_proto(),
+ })
+ .await?;
+ Ok((response.staged_commit_sha, response.unstaged_commit_sha))
}
}
})
@@ -6210,6 +6320,7 @@ impl Repository {
staged_sha: String,
unstaged_sha: String,
) -> oneshot::Receiver<Result<()>> {
+ let id = self.id;
self.send_job(None, move |repo, _cx| async move {
match repo {
RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
@@ -6217,10 +6328,16 @@ impl Repository {
.restore_archive_checkpoint(staged_sha, unstaged_sha)
.await
}
- RepositoryState::Remote(_) => {
- anyhow::bail!(
- "restore_archive_checkpoint is not supported for remote repositories"
- )
+ RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
+ client
+ .request(proto::GitRestoreArchiveCheckpoint {
+ project_id: project_id.0,
+ repository_id: id.to_proto(),
+ staged_commit_sha: staged_sha,
+ unstaged_commit_sha: unstaged_sha,
+ })
+ .await?;
+ Ok(())
}
}
})
@@ -6232,7 +6349,34 @@ impl Repository {
Some(format!("git worktree remove: {}", path.display()).into()),
move |repo, _cx| async move {
match repo {
- RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
+ RepositoryState::Local(LocalRepositoryState { backend, fs, .. }) => {
+ // When forcing, delete the worktree directory ourselves before
+ // invoking git. `git worktree remove` can remove the admin
+ // metadata in `.git/worktrees/<name>` but fail to delete the
+ // working directory (it continues past directory-removal errors),
+ // leaving an orphaned folder on disk. Deleting first guarantees
+ // the directory is gone, and `git worktree remove --force`
+ // tolerates a missing working tree while cleaning up the admin
+ // entry. We keep this inside the `Local` arm so that for remote
+ // projects the deletion runs on the remote machine (where the
+ // `GitRemoveWorktree` RPC is handled against the local repo on
+ // the headless server) using its own filesystem.
+ //
+ // Non-force removals are left untouched: `git worktree remove`
+ // must see the dirty working tree to refuse the operation.
+ if force {
+ fs.remove_dir(
+ &path,
+ RemoveOptions {
+ recursive: true,
+ ignore_if_not_exists: true,
+ },
+ )
+ .await
+ .with_context(|| {
+ format!("failed to delete worktree directory '{}'", path.display())
+ })?;
+ }
backend.remove_worktree(path, force).await
}
RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
@@ -577,6 +577,22 @@ message GitGetHeadShaResponse {
optional string sha = 1;
}
+message GitEditRef {
+ uint64 project_id = 1;
+ uint64 repository_id = 2;
+ string ref_name = 3;
+ oneof action {
+ string update_to_commit = 4;
+ DeleteRef delete = 5;
+ }
+ message DeleteRef {}
+}
+
+message GitRepairWorktrees {
+ uint64 project_id = 1;
+ uint64 repository_id = 2;
+}
+
message GitWorktreesResponse {
repeated Worktree worktrees = 1;
}
@@ -607,12 +623,29 @@ message GitCreateCheckpointResponse {
bytes commit_sha = 1;
}
+message GitCreateArchiveCheckpoint {
+ uint64 project_id = 1;
+ uint64 repository_id = 2;
+}
+
+message GitCreateArchiveCheckpointResponse {
+ string staged_commit_sha = 1;
+ string unstaged_commit_sha = 2;
+}
+
message GitRestoreCheckpoint {
uint64 project_id = 1;
uint64 repository_id = 2;
bytes commit_sha = 3;
}
+message GitRestoreArchiveCheckpoint {
+ uint64 project_id = 1;
+ uint64 repository_id = 2;
+ string staged_commit_sha = 3;
+ string unstaged_commit_sha = 4;
+}
+
message GitCompareCheckpoints {
uint64 project_id = 1;
uint64 repository_id = 2;
@@ -476,7 +476,12 @@ message Envelope {
GitDiffCheckpoints git_diff_checkpoints = 438;
GitDiffCheckpointsResponse git_diff_checkpoints_response = 439;
GitGetHeadSha git_get_head_sha = 440;
- GitGetHeadShaResponse git_get_head_sha_response = 441; // current max
+ GitGetHeadShaResponse git_get_head_sha_response = 441;
+ GitRepairWorktrees git_repair_worktrees = 442;
+ GitEditRef git_edit_ref = 443;
+ GitCreateArchiveCheckpoint git_create_archive_checkpoint = 444;
+ GitCreateArchiveCheckpointResponse git_create_archive_checkpoint_response = 445;
+ GitRestoreArchiveCheckpoint git_restore_archive_checkpoint = 446; // current max
}
reserved 87 to 88;
@@ -296,7 +296,10 @@ messages!(
(GitFileHistoryResponse, Background),
(GitCreateCheckpoint, Background),
(GitCreateCheckpointResponse, Background),
+ (GitCreateArchiveCheckpoint, Background),
+ (GitCreateArchiveCheckpointResponse, Background),
(GitRestoreCheckpoint, Background),
+ (GitRestoreArchiveCheckpoint, Background),
(GitCompareCheckpoints, Background),
(GitCompareCheckpointsResponse, Background),
(GitDiffCheckpoints, Background),
@@ -353,6 +356,8 @@ messages!(
(GitGetWorktrees, Background),
(GitGetHeadSha, Background),
(GitGetHeadShaResponse, Background),
+ (GitEditRef, Background),
+ (GitRepairWorktrees, Background),
(GitWorktreesResponse, Background),
(GitCreateWorktree, Background),
(GitRemoveWorktree, Background),
@@ -524,7 +529,12 @@ request_messages!(
(GitShow, GitCommitDetails),
(GitFileHistory, GitFileHistoryResponse),
(GitCreateCheckpoint, GitCreateCheckpointResponse),
+ (
+ GitCreateArchiveCheckpoint,
+ GitCreateArchiveCheckpointResponse
+ ),
(GitRestoreCheckpoint, Ack),
+ (GitRestoreArchiveCheckpoint, Ack),
(GitCompareCheckpoints, GitCompareCheckpointsResponse),
(GitDiffCheckpoints, GitDiffCheckpointsResponse),
(GitReset, Ack),
@@ -561,6 +571,8 @@ request_messages!(
(RemoteStarted, Ack),
(GitGetWorktrees, GitWorktreesResponse),
(GitGetHeadSha, GitGetHeadShaResponse),
+ (GitEditRef, Ack),
+ (GitRepairWorktrees, Ack),
(GitCreateWorktree, Ack),
(GitRemoveWorktree, Ack),
(GitRenameWorktree, Ack),
@@ -753,6 +765,10 @@ entity_messages!(
NewExternalAgentVersionAvailable,
GitGetWorktrees,
GitGetHeadSha,
+ GitEditRef,
+ GitRepairWorktrees,
+ GitCreateArchiveCheckpoint,
+ GitRestoreArchiveCheckpoint,
GitCreateWorktree,
GitRemoveWorktree,
GitRenameWorktree,
@@ -594,7 +594,7 @@ pub fn dismiss_connection_modal(workspace: &Entity<Workspace>, cx: &mut gpui::As
/// Creates a [`RemoteClient`] by reusing an existing connection from the
/// global pool. No interactive UI is shown. This should only be called
/// when [`remote::has_active_connection`] returns `true`.
-fn connect_reusing_pool(
+pub fn connect_reusing_pool(
connection_options: RemoteConnectionOptions,
cx: &mut App,
) -> Task<Result<Option<Entity<RemoteClient>>>> {
@@ -1622,6 +1622,98 @@ async fn test_remote_root_repo_common_dir(cx: &mut TestAppContext, server_cx: &m
assert_eq!(common_dir, None);
}
+#[gpui::test]
+async fn test_remote_archive_git_operations_are_supported(
+ cx: &mut TestAppContext,
+ server_cx: &mut TestAppContext,
+) {
+ let fs = FakeFs::new(server_cx.executor());
+ fs.insert_tree(
+ "/project",
+ json!({
+ ".git": {},
+ "file.txt": "content",
+ }),
+ )
+ .await;
+ fs.set_branch_name(Path::new("/project/.git"), Some("main"));
+ fs.set_head_for_repo(
+ Path::new("/project/.git"),
+ &[("file.txt", "content".into())],
+ "head-sha",
+ );
+
+ let (project, _headless) = init_test(&fs, cx, server_cx).await;
+ project
+ .update(cx, |project, cx| {
+ project.find_or_create_worktree(Path::new("/project"), true, cx)
+ })
+ .await
+ .expect("should open remote worktree");
+ cx.run_until_parked();
+
+ let repository = project.read_with(cx, |project, cx| {
+ project
+ .active_repository(cx)
+ .expect("remote project should have an active repository")
+ });
+
+ let head_sha = cx
+ .update(|cx| repository.update(cx, |repository, _| repository.head_sha()))
+ .await
+ .expect("head_sha request should complete")
+ .expect("head_sha should succeed")
+ .expect("HEAD should exist");
+
+ cx.run_until_parked();
+
+ cx.update(|cx| {
+ repository.update(cx, |repository, _| {
+ repository.update_ref("refs/zed-tests/archive-checkpoint".to_string(), head_sha)
+ })
+ })
+ .await
+ .expect("update_ref request should complete")
+ .expect("update_ref should succeed for remote repository");
+
+ cx.run_until_parked();
+
+ cx.update(|cx| {
+ repository.update(cx, |repository, _| {
+ repository.delete_ref("refs/zed-tests/archive-checkpoint".to_string())
+ })
+ })
+ .await
+ .expect("delete_ref request should complete")
+ .expect("delete_ref should succeed for remote repository");
+
+ cx.run_until_parked();
+
+ cx.update(|cx| repository.update(cx, |repository, _| repository.repair_worktrees()))
+ .await
+ .expect("repair_worktrees request should complete")
+ .expect("repair_worktrees should succeed for remote repository");
+
+ cx.run_until_parked();
+
+ let (staged_commit_sha, unstaged_commit_sha) = cx
+ .update(|cx| repository.update(cx, |repository, _| repository.create_archive_checkpoint()))
+ .await
+ .expect("create_archive_checkpoint request should complete")
+ .expect("create_archive_checkpoint should succeed for remote repository");
+
+ cx.run_until_parked();
+
+ cx.update(|cx| {
+ repository.update(cx, |repository, _| {
+ repository.restore_archive_checkpoint(staged_commit_sha, unstaged_commit_sha)
+ })
+ })
+ .await
+ .expect("restore_archive_checkpoint request should complete")
+ .expect("restore_archive_checkpoint should succeed for remote repository");
+}
+
#[gpui::test]
async fn test_remote_git_diffs(cx: &mut TestAppContext, server_cx: &mut TestAppContext) {
let text_2 = "
@@ -33,6 +33,7 @@ use ui::utils::platform_title_bar_height;
use serde::{Deserialize, Serialize};
use settings::Settings as _;
+use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::path::{Path, PathBuf};
@@ -364,11 +365,7 @@ pub struct Sidebar {
/// Updated only in response to explicit user actions (clicking a
/// thread, confirming in the thread switcher, etc.) — never from
/// background data changes. Used to sort the thread switcher popup.
- thread_last_accessed: HashMap<acp::SessionId, DateTime<Utc>>,
- /// Updated when the user presses a key to send or queue a message.
- /// Used for sorting threads in the sidebar and as a secondary sort
- /// key in the thread switcher.
- thread_last_message_sent_or_queued: HashMap<agent_ui::ThreadId, DateTime<Utc>>,
+ thread_last_accessed: HashMap<ThreadId, DateTime<Utc>>,
thread_switcher: Option<Entity<ThreadSwitcher>>,
_thread_switcher_subscriptions: Vec<gpui::Subscription>,
pending_thread_activation: Option<agent_ui::ThreadId>,
@@ -462,7 +459,6 @@ impl Sidebar {
hovered_thread_index: None,
thread_last_accessed: HashMap::new(),
- thread_last_message_sent_or_queued: HashMap::new(),
thread_switcher: None,
_thread_switcher_subscriptions: Vec::new(),
pending_thread_activation: None,
@@ -676,7 +672,7 @@ impl Sidebar {
this.update_entries(cx);
}
AgentPanelEvent::MessageSentOrQueued { thread_id } => {
- this.record_thread_message_sent(thread_id);
+ this.record_thread_message_sent_or_queued(thread_id, cx);
this.update_entries(cx);
}
},
@@ -1168,8 +1164,8 @@ impl Sidebar {
}
threads.sort_by(|a, b| {
- let a_time = self.display_time(&a.metadata);
- let b_time = self.display_time(&b.metadata);
+ let a_time = Self::thread_display_time(&a.metadata);
+ let b_time = Self::thread_display_time(&b.metadata);
b_time.cmp(&a_time)
});
} else {
@@ -1320,8 +1316,6 @@ impl Sidebar {
notified_threads.retain(|id| current_thread_ids.contains(id));
self.thread_last_accessed
- .retain(|id, _| current_session_ids.contains(id));
- self.thread_last_message_sent_or_queued
.retain(|id, _| current_thread_ids.contains(id));
self.contents = SidebarContents {
@@ -2301,7 +2295,7 @@ impl Sidebar {
session_id: metadata.session_id.clone(),
workspace: workspace.clone(),
});
- self.record_thread_access(&metadata.session_id);
+ self.record_thread_access(&metadata.thread_id);
if metadata.session_id.is_some() {
self.pending_thread_activation = Some(metadata.thread_id);
@@ -2370,7 +2364,7 @@ impl Sidebar {
session_id: target_session_id.clone(),
workspace: workspace_for_entry.clone(),
});
- sidebar.record_thread_access(&target_session_id);
+ sidebar.record_thread_access(&metadata_thread_id);
sidebar.update_entries(cx);
});
}
@@ -2602,10 +2596,18 @@ impl Sidebar {
let mut path_replacements: Vec<(PathBuf, PathBuf)> = Vec::new();
for row in &archived_worktrees {
- match thread_worktree_archive::restore_worktree_via_git(row, &mut *cx).await {
+ match thread_worktree_archive::restore_worktree_via_git(
+ row,
+ metadata.remote_connection.as_ref(),
+ &mut *cx,
+ )
+ .await
+ {
Ok(restored_path) => {
thread_worktree_archive::cleanup_archived_worktree_record(
- row, &mut *cx,
+ row,
+ metadata.remote_connection.as_ref(),
+ &mut *cx,
)
.await;
path_replacements.push((row.worktree_path.clone(), restored_path));
@@ -2882,7 +2884,12 @@ impl Sidebar {
.folder_paths()
.ordered_paths()
.filter_map(|path| {
- thread_worktree_archive::build_root_plan(path, &workspaces, cx)
+ thread_worktree_archive::build_root_plan(
+ path,
+ metadata.remote_connection.as_ref(),
+ &workspaces,
+ cx,
+ )
})
.filter(|plan| {
thread_id.map_or(true, |tid| {
@@ -3387,22 +3394,37 @@ impl Sidebar {
}
}
- fn record_thread_access(&mut self, session_id: &Option<acp::SessionId>) {
- if let Some(sid) = session_id {
- self.thread_last_accessed.insert(sid.clone(), Utc::now());
- }
+ fn record_thread_access(&mut self, id: &ThreadId) {
+ self.thread_last_accessed.insert(*id, Utc::now());
+ }
+
+ fn record_thread_message_sent_or_queued(
+ &mut self,
+ thread_id: &agent_ui::ThreadId,
+ cx: &mut App,
+ ) {
+ let store = ThreadMetadataStore::global(cx);
+ store.update(cx, |store, cx| {
+ store.update_interacted_at(thread_id, Utc::now(), cx);
+ })
}
- fn record_thread_message_sent(&mut self, thread_id: &agent_ui::ThreadId) {
- self.thread_last_message_sent_or_queued
- .insert(*thread_id, Utc::now());
+ fn thread_display_time(metadata: &ThreadMetadata) -> DateTime<Utc> {
+ metadata.interacted_at.unwrap_or(metadata.updated_at)
}
- fn display_time(&self, metadata: &ThreadMetadata) -> DateTime<Utc> {
- self.thread_last_message_sent_or_queued
- .get(&metadata.thread_id)
- .copied()
- .unwrap_or(metadata.updated_at)
+ /// The sort order used by the ctrl-tab switcher
+ fn thread_cmp_for_switcher(&self, left: &ThreadMetadata, right: &ThreadMetadata) -> Ordering {
+ let sort_time = |x: &ThreadMetadata| {
+ self.thread_last_accessed
+ .get(&x.thread_id)
+ .copied()
+ .or(x.interacted_at)
+ .unwrap_or(x.updated_at)
+ };
+
+ // .reverse() = most recent first
+ sort_time(left).cmp(&sort_time(right)).reverse()
}
fn mru_threads_for_switcher(&self, cx: &App) -> Vec<ThreadSwitcherEntry> {
@@ -3436,7 +3458,8 @@ impl Sidebar {
}?;
let notified = self.contents.is_thread_notified(&thread.metadata.thread_id);
let timestamp: SharedString =
- format_history_entry_timestamp(self.display_time(&thread.metadata)).into();
+ format_history_entry_timestamp(Self::thread_display_time(&thread.metadata))
+ .into();
Some(ThreadSwitcherEntry {
session_id,
title: thread.metadata.display_title(),
@@ -3465,31 +3488,7 @@ impl Sidebar {
})
.collect();
- entries.sort_by(|a, b| {
- let a_accessed = self.thread_last_accessed.get(&a.session_id);
- let b_accessed = self.thread_last_accessed.get(&b.session_id);
-
- match (a_accessed, b_accessed) {
- (Some(a_time), Some(b_time)) => b_time.cmp(a_time),
- (Some(_), None) => std::cmp::Ordering::Less,
- (None, Some(_)) => std::cmp::Ordering::Greater,
- (None, None) => {
- let a_sent = self
- .thread_last_message_sent_or_queued
- .get(&a.metadata.thread_id);
- let b_sent = self
- .thread_last_message_sent_or_queued
- .get(&b.metadata.thread_id);
-
- match (a_sent, b_sent) {
- (Some(a_time), Some(b_time)) => b_time.cmp(a_time),
- (Some(_), None) => std::cmp::Ordering::Less,
- (None, Some(_)) => std::cmp::Ordering::Greater,
- (None, None) => b.metadata.updated_at.cmp(&a.metadata.updated_at),
- }
- }
- }
- });
+ entries.sort_by(|a, b| self.thread_cmp_for_switcher(&a.metadata, &b.metadata));
entries
}
@@ -3585,7 +3584,7 @@ impl Sidebar {
mw.retain_active_workspace(cx);
});
}
- this.record_thread_access(&metadata.session_id);
+ this.record_thread_access(&metadata.thread_id);
this.active_entry = Some(ActiveEntry {
thread_id: metadata.thread_id,
session_id: metadata.session_id.clone(),
@@ -3703,7 +3702,7 @@ impl Sidebar {
.title_bar_background
.blend(color.panel_background.opacity(0.25));
- let timestamp = format_history_entry_timestamp(self.display_time(&thread.metadata));
+ let timestamp = format_history_entry_timestamp(Self::thread_display_time(&thread.metadata));
let is_remote = thread.workspace.is_remote(cx);
@@ -3,7 +3,9 @@ use acp_thread::{AcpThread, PermissionOptions, StubAgentConnection};
use agent::ThreadStore;
use agent_ui::{
ThreadId,
- test_support::{active_session_id, open_thread_with_connection, send_message},
+ test_support::{
+ active_session_id, active_thread_id, open_thread_with_connection, send_message,
+ },
thread_metadata_store::{ThreadMetadata, WorktreePaths},
};
use chrono::DateTime;
@@ -212,6 +214,7 @@ async fn save_n_test_threads(
Some(format!("Thread {}", i + 1).into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, i).unwrap(),
None,
+ None,
project,
cx,
)
@@ -229,6 +232,7 @@ async fn save_test_thread_metadata(
Some("Test".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
project,
cx,
)
@@ -245,17 +249,103 @@ async fn save_named_thread_metadata(
Some(SharedString::from(title.to_string())),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
project,
cx,
);
cx.run_until_parked();
}
+/// Spins up a fresh remote project backed by a headless server sharing
+/// `server_fs`, opens the given worktree path on it, and returns the
+/// project together with the headless entity (which the caller must keep
+/// alive for the duration of the test) and the `RemoteConnectionOptions`
+/// used for the fake server. Passing those options back into
+/// `reuse_opts` on a subsequent call makes the new project share the
+/// same `RemoteConnectionIdentity`, matching how Zed treats multiple
+/// projects on the same SSH host.
+async fn start_remote_project(
+ server_fs: &Arc<FakeFs>,
+ worktree_path: &Path,
+ app_state: &Arc<workspace::AppState>,
+ reuse_opts: Option<&remote::RemoteConnectionOptions>,
+ cx: &mut TestAppContext,
+ server_cx: &mut TestAppContext,
+) -> (
+ Entity<project::Project>,
+ Entity<remote_server::HeadlessProject>,
+ remote::RemoteConnectionOptions,
+) {
+ // Bare `_` on the guard so it's dropped immediately; holding onto it
+ // would deadlock `connect_mock` below since the client waits on the
+ // guard before completing the mock handshake.
+ let (opts, server_session) = match reuse_opts {
+ Some(existing) => {
+ let (session, _) = remote::RemoteClient::fake_server_with_opts(existing, cx, server_cx);
+ (existing.clone(), session)
+ }
+ None => {
+ let (opts, session, _) = remote::RemoteClient::fake_server(cx, server_cx);
+ (opts, session)
+ }
+ };
+
+ server_cx.update(remote_server::HeadlessProject::init);
+ let server_executor = server_cx.executor();
+ let fs = server_fs.clone();
+ let headless = server_cx.new(|cx| {
+ remote_server::HeadlessProject::new(
+ remote_server::HeadlessAppState {
+ session: server_session,
+ fs,
+ http_client: Arc::new(http_client::BlockedHttpClient),
+ node_runtime: node_runtime::NodeRuntime::unavailable(),
+ languages: Arc::new(language::LanguageRegistry::new(server_executor.clone())),
+ extension_host_proxy: Arc::new(extension::ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
+ },
+ false,
+ cx,
+ )
+ });
+
+ let remote_client = remote::RemoteClient::connect_mock(opts.clone(), cx).await;
+ let project = cx.update(|cx| {
+ let project_client = client::Client::new(
+ Arc::new(clock::FakeSystemClock::new()),
+ http_client::FakeHttpClient::with_404_response(),
+ cx,
+ );
+ let user_store = cx.new(|cx| client::UserStore::new(project_client.clone(), cx));
+ project::Project::remote(
+ remote_client,
+ project_client,
+ node_runtime::NodeRuntime::unavailable(),
+ user_store,
+ app_state.languages.clone(),
+ app_state.fs.clone(),
+ false,
+ cx,
+ )
+ });
+
+ project
+ .update(cx, |project, cx| {
+ project.find_or_create_worktree(worktree_path, true, cx)
+ })
+ .await
+ .expect("should open remote worktree");
+ cx.run_until_parked();
+
+ (project, headless, opts)
+}
+
fn save_thread_metadata(
session_id: acp::SessionId,
title: Option<SharedString>,
updated_at: DateTime<Utc>,
created_at: Option<DateTime<Utc>>,
+ interacted_at: Option<DateTime<Utc>>,
project: &Entity<project::Project>,
cx: &mut TestAppContext,
) {
@@ -275,6 +365,7 @@ fn save_thread_metadata(
title,
updated_at,
created_at,
+ interacted_at,
worktree_paths,
archived: false,
remote_connection,
@@ -309,6 +400,7 @@ fn save_thread_metadata_with_main_paths(
title: Some(title),
updated_at,
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_path_lists(main_worktree_paths, folder_paths).unwrap(),
archived: false,
remote_connection: None,
@@ -561,6 +653,7 @@ async fn test_single_workspace_with_saved_threads(cx: &mut TestAppContext) {
Some("Fix crash in project panel".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -570,6 +663,7 @@ async fn test_single_workspace_with_saved_threads(cx: &mut TestAppContext) {
Some("Add inline diff view".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -602,6 +696,7 @@ async fn test_workspace_lifecycle(cx: &mut TestAppContext) {
Some("Thread A1".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -880,6 +975,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
title: Some("Completed thread".into()),
updated_at: Utc::now(),
created_at: Some(Utc::now()),
+ interacted_at: None,
archived: false,
remote_connection: None,
},
@@ -904,6 +1000,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
title: Some("Running thread".into()),
updated_at: Utc::now(),
created_at: Some(Utc::now()),
+ interacted_at: None,
archived: false,
remote_connection: None,
},
@@ -928,6 +1025,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
title: Some("Error thread".into()),
updated_at: Utc::now(),
created_at: Some(Utc::now()),
+ interacted_at: None,
archived: false,
remote_connection: None,
},
@@ -953,6 +1051,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
title: Some("Waiting thread".into()),
updated_at: Utc::now(),
created_at: Some(Utc::now()),
+ interacted_at: None,
archived: false,
remote_connection: None,
},
@@ -978,6 +1077,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
title: Some("Notified thread".into()),
updated_at: Utc::now(),
created_at: Some(Utc::now()),
+ interacted_at: None,
archived: false,
remote_connection: None,
},
@@ -1644,6 +1744,7 @@ async fn test_search_narrows_visible_threads_to_matches(cx: &mut TestAppContext)
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -1695,6 +1796,7 @@ async fn test_search_matches_regardless_of_case(cx: &mut TestAppContext) {
Some("Fix Crash In Project Panel".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -1738,6 +1840,7 @@ async fn test_escape_clears_search_and_restores_full_list(cx: &mut TestAppContex
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
)
@@ -1798,6 +1901,7 @@ async fn test_search_only_shows_workspace_headers_with_matches(cx: &mut TestAppC
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project_a,
cx,
)
@@ -1822,6 +1926,7 @@ async fn test_search_only_shows_workspace_headers_with_matches(cx: &mut TestAppC
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project_b,
cx,
)
@@ -1886,6 +1991,7 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) {
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project_a,
cx,
)
@@ -1910,6 +2016,7 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) {
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project_b,
cx,
)
@@ -2003,6 +2110,7 @@ async fn test_search_finds_threads_hidden_behind_view_more(cx: &mut TestAppConte
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, i).unwrap(),
None,
+ None,
&project,
cx,
)
@@ -2049,6 +2157,7 @@ async fn test_search_finds_threads_inside_collapsed_groups(cx: &mut TestAppConte
Some("Important thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -2100,6 +2209,7 @@ async fn test_search_then_keyboard_navigate_and_confirm(cx: &mut TestAppContext)
Some(title.into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, hour, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
)
@@ -2170,6 +2280,7 @@ async fn test_confirm_on_historical_thread_activates_workspace(cx: &mut TestAppC
Some("Historical Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -2229,6 +2340,7 @@ async fn test_confirm_on_historical_thread_preserves_historical_timestamp_and_or
Some("Newer Historical Thread".into()),
newer_timestamp,
Some(newer_timestamp),
+ None,
&project,
cx,
);
@@ -2240,6 +2352,7 @@ async fn test_confirm_on_historical_thread_preserves_historical_timestamp_and_or
Some("Older Historical Thread".into()),
older_timestamp,
Some(older_timestamp),
+ None,
&project,
cx,
);
@@ -2351,6 +2464,7 @@ async fn test_confirm_on_historical_thread_in_new_project_group_opens_real_threa
Some("Historical Thread in New Group".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project_b,
cx,
);
@@ -2459,6 +2573,7 @@ async fn test_click_clears_selection_and_focus_in_restores_it(cx: &mut TestAppCo
Some("Thread A".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -2468,6 +2583,7 @@ async fn test_click_clears_selection_and_focus_in_restores_it(cx: &mut TestAppCo
Some("Thread B".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -3261,6 +3377,7 @@ async fn test_two_worktree_workspaces_absorbed_when_main_added(cx: &mut TestAppC
Some("Thread A".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project_a,
cx,
);
@@ -3269,6 +3386,7 @@ async fn test_two_worktree_workspaces_absorbed_when_main_added(cx: &mut TestAppC
Some("Thread B".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 1).unwrap(),
None,
+ None,
&project_b,
cx,
);
@@ -4122,6 +4240,7 @@ async fn test_activate_archived_thread_with_saved_paths_activates_matching_works
title: Some("Archived Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
"/project-b",
)])),
@@ -4190,6 +4309,7 @@ async fn test_activate_archived_thread_cwd_fallback_with_matching_workspace(
title: Some("CWD Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[
std::path::PathBuf::from("/project-b"),
])),
@@ -4256,6 +4376,7 @@ async fn test_activate_archived_thread_no_paths_no_cwd_uses_active_workspace(
title: Some("Contextless Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::default(),
archived: false,
remote_connection: None,
@@ -4312,6 +4433,7 @@ async fn test_activate_archived_thread_saved_paths_opens_new_workspace(cx: &mut
title: Some("New WS Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&path_list_b),
archived: false,
remote_connection: None,
@@ -4367,6 +4489,7 @@ async fn test_activate_archived_thread_reuses_workspace_in_another_window(cx: &m
title: Some("Cross Window Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
"/project-b",
)])),
@@ -4447,6 +4570,7 @@ async fn test_activate_archived_thread_reuses_workspace_in_another_window_with_t
title: Some("Cross Window Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
"/project-b",
)])),
@@ -4530,6 +4654,7 @@ async fn test_activate_archived_thread_prefers_current_window_for_matching_paths
title: Some("Current Window Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
"/project-a",
)])),
@@ -4664,6 +4789,7 @@ async fn test_archive_thread_uses_next_threads_own_workspace(cx: &mut TestAppCon
Some("Thread 2".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&main_project,
cx,
);
@@ -4676,6 +4802,7 @@ async fn test_archive_thread_uses_next_threads_own_workspace(cx: &mut TestAppCon
Some("Thread 1".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -4757,7 +4884,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
.await;
fs.insert_tree(
- "/wt-feature-a",
+ "/worktrees/project/feature-a/project",
serde_json::json!({
".git": "gitdir: /project/.git/worktrees/feature-a",
"src": {},
@@ -4769,7 +4896,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
Path::new("/project/.git"),
false,
git::repository::Worktree {
- path: PathBuf::from("/wt-feature-a"),
+ path: PathBuf::from("/worktrees/project/feature-a/project"),
ref_name: Some("refs/heads/feature-a".into()),
sha: "abc".into(),
is_main: false,
@@ -4781,7 +4908,12 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
cx.update(|cx| <dyn fs::Fs>::set_global(fs.clone(), cx));
let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await;
- let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await;
+ let worktree_project = project::Project::test(
+ fs.clone(),
+ ["/worktrees/project/feature-a/project".as_ref()],
+ cx,
+ )
+ .await;
main_project
.update(cx, |p, cx| p.git_scans_complete(cx))
@@ -4804,6 +4936,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
Some("Main Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&main_project,
cx,
);
@@ -4815,6 +4948,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
Some("Worktree Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -4851,7 +4985,8 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
// The linked worktree checkout directory should also be removed from disk.
assert!(
- !fs.is_dir(Path::new("/wt-feature-a")).await,
+ !fs.is_dir(Path::new("/worktrees/project/feature-a/project"))
+ .await,
"linked worktree directory should be removed from disk after archiving its last thread"
);
@@ -4885,7 +5020,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
});
assert_eq!(
archived_paths.paths(),
- &[PathBuf::from("/wt-feature-a")],
+ &[PathBuf::from("/worktrees/project/feature-a/project")],
"archived thread must retain its folder_paths for restore"
);
}
@@ -4979,6 +5114,7 @@ async fn test_restore_worktree_when_branch_has_moved(cx: &mut TestAppContext) {
unstaged_commit_hash: unstaged_hash,
original_commit_hash: "original-sha".to_string(),
},
+ None,
&mut cx,
)
.await
@@ -5087,6 +5223,7 @@ async fn test_restore_worktree_when_branch_has_not_moved(cx: &mut TestAppContext
unstaged_commit_hash: unstaged_hash,
original_commit_hash: "original-sha".to_string(),
},
+ None,
&mut cx,
)
.await
@@ -5187,6 +5324,7 @@ async fn test_restore_worktree_when_branch_does_not_exist(cx: &mut TestAppContex
unstaged_commit_hash: unstaged_hash,
original_commit_hash: "original-sha".to_string(),
},
+ None,
&mut cx,
)
.await
@@ -5275,6 +5413,7 @@ async fn test_restore_worktree_thread_uses_main_repo_project_group_key(cx: &mut
Some("Worktree Thread C".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -5421,6 +5560,7 @@ async fn test_archive_last_worktree_thread_not_blocked_by_remote_thread_at_same_
Some("Main Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&main_project,
cx,
);
@@ -5432,6 +5572,7 @@ async fn test_archive_last_worktree_thread_not_blocked_by_remote_thread_at_same_
Some("Local Worktree Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -5449,6 +5590,7 @@ async fn test_archive_last_worktree_thread_not_blocked_by_remote_thread_at_same_
title: Some("Remote Worktree Thread".into()),
updated_at: chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
"/wt-feature-a",
)])),
@@ -5588,6 +5730,7 @@ async fn test_linked_worktree_threads_not_duplicated_across_groups(cx: &mut Test
Some("Worktree Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -5614,6 +5757,16 @@ async fn test_linked_worktree_threads_not_duplicated_across_groups(cx: &mut Test
);
}
+fn thread_id_for(session_id: &acp::SessionId, cx: &mut TestAppContext) -> ThreadId {
+ cx.read(|cx| {
+ ThreadMetadataStore::global(cx)
+ .read(cx)
+ .entry_by_session(session_id)
+ .map(|m| m.thread_id)
+ .expect("thread metadata should exist")
+ })
+}
+
#[gpui::test]
async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
let project = init_test_project_with_agent_panel("/my-project", cx).await;
@@ -5622,7 +5775,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
let (sidebar, panel) = setup_sidebar_with_agent_panel(&multi_workspace, cx);
let switcher_ids =
- |sidebar: &Entity<Sidebar>, cx: &mut gpui::VisualTestContext| -> Vec<acp::SessionId> {
+ |sidebar: &Entity<Sidebar>, cx: &mut gpui::VisualTestContext| -> Vec<ThreadId> {
sidebar.read_with(cx, |sidebar, cx| {
let switcher = sidebar
.thread_switcher
@@ -5632,13 +5785,13 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
.read(cx)
.entries()
.iter()
- .map(|e| e.session_id.clone())
+ .map(|e| e.metadata.thread_id)
.collect()
})
};
let switcher_selected_id =
- |sidebar: &Entity<Sidebar>, cx: &mut gpui::VisualTestContext| -> acp::SessionId {
+ |sidebar: &Entity<Sidebar>, cx: &mut gpui::VisualTestContext| -> ThreadId {
sidebar.read_with(cx, |sidebar, cx| {
let switcher = sidebar
.thread_switcher
@@ -5647,8 +5800,8 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
let s = switcher.read(cx);
s.selected_entry()
.expect("should have selection")
- .session_id
- .clone()
+ .metadata
+ .thread_id
})
};
@@ -5662,11 +5815,13 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
open_thread_with_connection(&panel, connection_c, cx);
send_message(&panel, cx);
let session_id_c = active_session_id(&panel, cx);
+ let thread_id_c = active_thread_id(&panel, cx);
save_thread_metadata(
session_id_c.clone(),
Some("Thread C".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap()),
+ None,
&project,
cx,
);
@@ -5678,11 +5833,13 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
open_thread_with_connection(&panel, connection_b, cx);
send_message(&panel, cx);
let session_id_b = active_session_id(&panel, cx);
+ let thread_id_b = active_thread_id(&panel, cx);
save_thread_metadata(
session_id_b.clone(),
Some("Thread B".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap()),
+ None,
&project,
cx,
);
@@ -5694,11 +5851,13 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
open_thread_with_connection(&panel, connection_a, cx);
send_message(&panel, cx);
let session_id_a = active_session_id(&panel, cx);
+ let thread_id_a = active_thread_id(&panel, cx);
save_thread_metadata(
session_id_a.clone(),
Some("Thread A".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap(),
Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap()),
+ None,
&project,
cx,
);
@@ -5720,14 +5879,10 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
// then B, then C.
assert_eq!(
switcher_ids(&sidebar, cx),
- vec![
- session_id_a.clone(),
- session_id_b.clone(),
- session_id_c.clone()
- ],
+ vec![thread_id_a, thread_id_b, thread_id_c,],
);
// First ctrl-tab selects the second entry (B).
- assert_eq!(switcher_selected_id(&sidebar, cx), session_id_b);
+ assert_eq!(switcher_selected_id(&sidebar, cx), thread_id_b);
// Dismiss the switcher without confirming.
sidebar.update_in(cx, |sidebar, _window, cx| {
@@ -5754,7 +5909,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
.update(cx, |s, cx| s.cycle_selection(cx));
});
cx.run_until_parked();
- assert_eq!(switcher_selected_id(&sidebar, cx), session_id_c);
+ assert_eq!(switcher_selected_id(&sidebar, cx), thread_id_c);
assert!(sidebar.update(cx, |sidebar, _cx| sidebar.thread_last_accessed.is_empty()));
@@ -5781,7 +5936,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
.cloned()
.collect::<Vec<_>>();
assert_eq!(last_accessed.len(), 1);
- assert!(last_accessed.contains(&session_id_c));
+ assert!(last_accessed.contains(&thread_id_c));
assert!(
is_active_session(&sidebar, &session_id_c),
"active_entry should be Thread({session_id_c:?})"
@@ -5795,11 +5950,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
assert_eq!(
switcher_ids(&sidebar, cx),
- vec![
- session_id_c.clone(),
- session_id_a.clone(),
- session_id_b.clone()
- ],
+ vec![thread_id_c, thread_id_a, thread_id_b],
);
// Confirm on Thread A.
@@ -5817,8 +5968,8 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
.cloned()
.collect::<Vec<_>>();
assert_eq!(last_accessed.len(), 2);
- assert!(last_accessed.contains(&session_id_c));
- assert!(last_accessed.contains(&session_id_a));
+ assert!(last_accessed.contains(&thread_id_c));
+ assert!(last_accessed.contains(&thread_id_a));
assert!(
is_active_session(&sidebar, &session_id_a),
"active_entry should be Thread({session_id_a:?})"
@@ -5832,11 +5983,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
assert_eq!(
switcher_ids(&sidebar, cx),
- vec![
- session_id_a.clone(),
- session_id_c.clone(),
- session_id_b.clone(),
- ],
+ vec![thread_id_a, thread_id_c, thread_id_b,],
);
sidebar.update_in(cx, |sidebar, _window, cx| {
@@ -5860,9 +6007,9 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
.cloned()
.collect::<Vec<_>>();
assert_eq!(last_accessed.len(), 3);
- assert!(last_accessed.contains(&session_id_c));
- assert!(last_accessed.contains(&session_id_a));
- assert!(last_accessed.contains(&session_id_b));
+ assert!(last_accessed.contains(&thread_id_c));
+ assert!(last_accessed.contains(&thread_id_a));
+ assert!(last_accessed.contains(&thread_id_b));
assert!(
is_active_session(&sidebar, &session_id_b),
"active_entry should be Thread({session_id_b:?})"
@@ -5876,6 +6023,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
Some("Historical Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap(),
Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap()),
+ None,
&project,
cx,
);
@@ -5894,16 +6042,12 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
// last_message_sent_or_queued. So for the accessed threads (tier 1) the
// sort key is last_accessed_at; for Historical Thread (tier 3) it's created_at.
let session_id_hist = acp::SessionId::new(Arc::from("thread-historical"));
+ let thread_id_hist = thread_id_for(&session_id_hist, cx);
let ids = switcher_ids(&sidebar, cx);
assert_eq!(
ids,
- vec![
- session_id_b.clone(),
- session_id_a.clone(),
- session_id_c.clone(),
- session_id_hist.clone()
- ],
+ vec![thread_id_b, thread_id_a, thread_id_c, thread_id_hist],
);
sidebar.update_in(cx, |sidebar, _window, cx| {
@@ -5917,6 +6061,7 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
Some("Old Historical Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2023, 6, 1, 0, 0, 0).unwrap(),
Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2023, 6, 1, 0, 0, 0).unwrap()),
+ None,
&project,
cx,
);
@@ -5929,15 +6074,16 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) {
// Both historical threads have no access or message times. They should
// appear after accessed threads, sorted by created_at (newest first).
let session_id_old_hist = acp::SessionId::new(Arc::from("thread-old-historical"));
+ let thread_id_old_hist = thread_id_for(&session_id_old_hist, cx);
let ids = switcher_ids(&sidebar, cx);
assert_eq!(
ids,
vec![
- session_id_b,
- session_id_a,
- session_id_c,
- session_id_hist,
- session_id_old_hist,
+ thread_id_b,
+ thread_id_a,
+ thread_id_c,
+ thread_id_hist,
+ thread_id_old_hist,
],
);
@@ -5959,6 +6105,7 @@ async fn test_archive_thread_keeps_metadata_but_hides_from_sidebar(cx: &mut Test
Some("Thread To Archive".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -6214,6 +6361,7 @@ async fn test_unarchive_first_thread_in_group_does_not_create_spurious_draft(
title: Some("Unarchived Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&path_list_b),
archived: true,
remote_connection: None,
@@ -6306,6 +6454,7 @@ async fn test_unarchive_into_new_workspace_does_not_create_duplicate_real_thread
title: Some("Unarchived Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&path_list_b),
archived: true,
remote_connection: None,
@@ -6531,6 +6680,7 @@ async fn test_unarchive_into_inactive_existing_workspace_does_not_leave_active_d
title: Some("Restored In Inactive Workspace".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[
PathBuf::from("/project-b"),
])),
@@ -6936,6 +7086,7 @@ async fn test_archived_threads_excluded_from_sidebar_entries(cx: &mut TestAppCon
Some("Visible Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -6946,6 +7097,7 @@ async fn test_archived_threads_excluded_from_sidebar_entries(cx: &mut TestAppCon
Some("Archived Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -7091,6 +7243,7 @@ async fn test_archive_last_thread_on_linked_worktree_does_not_create_new_thread_
Some("Ochre Drift Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -7102,6 +7255,7 @@ async fn test_archive_last_thread_on_linked_worktree_does_not_create_new_thread_
Some("Main Project Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&main_project,
cx,
);
@@ -7257,6 +7411,7 @@ async fn test_archive_last_thread_on_linked_worktree_with_no_siblings_leaves_gro
Some("Ochre Drift Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -7373,6 +7528,7 @@ async fn test_unarchive_linked_worktree_thread_into_project_group_shows_only_res
title: Some("Unarchived Linked Thread".into()),
updated_at: Utc::now(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_path_lists(
main_paths.clone(),
folder_paths.clone(),
@@ -7556,6 +7712,7 @@ async fn test_archive_thread_on_linked_worktree_selects_sibling_thread(cx: &mut
Some("Ochre Drift Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
None,
+ None,
&worktree_project,
cx,
);
@@ -7567,6 +7724,7 @@ async fn test_archive_thread_on_linked_worktree_selects_sibling_thread(cx: &mut
Some("Main Project Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&main_project,
cx,
);
@@ -8034,6 +8192,7 @@ async fn test_legacy_thread_with_canonical_path_opens_main_repo_workspace(cx: &m
title: Some("Legacy Main Thread".into()),
updated_at: chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
"/project",
)])),
@@ -8492,6 +8651,7 @@ async fn test_non_archive_thread_paths_migrate_on_worktree_add_and_remove(cx: &m
Some("Historical 1".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -8500,6 +8660,7 @@ async fn test_non_archive_thread_paths_migrate_on_worktree_add_and_remove(cx: &m
Some("Historical 2".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 1).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -8667,6 +8828,7 @@ async fn test_worktree_add_only_regroups_threads_for_changed_workspace(cx: &mut
Some("Main Thread".into()),
time_main,
Some(time_main),
+ None,
&main_project,
cx,
);
@@ -8675,6 +8837,7 @@ async fn test_worktree_add_only_regroups_threads_for_changed_workspace(cx: &mut
Some("Worktree Thread".into()),
time_wt,
Some(time_wt),
+ None,
&worktree_project,
cx,
);
@@ -9018,6 +9181,7 @@ mod property_test {
title: Some(title),
updated_at,
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_path_lists(main_worktree_paths, path_list).unwrap(),
archived: false,
remote_connection: None,
@@ -9072,7 +9236,15 @@ mod property_test {
chrono::TimeZone::with_ymd_and_hms(&chrono::Utc, 2024, 1, 1, 0, 0, 0)
.unwrap()
+ chrono::Duration::seconds(state.thread_counter as i64);
- save_thread_metadata(session_id, Some(title), updated_at, None, &project, cx);
+ save_thread_metadata(
+ session_id,
+ Some(title),
+ updated_at,
+ None,
+ None,
+ &project,
+ cx,
+ );
}
}
Operation::SaveWorktreeThread { worktree_index } => {
@@ -9891,6 +10063,7 @@ async fn test_remote_project_integration_does_not_briefly_render_as_separate_pro
Some("Main Thread".into()),
chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
None,
+ None,
&project,
cx,
);
@@ -9916,6 +10089,7 @@ async fn test_remote_project_integration_does_not_briefly_render_as_separate_pro
title: Some("Worktree Thread".into()),
updated_at: chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 1).unwrap(),
created_at: None,
+ interacted_at: None,
worktree_paths: WorktreePaths::from_path_lists(
main_worktree_paths,
PathList::new(&[PathBuf::from("/project-wt-1")]),
@@ -10100,7 +10274,7 @@ async fn test_archive_removes_worktree_even_when_workspace_paths_diverge(cx: &mu
.await;
fs.insert_tree(
- "/wt-feature-a",
+ "/worktrees/project/feature-a/project",
serde_json::json!({
".git": "gitdir: /project/.git/worktrees/feature-a",
"src": {
@@ -10114,7 +10288,7 @@ async fn test_archive_removes_worktree_even_when_workspace_paths_diverge(cx: &mu
Path::new("/project/.git"),
false,
git::repository::Worktree {
- path: PathBuf::from("/wt-feature-a"),
+ path: PathBuf::from("/worktrees/project/feature-a/project"),
ref_name: Some("refs/heads/feature-a".into()),
sha: "abc".into(),
is_main: false,
@@ -10126,7 +10300,12 @@ async fn test_archive_removes_worktree_even_when_workspace_paths_diverge(cx: &mu
cx.update(|cx| <dyn fs::Fs>::set_global(fs.clone(), cx));
let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await;
- let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await;
+ let worktree_project = project::Project::test(
+ fs.clone(),
+ ["/worktrees/project/feature-a/project".as_ref()],
+ cx,
+ )
+ .await;
main_project
.update(cx, |p, cx| p.git_scans_complete(cx))