Detailed changes
@@ -5886,9 +5886,9 @@ mod tests {
// MultiWorkspace should still have exactly one workspace (no worktree created).
multi_workspace
- .read_with(cx, |multi_workspace, _cx| {
+ .read_with(cx, |multi_workspace, cx| {
assert_eq!(
- multi_workspace.workspaces().count(),
+ multi_workspace.workspaces(cx).len(),
1,
"LocalProject should not create a new workspace"
);
@@ -6430,14 +6430,15 @@ mod tests {
.read_with(cx, |multi_workspace, cx| {
// There should be more than one workspace now (the original + the new worktree).
assert!(
- multi_workspace.workspaces().count() > 1,
+ multi_workspace.workspaces(cx).len() > 1,
"expected a new workspace to have been created, found {}",
- multi_workspace.workspaces().count(),
+ multi_workspace.workspaces(cx).len(),
);
// Check the newest workspace's panel for the correct agent.
let new_workspace = multi_workspace
- .workspaces()
+ .workspaces(cx)
+ .into_iter()
.find(|ws| ws.entity_id() != workspace.entity_id())
.expect("should find the new workspace");
let new_panel = new_workspace
@@ -7342,7 +7343,7 @@ mod tests {
"workspace project should still be remote, not local"
);
assert_eq!(
- multi_workspace.workspaces().count(),
+ multi_workspace.workspaces(cx).len(),
1,
"existing remote workspace should be reused, not a new one created"
);
@@ -401,7 +401,7 @@ fn resolve_agent_connection_stores(
let mut stores = Vec::new();
let mut included_local_store = false;
- for workspace in multi_workspace.read(cx).workspaces() {
+ for workspace in multi_workspace.read(cx).workspaces(cx) {
let workspace = workspace.read(cx);
let project = workspace.project().read(cx);
@@ -701,7 +701,7 @@ pub fn all_open_workspaces(cx: &App) -> Vec<Entity<Workspace>> {
.flat_map(|multi_workspace| {
multi_workspace
.read(cx)
- .map(|multi_workspace| multi_workspace.workspaces().cloned().collect::<Vec<_>>())
+ .map(|multi_workspace| multi_workspace.workspaces(cx))
.unwrap_or_default()
})
.collect()
@@ -370,7 +370,8 @@ impl ThreadsArchiveView {
.and_then(|mw| mw.upgrade())
.map(|mw| {
mw.read(cx)
- .workspaces()
+ .workspaces(cx)
+ .iter()
.filter_map(|ws| ws.read(cx).database_id())
.collect()
})
@@ -380,7 +380,7 @@ pub fn init(cx: &mut App) {
multi_workspace
.update(cx, |multi_workspace, window, cx| {
let window_project_groups: Vec<ProjectGroupKey> =
- multi_workspace.project_group_keys().cloned().collect();
+ multi_workspace.project_group_keys(cx);
let workspace = multi_workspace.workspace().clone();
workspace.update(cx, |workspace, cx| {
@@ -1966,9 +1966,9 @@ impl RecentProjectsDelegate {
if let Some(group) = multi_workspace
.project_groups()
.iter()
- .find(|g| g.key == key_for_remove)
+ .find(|g| g.read(cx).key == key_for_remove)
{
- let group_id = group.id;
+ let group_id = group.read(cx).id;
multi_workspace
.remove_project_group(group_id, window, cx)
.detach_and_log_err(cx);
@@ -3754,7 +3754,8 @@ fn all_projects(
.into_iter()
.flat_map(|multi_workspace| {
multi_workspace
- .workspaces()
+ .workspaces(cx)
+ .into_iter()
.map(|workspace| workspace.read(cx).project().clone())
.collect::<Vec<_>>()
}),
@@ -97,10 +97,6 @@ struct SerializedSidebar {
#[serde(default)]
width: Option<f32>,
#[serde(default)]
- collapsed_groups: Vec<ProjectGroupId>,
- #[serde(default)]
- expanded_groups: Vec<(ProjectGroupId, usize)>,
- #[serde(default)]
active_view: SerializedSidebarView,
}
@@ -241,8 +237,7 @@ impl ThreadEntry {
#[derive(Clone)]
enum ListEntry {
ProjectHeader {
- group_id: ProjectGroupId,
- key: ProjectGroupKey,
+ group: Entity<ProjectGroup>,
label: SharedString,
highlight_positions: Vec<usize>,
has_running_threads: bool,
@@ -252,15 +247,14 @@ enum ListEntry {
},
Thread(ThreadEntry),
ViewMore {
- group_id: ProjectGroupId,
- key: ProjectGroupKey,
+ group: Entity<ProjectGroup>,
is_fully_expanded: bool,
},
DraftThread {
/// `None` for placeholder entries in empty groups with no open
/// workspace. `Some` for drafts backed by an AgentPanel.
draft_id: Option<DraftId>,
- key: project::ProjectGroupKey,
+ group: Entity<ProjectGroup>,
workspace: Option<Entity<Workspace>>,
worktrees: Vec<WorktreeInfo>,
},
@@ -277,8 +271,8 @@ impl ListEntry {
fn reachable_workspaces<'a>(
&'a self,
multi_workspace: &'a workspace::MultiWorkspace,
- _cx: &'a App,
+ cx: &'a App,
) -> Vec<Entity<Workspace>> {
match self {
ListEntry::Thread(thread) => match &thread.workspace {
@@ -286,10 +280,12 @@
ThreadEntryWorkspace::Closed { .. } => Vec::new(),
},
ListEntry::DraftThread { workspace, .. } => workspace.iter().cloned().collect(),
- ListEntry::ProjectHeader { group_id, .. } => multi_workspace
- .workspaces_for_project_group(*group_id)
- .map(|ws| ws.to_vec())
- .unwrap_or_default(),
+ ListEntry::ProjectHeader { group, .. } => {
+ let group_id = group.read(cx).id;
+ multi_workspace
+ .workspaces_for_project_group(group_id, cx)
+ .unwrap_or_default()
+ }
ListEntry::ViewMore { .. } => Vec::new(),
}
}
}
@@ -457,8 +453,7 @@ pub struct Sidebar {
/// Tracks which sidebar entry is currently active (highlighted).
active_entry: Option<ActiveEntry>,
hovered_thread_index: Option<usize>,
- collapsed_groups: HashSet<ProjectGroupId>,
- expanded_groups: HashMap<ProjectGroupId, usize>,
+
/// Updated only in response to explicit user actions (clicking a
/// thread, confirming in the thread switcher, etc.) — never from
/// background data changes. Used to sort the thread switcher popup.
@@ -533,7 +528,7 @@ impl Sidebar {
})
.detach();
- let workspaces: Vec<_> = multi_workspace.read(cx).workspaces().cloned().collect();
+ let workspaces = multi_workspace.read(cx).workspaces(cx);
cx.defer_in(window, move |this, window, cx| {
for workspace in &workspaces {
this.subscribe_to_workspace(workspace, window, cx);
@@ -551,8 +546,7 @@ impl Sidebar {
selection: None,
active_entry: None,
hovered_thread_index: None,
- collapsed_groups: HashSet::new(),
- expanded_groups: HashMap::new(),
+
thread_last_accessed: HashMap::new(),
thread_last_message_sent_or_queued: HashMap::new(),
thread_switcher: None,
@@ -824,7 +818,7 @@ impl Sidebar {
return;
};
let mw = multi_workspace.read(cx);
- let workspaces: Vec<_> = mw.workspaces().cloned().collect();
+ let workspaces = mw.workspaces(cx);
let active_workspace = Some(mw.workspace().clone());
let agent_server_store = workspaces
@@ -902,11 +896,11 @@ impl Sidebar {
(icon, icon_from_external_svg)
};
- let groups: Vec<ProjectGroup> = mw.project_groups().to_vec();
+ let groups = mw.project_groups().to_vec();
let mut all_paths: Vec<PathBuf> = groups
.iter()
- .flat_map(|g| g.key.path_list().paths().iter().cloned())
+ .flat_map(|g| g.read(cx).key.path_list().paths().iter().cloned())
.collect();
all_paths.sort();
all_paths.dedup();
@@ -918,9 +912,10 @@ impl Sidebar {
all_paths.into_iter().zip(path_details).collect();
for group in &groups {
+ let group_entity = group.clone();
+ let group = group.read(cx);
let group_key = &group.key;
let group_workspaces = &group.workspaces;
- let group_id = group.id;
if group_key.path_list().paths().is_empty() {
continue;
@@ -928,7 +923,7 @@ impl Sidebar {
let label = group_key.display_name(&path_detail_map);
- let is_collapsed = self.collapsed_groups.contains(&group_id);
+ let is_collapsed = !group.expanded;
let should_load_threads = !is_collapsed || !query.is_empty();
let is_active = active_workspace
@@ -1169,8 +1164,7 @@ impl Sidebar {
project_header_indices.push(entries.len());
entries.push(ListEntry::ProjectHeader {
- group_id,
- key: group_key.clone(),
+ group: group_entity.clone(),
label,
highlight_positions: workspace_highlight_positions,
has_running_threads,
@@ -1186,8 +1180,7 @@ impl Sidebar {
} else {
project_header_indices.push(entries.len());
entries.push(ListEntry::ProjectHeader {
- group_id,
- key: group_key.clone(),
+ group: group_entity.clone(),
label,
highlight_positions: Vec::new(),
has_running_threads,
@@ -1219,7 +1212,7 @@ impl Sidebar {
if !has_threads && group_draft_ids.is_empty() {
entries.push(ListEntry::DraftThread {
draft_id: None,
- key: group_key.clone(),
+ group: group_entity.clone(),
workspace: group_workspaces.first().cloned(),
worktrees: Vec::new(),
});
@@ -1232,7 +1225,7 @@ impl Sidebar {
let worktrees = worktree_info_from_thread_paths(&ws_worktree_paths);
entries.push(ListEntry::DraftThread {
draft_id: Some(*draft_id),
- key: group_key.clone(),
+ group: group_entity.clone(),
workspace: Some(ws.clone()),
worktrees,
});
@@ -1242,7 +1235,7 @@ impl Sidebar {
let total = threads.len();
- let extra_batches = self.expanded_groups.get(&group_id).copied().unwrap_or(0);
+ let extra_batches = group.visible_thread_count.unwrap_or(0);
let threads_to_show =
DEFAULT_THREADS_SHOWN + (extra_batches * DEFAULT_THREADS_SHOWN);
let count = threads_to_show.min(total);
@@ -1281,8 +1274,7 @@ impl Sidebar {
if total > DEFAULT_THREADS_SHOWN {
entries.push(ListEntry::ViewMore {
- group_id,
- key: group_key.clone(),
+ group: group_entity.clone(),
is_fully_expanded,
});
}
@@ -1370,45 +1362,49 @@ impl Sidebar {
let rendered = match entry {
ListEntry::ProjectHeader {
- group_id,
- key,
+ group,
label,
highlight_positions,
has_running_threads,
waiting_thread_count,
is_active: is_active_group,
has_threads,
- } => self.render_project_header(
- ix,
- false,
- *group_id,
- key,
- label,
- highlight_positions,
- *has_running_threads,
- *waiting_thread_count,
- *is_active_group,
- is_selected,
- *has_threads,
- cx,
- ),
+ } => {
+ let group_id = group.read(cx).id;
+ let key = group.read(cx).key.clone();
+ self.render_project_header(
+ ix,
+ false,
+ group_id,
+ &key,
+ label,
+ highlight_positions,
+ *has_running_threads,
+ *waiting_thread_count,
+ *is_active_group,
+ is_selected,
+ *has_threads,
+ group.clone(),
+ cx,
+ )
+ }
ListEntry::Thread(thread) => self.render_thread(ix, thread, is_active, is_selected, cx),
ListEntry::ViewMore {
- group_id,
- key,
+ group,
is_fully_expanded,
- } => self.render_view_more(ix, *group_id, key, *is_fully_expanded, is_selected, cx),
+ } => self.render_view_more(ix, group.clone(), *is_fully_expanded, is_selected, cx),
ListEntry::DraftThread {
draft_id,
- key,
+ group,
workspace,
worktrees,
} => {
+ let key = group.read(cx).key.clone();
let group_has_threads = self
.contents
.entries
.iter()
- .any(|e| matches!(e, ListEntry::ProjectHeader { key: hk, has_threads: true, .. } if hk == key));
+ .any(|e| matches!(e, ListEntry::ProjectHeader { has_threads: true, group: g, .. } if g.read(cx).key == key));
// Count drafts in the AgentPanel for this group's workspaces.
let sibling_draft_count = workspace
.as_ref()
@@ -1419,7 +1415,7 @@ impl Sidebar {
self.render_draft_thread(
ix,
*draft_id,
- key,
+ &key,
workspace.as_ref(),
is_active,
worktrees,
@@ -1479,6 +1475,7 @@ impl Sidebar {
is_active: bool,
is_focused: bool,
has_threads: bool,
+ group_entity: Entity<ProjectGroup>,
cx: &mut Context<Self>,
) -> AnyElement {
let host = key.host();
@@ -1488,16 +1485,16 @@ impl Sidebar {
let disclosure_id = SharedString::from(format!("disclosure-{ix}"));
let group_name = SharedString::from(format!("{id_prefix}header-group-{ix}"));
- let is_collapsed = self.collapsed_groups.contains(&group_id);
+ let is_collapsed = !group_entity.read(cx).expanded;
let (disclosure_icon, disclosure_tooltip) = if is_collapsed {
(IconName::ChevronRight, "Expand Project")
} else {
(IconName::ChevronDown, "Collapse Project")
};
- let key_for_toggle = group_id;
- let key_for_collapse = group_id;
- let view_more_expanded = self.expanded_groups.contains_key(&group_id);
+ let group_entity_for_toggle = group_entity.clone();
+ let group_entity_for_collapse = group_entity.clone();
+ let view_more_expanded = group_entity.read(cx).visible_thread_count.is_some();
let label = if highlight_positions.is_empty() {
Label::new(label.clone())
@@ -1562,9 +1559,12 @@ impl Sidebar {
.icon_size(IconSize::Small)
.icon_color(Color::Custom(cx.theme().colors().icon_muted.opacity(0.5)))
.tooltip(Tooltip::text(disclosure_tooltip))
- .on_click(cx.listener(move |this, _, window, cx| {
- this.selection = None;
- this.toggle_collapse(key_for_toggle, window, cx);
+ .on_click(cx.listener({
+ let group_entity = group_entity_for_toggle;
+ move |this, _, window, cx| {
+ this.selection = None;
+ this.toggle_collapse(group_entity.clone(), window, cx);
+ }
})),
)
.child(label)
@@ -1625,14 +1625,15 @@ impl Sidebar {
)
.icon_size(IconSize::Small)
.tooltip(Tooltip::text("Collapse Displayed Threads"))
- .on_click(cx.listener(
+ .on_click(cx.listener({
+ let group_entity = group_entity_for_collapse.clone();
move |this, _, _window, cx| {
this.selection = None;
- this.expanded_groups.remove(&key_for_collapse);
+ group_entity.update(cx, |g, _| g.visible_thread_count = None);
this.serialize(cx);
this.update_entries(cx);
- },
- )),
+ }
+ })),
)
})
.child({
@@ -1654,10 +1655,9 @@ impl Sidebar {
cx,
)
})
.on_click(cx.listener({
let key = key.clone();
move |this, _, window, cx| {
- this.collapsed_groups.remove(&group_id);
+ group_entity.update(cx, |g, _| g.expanded = true);
this.selection = None;
// If the active workspace belongs to this
// group, use it (preserves linked worktree
@@ -1681,8 +1681,8 @@
} else {
this.open_workspace_and_create_draft(&key, window, cx);
}
}
}))
}),
)
.map(|this| {
@@ -1859,8 +1859,7 @@ impl Sidebar {
}
let ListEntry::ProjectHeader {
- group_id,
- key,
+ group,
label,
highlight_positions,
has_running_threads,
@@ -1872,14 +1871,17 @@ impl Sidebar {
return None;
};
+ let group_id = group.read(cx).id;
+ let key = group.read(cx).key.clone();
+
let is_focused = self.focus_handle.is_focused(window);
let is_selected = is_focused && self.selection == Some(header_idx);
let header_element = self.render_project_header(
header_idx,
true,
- *group_id,
- key,
+ group_id,
+ &key,
&label,
&highlight_positions,
*has_running_threads,
@@ -1887,6 +1889,7 @@ impl Sidebar {
*is_active,
- *has_threads,
- is_selected,
+ is_selected,
+ *has_threads,
+ group.clone(),
cx,
);
@@ -1926,15 +1929,11 @@ impl Sidebar {
fn toggle_collapse(
&mut self,
- group_id: ProjectGroupId,
+ group: Entity<ProjectGroup>,
_window: &mut Window,
cx: &mut Context<Self>,
) {
- if self.collapsed_groups.contains(&group_id) {
- self.collapsed_groups.remove(&group_id);
- } else {
- self.collapsed_groups.insert(group_id);
- }
+ group.update(cx, |g, _| g.expanded = !g.expanded);
self.serialize(cx);
self.update_entries(cx);
}
@@ -2106,8 +2105,9 @@ impl Sidebar {
};
match entry {
- ListEntry::ProjectHeader { group_id, .. } => {
- self.toggle_collapse(*group_id, window, cx);
+ ListEntry::ProjectHeader { group, .. } => {
+ let group = group.clone();
+ self.toggle_collapse(group, window, cx);
}
ListEntry::Thread(thread) => {
let metadata = thread.metadata.clone();
@@ -2133,24 +2133,25 @@ impl Sidebar {
}
}
ListEntry::ViewMore {
- group_id,
+ group,
is_fully_expanded,
..
} => {
+ let group = group.clone();
if *is_fully_expanded {
- self.reset_thread_group_expansion(*group_id, cx);
+ self.reset_thread_group_expansion(group, cx);
} else {
- self.expand_thread_group(*group_id, cx);
+ self.expand_thread_group(group, cx);
}
}
ListEntry::DraftThread {
draft_id,
- key,
+ group,
workspace,
..
} => {
let draft_id = *draft_id;
- let key = key.clone();
+ let key = group.read(cx).key.clone();
let workspace = workspace.clone();
if let Some(draft_id) = draft_id {
if let Some(workspace) = workspace {
@@ -2179,9 +2180,9 @@ impl Sidebar {
.find_map(|window| {
let workspace = window.read(cx).ok().and_then(|multi_workspace| {
multi_workspace
- .workspaces()
+ .workspaces(cx)
+ .into_iter()
.find(|workspace| predicate(workspace, cx))
- .cloned()
})?;
Some((window, workspace))
})
@@ -2195,9 +2196,9 @@ impl Sidebar {
self.multi_workspace.upgrade().and_then(|multi_workspace| {
multi_workspace
.read(cx)
- .workspaces()
+ .workspaces(cx)
+ .into_iter()
.find(|workspace| predicate(workspace, cx))
- .cloned()
})
}
@@ -2583,9 +2584,9 @@ impl Sidebar {
let Some(ix) = self.selection else { return };
match self.contents.entries.get(ix) {
- Some(ListEntry::ProjectHeader { group_id, .. }) => {
- if self.collapsed_groups.contains(group_id) {
- self.collapsed_groups.remove(group_id);
+ Some(ListEntry::ProjectHeader { group, .. }) => {
+ if !group.read(cx).expanded {
+ group.update(cx, |g, _| g.expanded = true);
self.update_entries(cx);
} else if ix + 1 < self.contents.entries.len() {
self.selection = Some(ix + 1);
@@ -2606,9 +2607,9 @@ impl Sidebar {
let Some(ix) = self.selection else { return };
match self.contents.entries.get(ix) {
- Some(ListEntry::ProjectHeader { group_id, .. }) => {
- if !self.collapsed_groups.contains(group_id) {
- self.collapsed_groups.insert(*group_id);
+ Some(ListEntry::ProjectHeader { group, .. }) => {
+ if group.read(cx).expanded {
+ group.update(cx, |g, _| g.expanded = false);
self.update_entries(cx);
}
}
@@ -2616,11 +2617,11 @@ impl Sidebar {
ListEntry::Thread(_) | ListEntry::ViewMore { .. } | ListEntry::DraftThread { .. },
) => {
for i in (0..ix).rev() {
- if let Some(ListEntry::ProjectHeader { group_id, .. }) =
+ if let Some(ListEntry::ProjectHeader { group, .. }) =
self.contents.entries.get(i)
{
self.selection = Some(i);
- self.collapsed_groups.insert(*group_id);
+ group.update(cx, |g, _| g.expanded = false);
self.update_entries(cx);
break;
}
@@ -2653,14 +2654,14 @@ impl Sidebar {
};
if let Some(header_ix) = header_ix {
- if let Some(ListEntry::ProjectHeader { group_id, .. }) =
+ if let Some(ListEntry::ProjectHeader { group, .. }) =
self.contents.entries.get(header_ix)
{
- if self.collapsed_groups.contains(group_id) {
- self.collapsed_groups.remove(group_id);
+ if !group.read(cx).expanded {
+ group.update(cx, |g, _| g.expanded = true);
} else {
self.selection = Some(header_ix);
- self.collapsed_groups.insert(*group_id);
+ group.update(cx, |g, _| g.expanded = false);
}
self.update_entries(cx);
}
@@ -2674,8 +2675,8 @@ impl Sidebar {
cx: &mut Context<Self>,
) {
for entry in &self.contents.entries {
- if let ListEntry::ProjectHeader { group_id, .. } = entry {
- self.collapsed_groups.insert(*group_id);
+ if let ListEntry::ProjectHeader { group, .. } = entry {
+ group.update(cx, |g, _| g.expanded = false);
}
}
self.update_entries(cx);
@@ -2687,7 +2688,11 @@ impl Sidebar {
_window: &mut Window,
cx: &mut Context<Self>,
) {
- self.collapsed_groups.clear();
+ for entry in &self.contents.entries {
+ if let ListEntry::ProjectHeader { group, .. } = entry {
+ group.update(cx, |g, _| g.expanded = true);
+ }
+ }
self.update_entries(cx);
}
@@ -2696,7 +2701,7 @@ impl Sidebar {
return;
};
- let workspaces: Vec<_> = multi_workspace.read(cx).workspaces().cloned().collect();
+ let workspaces = multi_workspace.read(cx).workspaces(cx);
for workspace in workspaces {
if let Some(agent_panel) = workspace.read(cx).panel::<AgentPanel>(cx) {
let cancelled =
@@ -2731,13 +2736,7 @@ impl Sidebar {
let mut workspaces = self
.multi_workspace
.upgrade()
- .map(|multi_workspace| {
- multi_workspace
- .read(cx)
- .workspaces()
- .cloned()
- .collect::<Vec<_>>()
- })
+ .map(|multi_workspace| multi_workspace.read(cx).workspaces(cx))
.unwrap_or_default();
for workspace in thread_worktree_archive::all_open_workspaces(cx) {
if !workspaces.contains(&workspace) {
@@ -3138,9 +3137,9 @@ impl Sidebar {
.entries
.iter()
.filter_map(|entry| match entry {
- ListEntry::ProjectHeader { label, key, .. } => {
+ ListEntry::ProjectHeader { label, group, .. } => {
current_header_label = Some(label.clone());
- current_header_key = Some(key.clone());
+ current_header_key = Some(group.read(cx).key.clone());
None
}
ListEntry::Thread(thread) => {
@@ -3578,7 +3577,7 @@ impl Sidebar {
let window_project_groups: Vec<ProjectGroupKey> = multi_workspace
.as_ref()
- .map(|mw| mw.read(cx).project_group_keys().cloned().collect())
+ .map(|mw| mw.read(cx).project_group_keys(cx))
.unwrap_or_default();
let popover_handle = self.recent_projects_popover_handle.clone();
@@ -3620,8 +3619,7 @@ impl Sidebar {
fn render_view_more(
&self,
ix: usize,
- group_id: ProjectGroupId,
- _key: &ProjectGroupKey,
+ group: Entity<ProjectGroup>,
is_fully_expanded: bool,
is_selected: bool,
cx: &mut Context<Self>,
@@ -3641,9 +3639,9 @@ impl Sidebar {
.on_click(cx.listener(move |this, _, _window, cx| {
this.selection = None;
if is_fully_expanded {
- this.reset_thread_group_expansion(group_id, cx);
+ this.reset_thread_group_expansion(group.clone(), cx);
} else {
- this.expand_thread_group(group_id, cx);
+ this.expand_thread_group(group.clone(), cx);
}
}))
.into_any_element()
@@ -3851,18 +3849,6 @@ impl Sidebar {
Some(multi_workspace.project_group_key_for_workspace(multi_workspace.workspace(), cx))
}
- fn active_project_group_id(&self, cx: &App) -> Option<ProjectGroupId> {
- let multi_workspace = self.multi_workspace.upgrade()?;
- let multi_workspace = multi_workspace.read(cx);
- let active_key =
- multi_workspace.project_group_key_for_workspace(multi_workspace.workspace(), cx);
- multi_workspace
- .project_groups()
- .iter()
- .find(|g| g.key == active_key)
- .map(|g| g.id)
- }
-
fn active_project_header_position(&self, cx: &App) -> Option<usize> {
let active_key = self.active_project_group_key(cx)?;
self.contents
@@ -3871,7 +3857,7 @@ impl Sidebar {
.position(|&entry_ix| {
matches!(
&self.contents.entries[entry_ix],
- ListEntry::ProjectHeader { key, .. } if *key == active_key
+ ListEntry::ProjectHeader { group, .. } if group.read(cx).key == active_key
)
})
}
@@ -3900,16 +3886,15 @@ impl Sidebar {
};
let header_entry_ix = self.contents.project_header_indices[next_pos];
- let Some(ListEntry::ProjectHeader { group_id, key, .. }) =
+ let Some(ListEntry::ProjectHeader { group, .. }) =
self.contents.entries.get(header_entry_ix)
else {
return;
};
- let group_id = *group_id;
- let key = key.clone();
+ let key = group.read(cx).key.clone();
// Uncollapse the target group so that threads become visible.
- self.collapsed_groups.remove(&group_id);
+ group.update(cx, |g, _| g.expanded = true);
if let Some(workspace) = self.multi_workspace.upgrade().and_then(|mw| {
mw.read(cx)
@@ -4012,43 +3997,66 @@ impl Sidebar {
self.cycle_thread_impl(false, window, cx);
}
- fn expand_thread_group(&mut self, group_id: ProjectGroupId, cx: &mut Context<Self>) {
- let current = self.expanded_groups.get(&group_id).copied().unwrap_or(0);
- self.expanded_groups.insert(group_id, current + 1);
+ fn expand_thread_group(&mut self, group: Entity<ProjectGroup>, cx: &mut Context<Self>) {
+ group.update(cx, |g, _| {
+ let current = g.visible_thread_count.unwrap_or(0);
+ g.visible_thread_count = Some(current + 1);
+ });
self.serialize(cx);
self.update_entries(cx);
}
- fn reset_thread_group_expansion(&mut self, group_id: ProjectGroupId, cx: &mut Context<Self>) {
- self.expanded_groups.remove(&group_id);
+ fn reset_thread_group_expansion(
+ &mut self,
+ group: Entity<ProjectGroup>,
+ cx: &mut Context<Self>,
+ ) {
+ group.update(cx, |g, _| g.visible_thread_count = None);
self.serialize(cx);
self.update_entries(cx);
}
- fn collapse_thread_group(&mut self, group_id: ProjectGroupId, cx: &mut Context<Self>) {
- match self.expanded_groups.get(&group_id).copied() {
+ fn collapse_thread_group(&mut self, group: Entity<ProjectGroup>, cx: &mut Context<Self>) {
+ let should_update = group.update(cx, |g, _| match g.visible_thread_count {
Some(batches) if batches > 1 => {
- self.expanded_groups.insert(group_id, batches - 1);
+ g.visible_thread_count = Some(batches - 1);
+ true
}
Some(_) => {
- self.expanded_groups.remove(&group_id);
+ g.visible_thread_count = None;
+ true
}
- None => return,
+ None => false,
+ });
+ if !should_update {
+ return;
}
self.serialize(cx);
self.update_entries(cx);
}
+ fn active_project_group_entity(&self, cx: &App) -> Option<Entity<ProjectGroup>> {
+ let multi_workspace = self.multi_workspace.upgrade()?;
+ let multi_workspace = multi_workspace.read(cx);
+ let active_key =
+ multi_workspace.project_group_key_for_workspace(multi_workspace.workspace(), cx);
+ multi_workspace
+ .project_groups()
+ .iter()
+ .find(|g| g.read(cx).key == active_key)
+ .cloned()
+ }
+
fn on_show_more_threads(
&mut self,
_: &ShowMoreThreads,
_window: &mut Window,
cx: &mut Context<Self>,
) {
- let Some(group_id) = self.active_project_group_id(cx) else {
+ let Some(group) = self.active_project_group_entity(cx) else {
return;
};
- self.expand_thread_group(group_id, cx);
+ self.expand_thread_group(group, cx);
}
fn on_show_fewer_threads(
@@ -4057,10 +4065,10 @@ impl Sidebar {
_window: &mut Window,
cx: &mut Context<Self>,
) {
- let Some(group_id) = self.active_project_group_id(cx) else {
+ let Some(group) = self.active_project_group_entity(cx) else {
return;
};
- self.collapse_thread_group(group_id, cx);
+ self.collapse_thread_group(group, cx);
}
fn on_new_thread(
@@ -4662,12 +4670,6 @@ impl WorkspaceSidebar for Sidebar {
fn serialized_state(&self, _cx: &App) -> Option<String> {
let serialized = SerializedSidebar {
width: Some(f32::from(self.width)),
- collapsed_groups: self.collapsed_groups.iter().copied().collect(),
- expanded_groups: self
- .expanded_groups
- .iter()
- .map(|(id, count)| (*id, *count))
- .collect(),
active_view: match self.view {
SidebarView::ThreadList => SerializedSidebarView::ThreadList,
SidebarView::Archive(_) => SerializedSidebarView::Archive,
@@ -4686,8 +4688,6 @@ impl WorkspaceSidebar for Sidebar {
if let Some(width) = serialized.width {
self.width = px(width).clamp(MIN_WIDTH, MAX_WIDTH);
}
- self.collapsed_groups = serialized.collapsed_groups.into_iter().collect();
- self.expanded_groups = serialized.expanded_groups.into_iter().collect();
if serialized.active_view == SerializedSidebarView::Archive {
cx.defer_in(window, |this, window, cx| {
this.show_archive(window, cx);
@@ -4866,7 +4866,7 @@ pub fn dump_workspace_info(
let multi_workspace = workspace.multi_workspace().and_then(|weak| weak.upgrade());
let workspaces: Vec<gpui::Entity<Workspace>> = match &multi_workspace {
- Some(mw) => mw.read(cx).workspaces().cloned().collect(),
+ Some(mw) => mw.read(cx).workspaces(cx),
None => vec![this_entity.clone()],
};
let active_workspace = multi_workspace
@@ -4876,7 +4876,7 @@ pub fn dump_workspace_info(
writeln!(output, "MultiWorkspace: {} workspace(s)", workspaces.len()).ok();
if let Some(mw) = &multi_workspace {
- let keys: Vec<_> = mw.read(cx).project_group_keys().cloned().collect();
+ let keys = mw.read(cx).project_group_keys(cx);
writeln!(output, "Project group keys ({}):", keys.len()).ok();
for key in keys {
writeln!(output, " - {key:?}").ok();
@@ -8,3 +8,5 @@ cc e5848f13ac6e2dd83819558a65f5836e5196ff6a2dc5289032e28b4cabf2bf83 # shrinks to
cc 83c95196677f617710761c1163124d3eec7db0e99c0c85fb1fd6d26ce5a6fb2a # shrinks to TestSidebarInvariantsArgs = TestSidebarInvariantsArgs { __seed: 17358113519749946733, raw_operations: [225, 16] }
cc 8c6876d3e226c22e3eb57fa40f9b8d8f0cb6b4d21ea15a9af643b028d44693c8 # shrinks to TestSidebarInvariantsArgs = TestSidebarInvariantsArgs { __seed: 8906097005873738186, raw_operations: [153, 112] }
cc 562a84b1c8fad8b9109ade8380a54910fbf558b7d86b5d3785b66b933762fee6 # shrinks to TestSidebarInvariantsArgs = TestSidebarInvariantsArgs { __seed: 7154971574717061752, raw_operations: [201, 208] }
+cc 40a2be420305542abe5787cd164c180f535c114c303a2c3b32d29a0a8e84c850 # shrinks to TestSidebarInvariantsArgs = TestSidebarInvariantsArgs { __seed: 14009161356256274518, raw_operations: [100, 35, 68, 120] }
+cc fce8ac881649c71fcd7f7279c270726a9a7586f85f4bc0e89091a91d13c1e179 # shrinks to TestSidebarInvariantsArgs = TestSidebarInvariantsArgs { __seed: 4523907077365709442, raw_operations: [105, 16] }
@@ -343,15 +343,11 @@ fn visible_entries_as_strings(
match entry {
ListEntry::ProjectHeader {
label,
- group_id,
+ group,
highlight_positions: _,
..
} => {
- let icon = if sidebar.collapsed_groups.contains(group_id) {
- ">"
- } else {
- "v"
- };
+ let icon = if !group.read(_cx).expanded { ">" } else { "v" };
format!("{} [{}]{}", icon, label, selected)
}
ListEntry::Thread(thread) => {
@@ -407,20 +403,9 @@ async fn test_serialization_round_trip(cx: &mut TestAppContext) {
save_n_test_threads(3, &project, cx).await;
- let project_group_key = project.read_with(cx, |project, cx| project.project_group_key(cx));
-
- // Set a custom width, collapse the group, and expand "View More".
- sidebar.update_in(cx, |sidebar, window, cx| {
+ // Set a custom width.
+ sidebar.update_in(cx, |sidebar, _window, cx| {
sidebar.set_width(Some(px(420.0)), cx);
- let group_id = multi_workspace
- .read(cx)
- .project_groups()
- .iter()
- .find(|g| g.key == project_group_key)
- .unwrap()
- .id;
- sidebar.toggle_collapse(group_id, window, cx);
- sidebar.expanded_groups.insert(group_id, 2);
});
cx.run_until_parked();
@@ -438,35 +423,12 @@ async fn test_serialization_round_trip(cx: &mut TestAppContext) {
});
cx.run_until_parked();
- // Assert all serialized fields match.
- let (width1, collapsed1, expanded1) = sidebar.read_with(cx, |s, _| {
- (
- s.width,
- s.collapsed_groups.clone(),
- s.expanded_groups.clone(),
- )
- });
- let (width2, collapsed2, expanded2) = sidebar2.read_with(cx, |s, _| {
- (
- s.width,
- s.collapsed_groups.clone(),
- s.expanded_groups.clone(),
- )
- });
+ // Assert width matches.
+ let width1 = sidebar.read_with(cx, |s, _| s.width);
+ let width2 = sidebar2.read_with(cx, |s, _| s.width);
assert_eq!(width1, width2);
- assert_eq!(collapsed1, collapsed2);
- assert_eq!(expanded1, expanded2);
assert_eq!(width1, px(420.0));
- let group_id = multi_workspace.read_with(cx, |mw, _| {
- mw.project_groups()
- .iter()
- .find(|g| g.key == project_group_key)
- .unwrap()
- .id
- });
- assert!(collapsed1.contains(&group_id));
- assert_eq!(expanded1.get(&group_id), Some(&2));
}
#[gpui::test]
@@ -482,8 +444,6 @@ async fn test_restore_serialized_archive_view_does_not_panic(cx: &mut TestAppCon
let serialized = serde_json::to_string(&SerializedSidebar {
width: Some(400.0),
- collapsed_groups: Vec::new(),
- expanded_groups: Vec::new(),
active_view: SerializedSidebarView::Archive,
})
.expect("serialization should succeed");
@@ -700,12 +660,12 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) {
save_n_test_threads(17, &project, cx).await;
let project_group_key = project.read_with(cx, |project, cx| project.project_group_key(cx));
- let group_id = multi_workspace.read_with(cx, |mw, _| {
+ let group = multi_workspace.read_with(cx, |mw, cx| {
mw.project_groups()
.iter()
- .find(|g| g.key == project_group_key)
+ .find(|g| g.read(cx).key == project_group_key)
.unwrap()
- .id
+ .clone()
});
multi_workspace.update_in(cx, |_, _window, cx| cx.notify());
@@ -731,8 +691,8 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) {
// Expand again by one batch
sidebar.update_in(cx, |s, _window, cx| {
- let current = s.expanded_groups.get(&group_id).copied().unwrap_or(0);
- s.expanded_groups.insert(group_id, current + 1);
+ let current = group.read(cx).visible_thread_count.unwrap_or(0);
+ group.update(cx, |g, _| g.visible_thread_count = Some(current + 1));
s.update_entries(cx);
});
cx.run_until_parked();
@@ -744,8 +704,8 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) {
// Expand one more time - should show all 17 threads with Collapse button
sidebar.update_in(cx, |s, _window, cx| {
- let current = s.expanded_groups.get(&group_id).copied().unwrap_or(0);
- s.expanded_groups.insert(group_id, current + 1);
+ let current = group.read(cx).visible_thread_count.unwrap_or(0);
+ group.update(cx, |g, _| g.visible_thread_count = Some(current + 1));
s.update_entries(cx);
});
cx.run_until_parked();
@@ -758,7 +718,7 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) {
// Click collapse - should go back to showing 5 threads
sidebar.update_in(cx, |s, _window, cx| {
- s.expanded_groups.remove(&group_id);
+ group.update(cx, |g, _| g.visible_thread_count = None);
s.update_entries(cx);
});
cx.run_until_parked();
@@ -779,12 +739,12 @@ async fn test_collapse_and_expand_group(cx: &mut TestAppContext) {
save_n_test_threads(1, &project, cx).await;
let project_group_key = project.read_with(cx, |project, cx| project.project_group_key(cx));
- let group_id = multi_workspace.read_with(cx, |mw, _| {
+ let group = multi_workspace.read_with(cx, |mw, cx| {
mw.project_groups()
.iter()
- .find(|g| g.key == project_group_key)
+ .find(|g| g.read(cx).key == project_group_key)
.unwrap()
- .id
+ .clone()
});
multi_workspace.update_in(cx, |_, _window, cx| cx.notify());
@@ -801,7 +761,7 @@ async fn test_collapse_and_expand_group(cx: &mut TestAppContext) {
// Collapse
sidebar.update_in(cx, |s, window, cx| {
- s.toggle_collapse(group_id, window, cx);
+ s.toggle_collapse(group.clone(), window, cx);
});
cx.run_until_parked();
@@ -815,7 +775,7 @@ async fn test_collapse_and_expand_group(cx: &mut TestAppContext) {
// Expand
sidebar.update_in(cx, |s, window, cx| {
- s.toggle_collapse(group_id, window, cx);
+ s.toggle_collapse(group.clone(), window, cx);
});
cx.run_until_parked();
@@ -840,18 +800,32 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
let expanded_path = PathList::new(&[std::path::PathBuf::from("/expanded")]);
let collapsed_path = PathList::new(&[std::path::PathBuf::from("/collapsed")]);
- let expanded_group_id = project::ProjectGroupId::new();
- let collapsed_group_id = project::ProjectGroupId::new();
+ let expanded_group = cx.update(|_window, cx| {
+ cx.new(|_| ProjectGroup {
+ id: project::ProjectGroupId::new(),
+ key: project::ProjectGroupKey::new(None, expanded_path.clone()),
+ workspaces: Vec::new(),
+ expanded: true,
+ visible_thread_count: None,
+ })
+ });
+ let collapsed_group = cx.update(|_window, cx| {
+ cx.new(|_| ProjectGroup {
+ id: project::ProjectGroupId::new(),
+ key: project::ProjectGroupKey::new(None, collapsed_path.clone()),
+ workspaces: Vec::new(),
+ expanded: false,
+ visible_thread_count: None,
+ })
+ });
sidebar.update_in(cx, |s, _window, _cx| {
- s.collapsed_groups.insert(collapsed_group_id);
s.contents
.notified_threads
.insert(acp::SessionId::new(Arc::from("t-5")));
s.contents.entries = vec![
// Expanded project header
ListEntry::ProjectHeader {
- group_id: expanded_group_id,
- key: project::ProjectGroupKey::new(None, expanded_path.clone()),
+ group: expanded_group.clone(),
label: "expanded-project".into(),
highlight_positions: Vec::new(),
has_running_threads: false,
@@ -977,14 +951,12 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) {
}),
// View More entry
ListEntry::ViewMore {
- group_id: expanded_group_id,
- key: project::ProjectGroupKey::new(None, expanded_path.clone()),
+ group: expanded_group.clone(),
is_fully_expanded: false,
},
// Collapsed project header
ListEntry::ProjectHeader {
- group_id: collapsed_group_id,
- key: project::ProjectGroupKey::new(None, collapsed_path.clone()),
+ group: collapsed_group.clone(),
label: "collapsed-project".into(),
highlight_positions: Vec::new(),
has_running_threads: false,
@@ -2243,16 +2215,16 @@ async fn test_click_clears_selection_and_focus_in_restores_it(cx: &mut TestAppCo
// visual feedback during mouse interaction instead.
sidebar.update_in(cx, |sidebar, window, cx| {
sidebar.selection = None;
- let group_id = sidebar
+ let group = sidebar
.contents
.entries
.iter()
.find_map(|e| match e {
- ListEntry::ProjectHeader { group_id, .. } => Some(*group_id),
+ ListEntry::ProjectHeader { group, .. } => Some(group.clone()),
_ => None,
})
.unwrap();
- sidebar.toggle_collapse(group_id, window, cx);
+ sidebar.toggle_collapse(group, window, cx);
});
assert_eq!(sidebar.read_with(cx, |sidebar, _| sidebar.selection), None);
@@ -2501,7 +2473,7 @@ async fn test_focused_thread_tracks_user_intent(cx: &mut TestAppContext) {
// Switching workspaces via the multi_workspace (simulates clicking
// a workspace header) should clear focused_thread.
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().find(|w| *w == &workspace_b).cloned();
+ let workspace = mw.workspaces(cx).into_iter().find(|w| *w == workspace_b);
if let Some(workspace) = workspace {
mw.activate(workspace, window, cx);
}
@@ -2657,10 +2629,10 @@ async fn test_group_level_folder_add_cascades_to_all_workspaces(cx: &mut TestApp
cx.run_until_parked();
// Both workspaces should be in one group with key [/project-a].
- let group_id = multi_workspace.read_with(cx, |mw, _| {
+ let group_id = multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(mw.project_groups().len(), 1);
- assert_eq!(mw.project_groups()[0].workspaces.len(), 2);
- mw.project_groups()[0].id
+ assert_eq!(mw.project_groups()[0].read(cx).workspaces.len(), 2);
+ mw.project_groups()[0].read(cx).id
});
// Add /project-b via the group-level API.
@@ -2670,10 +2642,19 @@ async fn test_group_level_folder_add_cascades_to_all_workspaces(cx: &mut TestApp
cx.run_until_parked();
// The group key should be updated.
- multi_workspace.read_with(cx, |mw, _| {
+ multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(mw.project_groups().len(), 1, "still one group");
- assert_eq!(mw.project_groups()[0].id, group_id, "same group ID");
- let paths = mw.project_groups()[0].key.path_list().paths().to_vec();
+ assert_eq!(
+ mw.project_groups()[0].read(cx).id,
+ group_id,
+ "same group ID"
+ );
+ let paths = mw.project_groups()[0]
+ .read(cx)
+ .key
+ .path_list()
+ .paths()
+ .to_vec();
assert!(
paths.contains(&PathBuf::from("/project-a"))
&& paths.contains(&PathBuf::from("/project-b")),
@@ -2709,10 +2690,10 @@ async fn test_individual_workspace_folder_change_moves_workspace_to_new_group(
});
cx.run_until_parked();
- multi_workspace.read_with(cx, |mw, _| {
+ multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(mw.project_groups().len(), 1, "one group to start");
assert_eq!(
- mw.project_groups()[0].workspaces.len(),
+ mw.project_groups()[0].read(cx).workspaces.len(),
2,
"two workspaces in it"
);
@@ -2729,12 +2710,12 @@ async fn test_individual_workspace_folder_change_moves_workspace_to_new_group(
// project_a's workspace should have moved to a new group.
// project_b's workspace should stay in the old group, unchanged.
- multi_workspace.read_with(cx, |mw, _| {
+ multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(mw.project_groups().len(), 2, "should now have 2 groups");
let mut group_sizes: Vec<usize> = mw
.project_groups()
.iter()
- .map(|g| g.workspaces.len())
+ .map(|g| g.read(cx).workspaces.len())
.collect();
group_sizes.sort();
assert_eq!(
@@ -2774,7 +2755,7 @@ async fn test_individual_workspace_change_merges_into_existing_group(cx: &mut Te
cx.run_until_parked();
// Should have 2 groups: one [/project-a], one [/project-a, /project-b].
- multi_workspace.read_with(cx, |mw, _| {
+ multi_workspace.read_with(cx, |mw, _cx| {
assert_eq!(mw.project_groups().len(), 2);
});
@@ -2788,14 +2769,14 @@ async fn test_individual_workspace_change_merges_into_existing_group(cx: &mut Te
cx.run_until_parked();
// Both workspaces should now be in one group.
- multi_workspace.read_with(cx, |mw, _| {
+ multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(
mw.project_groups().len(),
1,
"should have merged into 1 group"
);
assert_eq!(
- mw.project_groups()[0].workspaces.len(),
+ mw.project_groups()[0].read(cx).workspaces.len(),
2,
"both workspaces in the merged group"
);
@@ -3045,7 +3026,7 @@ async fn test_cmd_n_shows_new_thread_entry_in_absorbed_worktree(cx: &mut TestApp
// Switch to the worktree workspace.
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().nth(1).unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().nth(1).unwrap();
mw.activate(workspace, window, cx);
});
@@ -3617,11 +3598,11 @@ async fn test_absorbed_worktree_running_thread_shows_live_status(cx: &mut TestAp
// Switch back to the main workspace before setting up the sidebar.
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
});
- // Start a thread in the worktree workspace's panel and keep it
+ // Start a thread in the worktree workspace's panel, keeping it
// generating (don't resolve it).
let connection = StubAgentConnection::new();
open_thread_with_connection(&worktree_panel, connection.clone(), cx);
@@ -3709,7 +3690,7 @@ async fn test_absorbed_worktree_completion_triggers_notification(cx: &mut TestAp
let worktree_panel = add_agent_panel(&worktree_workspace, cx);
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
});
@@ -3804,7 +3785,7 @@ async fn test_clicking_worktree_thread_opens_workspace_when_none_exists(cx: &mut
// Only 1 workspace should exist.
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1,
);
@@ -3819,13 +3800,13 @@ async fn test_clicking_worktree_thread_opens_workspace_when_none_exists(cx: &mut
cx.run_until_parked();
// A new workspace should have been created for the worktree path.
- let new_workspace = multi_workspace.read_with(cx, |mw, _| {
+ let new_workspace = multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(
- mw.workspaces().count(),
+ mw.workspaces(cx).len(),
2,
"confirming a worktree thread without a workspace should open one",
);
- mw.workspaces().nth(1).unwrap().clone()
+ mw.workspaces(cx).into_iter().nth(1).unwrap()
});
let new_path_list =
@@ -4029,7 +4010,7 @@ async fn test_clicking_absorbed_worktree_thread_activates_worktree_workspace(
// Activate the main workspace before setting up the sidebar.
let main_workspace = multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace.clone(), window, cx);
workspace
});
@@ -4102,7 +4083,7 @@ async fn test_activate_archived_thread_with_saved_paths_activates_matching_works
mw.test_add_workspace(project_b.clone(), window, cx)
});
let workspace_a =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
// Save a thread with path_list pointing to project-b.
let session_id = acp::SessionId::new(Arc::from("archived-1"));
@@ -4110,7 +4091,7 @@ async fn test_activate_archived_thread_with_saved_paths_activates_matching_works
// Ensure workspace A is active.
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
});
cx.run_until_parked();
@@ -4174,11 +4155,11 @@ async fn test_activate_archived_thread_cwd_fallback_with_matching_workspace(
mw.test_add_workspace(project_b, window, cx)
});
let workspace_a =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
// Start with workspace A active.
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
});
cx.run_until_parked();
@@ -4302,7 +4283,7 @@ async fn test_activate_archived_thread_saved_paths_opens_new_workspace(cx: &mut
let session_id = acp::SessionId::new(Arc::from("archived-new-ws"));
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1,
"should start with one workspace"
);
@@ -4326,7 +4307,7 @@ async fn test_activate_archived_thread_saved_paths_opens_new_workspace(cx: &mut
cx.run_until_parked();
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2,
"should have opened a second workspace for the archived thread's saved paths"
);
@@ -4383,14 +4364,14 @@ async fn test_activate_archived_thread_reuses_workspace_in_another_window(cx: &m
assert_eq!(
multi_workspace_a
- .read_with(cx_a, |mw, _| mw.workspaces().count())
+ .read_with(cx_a, |mw, cx| mw.workspaces(cx).len())
.unwrap(),
1,
"should not add the other window's workspace into the current window"
);
assert_eq!(
multi_workspace_b
- .read_with(cx_a, |mw, _| mw.workspaces().count())
+ .read_with(cx_a, |mw, cx| mw.workspaces(cx).len())
.unwrap(),
1,
"should reuse the existing workspace in the other window"
@@ -4462,14 +4443,14 @@ async fn test_activate_archived_thread_reuses_workspace_in_another_window_with_t
assert_eq!(
multi_workspace_a
- .read_with(cx_a, |mw, _| mw.workspaces().count())
+ .read_with(cx_a, |mw, cx| mw.workspaces(cx).len())
.unwrap(),
1,
"should not add the other window's workspace into the current window"
);
assert_eq!(
multi_workspace_b
- .read_with(cx_a, |mw, _| mw.workspaces().count())
+ .read_with(cx_a, |mw, cx| mw.workspaces(cx).len())
.unwrap(),
1,
"should reuse the existing workspace in the other window"
@@ -4555,14 +4536,14 @@ async fn test_activate_archived_thread_prefers_current_window_for_matching_paths
});
assert_eq!(
multi_workspace_a
- .read_with(cx_a, |mw, _| mw.workspaces().count())
+ .read_with(cx_a, |mw, cx| mw.workspaces(cx).len())
.unwrap(),
1,
"current window should continue reusing its existing workspace"
);
assert_eq!(
multi_workspace_b
- .read_with(cx_a, |mw, _| mw.workspaces().count())
+ .read_with(cx_a, |mw, cx| mw.workspaces(cx).len())
.unwrap(),
1,
"other windows should not be activated just because they also match the saved paths"
@@ -4633,12 +4614,12 @@ async fn test_archive_thread_uses_next_threads_own_workspace(cx: &mut TestAppCon
// Activate main workspace so the sidebar tracks the main panel.
multi_workspace.update_in(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
});
let main_workspace =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
let main_panel = add_agent_panel(&main_workspace, cx);
let _worktree_panel = add_agent_panel(&worktree_workspace, cx);
@@ -4823,7 +4804,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
// Should have 2 workspaces.
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2,
"should start with 2 workspaces (main + linked worktree)"
);
@@ -4843,7 +4824,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
// The linked worktree workspace should have been removed.
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1,
"linked worktree workspace should be removed after archiving its last thread"
);
@@ -5646,7 +5627,7 @@ async fn test_archive_last_thread_on_linked_worktree_does_not_create_new_thread_
// Set up both workspaces with agent panels.
let main_workspace =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
let _main_panel = add_agent_panel(&main_workspace, cx);
let worktree_panel = add_agent_panel(&worktree_workspace, cx);
@@ -5814,7 +5795,7 @@ async fn test_archive_last_thread_on_linked_worktree_with_no_siblings_creates_dr
});
let main_workspace =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
let _main_panel = add_agent_panel(&main_workspace, cx);
let worktree_panel = add_agent_panel(&worktree_workspace, cx);
@@ -5933,7 +5914,7 @@ async fn test_archive_thread_on_linked_worktree_selects_sibling_thread(cx: &mut
});
let main_workspace =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
let _main_panel = add_agent_panel(&main_workspace, cx);
let worktree_panel = add_agent_panel(&worktree_workspace, cx);
@@ -6092,7 +6073,7 @@ async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestA
// Switch back to the main workspace.
multi_workspace.update_in(cx, |mw, window, cx| {
- let main_ws = mw.workspaces().next().unwrap().clone();
+ let main_ws = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(main_ws, window, cx);
});
cx.run_until_parked();
@@ -6138,7 +6119,7 @@ async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestA
});
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2
);
@@ -6149,7 +6130,7 @@ async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestA
cx.run_until_parked();
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2,
"dismissing a draft no longer removes the linked worktree workspace"
);
@@ -6306,7 +6287,7 @@ async fn test_transient_workspace_lifecycle(cx: &mut TestAppContext) {
let workspace_a = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
assert!(!multi_workspace.read_with(cx, |mw, _| mw.sidebar_open()));
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_a));
@@ -6314,7 +6295,7 @@ async fn test_transient_workspace_lifecycle(cx: &mut TestAppContext) {
// Add B — replaces A as the transient workspace.
let workspace_b = add_test_project("/project-b", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_b));
@@ -6322,7 +6303,7 @@ async fn test_transient_workspace_lifecycle(cx: &mut TestAppContext) {
// Add C — replaces B as the transient workspace.
let workspace_c = add_test_project("/project-c", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_c));
@@ -6343,7 +6324,7 @@ async fn test_transient_workspace_retained(cx: &mut TestAppContext) {
// Add B — retained since sidebar is open.
let workspace_a = add_test_project("/project-b", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2
);
@@ -6351,7 +6332,7 @@ async fn test_transient_workspace_retained(cx: &mut TestAppContext) {
multi_workspace.update_in(cx, |mw, window, cx| mw.activate(workspace_a, window, cx));
cx.run_until_parked();
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2
);
@@ -6359,14 +6340,14 @@ async fn test_transient_workspace_retained(cx: &mut TestAppContext) {
multi_workspace.update_in(cx, |mw, window, cx| mw.close_sidebar(window, cx));
cx.run_until_parked();
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2
);
// Add C — added as new transient workspace. (switching from retained, to transient)
let workspace_c = add_test_project("/project-c", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
3
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_c));
@@ -6374,7 +6355,7 @@ async fn test_transient_workspace_retained(cx: &mut TestAppContext) {
// Add D — replaces C as the transient workspace (Have retained and transient workspaces, transient workspace is dropped)
let workspace_d = add_test_project("/project-d", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
3
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_d));
@@ -6391,7 +6372,7 @@ async fn test_transient_workspace_promotion(cx: &mut TestAppContext) {
// Add B — replaces A as the transient workspace (A is discarded).
let workspace_b = add_test_project("/project-b", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_b));
@@ -6402,10 +6383,12 @@ async fn test_transient_workspace_promotion(cx: &mut TestAppContext) {
});
cx.run_until_parked();
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1
);
- assert!(multi_workspace.read_with(cx, |mw, _| mw.workspaces().any(|w| w == &workspace_b)));
+ assert!(multi_workspace.read_with(cx, |mw, cx| {
+ mw.workspaces(cx).iter().any(|w| w == &workspace_b)
+ }));
// Close sidebar — the retained B remains.
multi_workspace.update_in(cx, |mw, window, cx| {
@@ -6415,7 +6398,7 @@ async fn test_transient_workspace_promotion(cx: &mut TestAppContext) {
// Add C — added as new transient workspace.
let workspace_c = add_test_project("/project-c", &fs, &multi_workspace, cx).await;
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
2
);
assert!(multi_workspace.read_with(cx, |mw, _| mw.workspace() == &workspace_c));
@@ -6507,7 +6490,7 @@ async fn test_legacy_thread_with_canonical_path_opens_main_repo_workspace(cx: &m
// Verify only 1 workspace before clicking.
assert_eq!(
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).len()),
1,
);
@@ -6660,18 +6643,13 @@ async fn test_linked_worktree_workspace_reachable_after_adding_unrelated_project
// Force a full sidebar rebuild with all groups expanded.
sidebar.update_in(cx, |sidebar, _window, cx| {
- sidebar.collapsed_groups.clear();
- let group_ids: Vec<project::ProjectGroupId> = sidebar
- .contents
- .entries
- .iter()
- .filter_map(|entry| match entry {
- ListEntry::ProjectHeader { group_id, .. } => Some(*group_id),
- _ => None,
- })
- .collect();
- for group_id in group_ids {
- sidebar.expanded_groups.insert(group_id, 10_000);
+ for entry in &sidebar.contents.entries {
+ if let ListEntry::ProjectHeader { group, .. } = entry {
+ group.update(cx, |g, _| {
+ g.expanded = true;
+ g.visible_thread_count = Some(10_000);
+ });
+ }
}
sidebar.update_entries(cx);
});
@@ -6683,7 +6661,8 @@ async fn test_linked_worktree_workspace_reachable_after_adding_unrelated_project
let (all_ids, reachable_ids) = sidebar.read_with(cx, |sidebar, cx| {
let mw = multi_workspace.read(cx);
- let all: HashSet<gpui::EntityId> = mw.workspaces().map(|ws| ws.entity_id()).collect();
+ let all: HashSet<gpui::EntityId> =
+ mw.workspaces(cx).iter().map(|ws| ws.entity_id()).collect();
let reachable: HashSet<gpui::EntityId> = sidebar
.contents
.entries
@@ -6884,7 +6863,8 @@ async fn test_project_header_click_restores_last_viewed(cx: &mut TestAppContext)
// Now switch BACK to project-a by activating its workspace.
let workspace_a = multi_workspace.read_with(cx, |mw, cx| {
- mw.workspaces()
+ mw.workspaces(cx)
+ .into_iter()
.find(|ws| {
ws.read(cx)
.project()
@@ -6898,7 +6878,6 @@ async fn test_project_header_click_restores_last_viewed(cx: &mut TestAppContext)
})
})
.unwrap()
- .clone()
});
multi_workspace.update_in(cx, |mw, window, cx| {
mw.activate(workspace_a.clone(), window, cx);
@@ -7252,10 +7231,9 @@ mod property_test {
let (workspace, project) = multi_workspace.read_with(cx, |mw, cx| {
let group = mw.project_groups().get(project_group_index).unwrap();
let ws = mw
- .workspaces_for_project_group(group.id)
- .and_then(|ws| ws.first())
- .unwrap_or(mw.workspace())
- .clone();
+ .workspaces_for_project_group(group.read(cx).id, cx)
+ .and_then(|ws| ws.into_iter().next())
+ .unwrap_or(mw.workspace().clone());
let project = ws.read(cx).project().clone();
(ws, project)
});
@@ -7377,12 +7355,11 @@ mod property_test {
}
}
Operation::SwitchToProjectGroup { index } => {
- let workspace = multi_workspace.read_with(cx, |mw, _cx| {
+ let workspace = multi_workspace.read_with(cx, |mw, cx| {
let group = mw.project_groups().get(index).unwrap();
- mw.workspaces_for_project_group(group.id)
- .and_then(|ws| ws.first())
- .unwrap_or(mw.workspace())
- .clone()
+ mw.workspaces_for_project_group(group.read(cx).id, cx)
+ .and_then(|ws| ws.into_iter().next())
+ .unwrap_or(mw.workspace().clone())
});
multi_workspace.update_in(cx, |mw, window, cx| {
mw.activate(workspace, window, cx);
@@ -7392,8 +7369,9 @@ mod property_test {
project_group_index,
} => {
// Get the main worktree path from the project group key.
- let main_path = multi_workspace.read_with(cx, |mw, _| {
- let key = mw.project_group_keys().nth(project_group_index).unwrap();
+ let main_path = multi_workspace.read_with(cx, |mw, cx| {
+ let keys = mw.project_group_keys(cx);
+ let key = keys.get(project_group_index).unwrap();
key.path_list()
.paths()
.first()
@@ -7444,12 +7422,11 @@ mod property_test {
.await;
// Re-scan the main workspace's project so it discovers the new worktree.
- let main_workspace = multi_workspace.read_with(cx, |mw, _cx| {
+ let main_workspace = multi_workspace.read_with(cx, |mw, cx| {
let group = mw.project_groups().get(project_group_index).unwrap();
- mw.workspaces_for_project_group(group.id)
- .and_then(|ws| ws.first())
+ mw.workspaces_for_project_group(group.read(cx).id, cx)
+ .and_then(|ws| ws.into_iter().next())
.unwrap()
- .clone()
});
let main_project: Entity<project::Project> =
main_workspace.read_with(cx, |ws, _| ws.project().clone());
@@ -7465,11 +7442,10 @@ mod property_test {
Operation::AddWorktreeToProject {
project_group_index,
} => {
- let workspace = multi_workspace.read_with(cx, |mw, _cx| {
+ let workspace = multi_workspace.read_with(cx, |mw, cx| {
let group = mw.project_groups().get(project_group_index).unwrap();
- mw.workspaces_for_project_group(group.id)
- .and_then(|ws| ws.first())
- .cloned()
+ mw.workspaces_for_project_group(group.read(cx).id, cx)
+ .and_then(|ws| ws.into_iter().next())
});
let Some(workspace) = workspace else { return };
let project: Entity<project::Project> =
@@ -7494,11 +7470,10 @@ mod property_test {
Operation::RemoveWorktreeFromProject {
project_group_index,
} => {
- let workspace = multi_workspace.read_with(cx, |mw, _cx| {
+ let workspace = multi_workspace.read_with(cx, |mw, cx| {
let group = mw.project_groups().get(project_group_index).unwrap();
- mw.workspaces_for_project_group(group.id)
- .and_then(|ws| ws.first())
- .cloned()
+ mw.workspaces_for_project_group(group.read(cx).id, cx)
+ .and_then(|ws| ws.into_iter().next())
});
let Some(workspace) = workspace else { return };
let project: Entity<project::Project> =
@@ -7526,18 +7501,13 @@ mod property_test {
fn update_sidebar(sidebar: &Entity<Sidebar>, cx: &mut gpui::VisualTestContext) {
sidebar.update_in(cx, |sidebar, _window, cx| {
- sidebar.collapsed_groups.clear();
- let group_ids: Vec<project::ProjectGroupId> = sidebar
- .contents
- .entries
- .iter()
- .filter_map(|entry| match entry {
- ListEntry::ProjectHeader { group_id, .. } => Some(*group_id),
- _ => None,
- })
- .collect();
- for group_id in group_ids {
- sidebar.expanded_groups.insert(group_id, 10_000);
+ for entry in &sidebar.contents.entries {
+ if let ListEntry::ProjectHeader { group, .. } = entry {
+ group.update(cx, |g, _| {
+ g.expanded = true;
+ g.visible_thread_count = Some(10_000);
+ });
+ }
}
sidebar.update_entries(cx);
});
@@ -7590,17 +7560,18 @@ mod property_test {
// Every project group key in the multi-workspace that has a
// non-empty path list should appear as a ProjectHeader in the
// sidebar.
- let expected_keys: HashSet<&project::ProjectGroupKey> = mw
- .project_group_keys()
+ let expected_keys: HashSet<project::ProjectGroupKey> = mw
+ .project_group_keys(cx)
+ .into_iter()
.filter(|k| !k.path_list().paths().is_empty())
.collect();
- let sidebar_keys: HashSet<&project::ProjectGroupKey> = sidebar
+ let sidebar_keys: HashSet<project::ProjectGroupKey> = sidebar
.contents
.entries
.iter()
.filter_map(|entry| match entry {
- ListEntry::ProjectHeader { key, .. } => Some(key),
+ ListEntry::ProjectHeader { group, .. } => Some(group.read(cx).key.clone()),
_ => None,
})
.collect();
@@ -7624,11 +7595,7 @@ mod property_test {
let Some(multi_workspace) = sidebar.multi_workspace.upgrade() else {
anyhow::bail!("sidebar should still have an associated multi-workspace");
};
- let workspaces = multi_workspace
- .read(cx)
- .workspaces()
- .cloned()
- .collect::<Vec<_>>();
+ let workspaces = multi_workspace.read(cx).workspaces(cx);
let thread_store = ThreadMetadataStore::global(cx);
let sidebar_thread_ids: HashSet<acp::SessionId> = sidebar
@@ -7653,14 +7620,14 @@ mod property_test {
.push(workspace.clone());
}
- for group_key in mw.project_group_keys() {
+ for group_key in mw.project_group_keys(cx) {
let path_list = group_key.path_list().clone();
if path_list.paths().is_empty() {
continue;
}
let group_workspaces = workspaces_by_group
- .get(group_key)
+ .get(&group_key)
.map(|ws| ws.as_slice())
.unwrap_or_default();
@@ -7827,7 +7794,8 @@ mod property_test {
.collect();
let all_workspace_ids: HashSet<gpui::EntityId> = multi_workspace
- .workspaces()
+ .workspaces(cx)
+ .iter()
.map(|ws| ws.entity_id())
.collect();
@@ -7906,7 +7874,7 @@ mod property_test {
for &raw_op in &raw_operations {
let project_group_count =
- multi_workspace.read_with(cx, |mw, _| mw.project_group_keys().count());
+ multi_workspace.read_with(cx, |mw, cx| mw.project_group_keys(cx).len());
let operation = state.generate_operation(raw_op, project_group_count);
executed.push(format!("{:?}", operation));
perform_operation(operation, &mut state, &multi_workspace, &sidebar, cx).await;
@@ -8163,16 +8131,16 @@ async fn test_remote_project_integration_does_not_briefly_render_as_separate_pro
cx.run_until_parked();
- let new_workspace = multi_workspace.read_with(cx, |mw, _| {
+ let new_workspace = multi_workspace.read_with(cx, |mw, cx| {
assert_eq!(
- mw.workspaces().count(),
+ mw.workspaces(cx).len(),
2,
"confirming a closed remote thread should open a second workspace"
);
- mw.workspaces()
+ mw.workspaces(cx)
+ .into_iter()
.find(|workspace| workspace.entity_id() != mw.workspace().entity_id())
.unwrap()
- .clone()
});
server_fs
@@ -715,7 +715,7 @@ impl TitleBar {
.multi_workspace
.as_ref()
.and_then(|mw| mw.upgrade())
- .map(|mw| mw.read(cx).project_group_keys().cloned().collect())
+ .map(|mw| mw.read(cx).project_group_keys(cx))
.unwrap_or_default();
PopoverMenu::new("recent-projects-menu")
@@ -772,7 +772,7 @@ impl TitleBar {
.multi_workspace
.as_ref()
.and_then(|mw| mw.upgrade())
- .map(|mw| mw.read(cx).project_group_keys().cloned().collect())
+ .map(|mw| mw.read(cx).project_group_keys(cx))
.unwrap_or_default();
PopoverMenu::new("sidebar-title-recent-projects-menu")
@@ -761,7 +761,7 @@ impl VimGlobals {
if let Some(multi_workspace) = window.downcast::<MultiWorkspace>() {
multi_workspace
.update(cx, |multi_workspace, _, cx| {
- for workspace in multi_workspace.workspaces() {
+ for workspace in multi_workspace.workspaces(cx) {
workspace.update(cx, |workspace, cx| {
Vim::update_globals(cx, |globals, cx| {
globals.register_workspace(workspace, cx)
@@ -257,7 +257,6 @@ impl<T: Sidebar> SidebarHandle for Entity<T> {
}
}
-#[derive(Clone)]
pub struct ProjectGroup {
pub id: ProjectGroupId,
pub key: ProjectGroupKey,
@@ -266,9 +265,15 @@ pub struct ProjectGroup {
pub visible_thread_count: Option<usize>,
}
+pub enum ProjectGroupEvent {
+ KeyChanged,
+}
+
+impl EventEmitter<ProjectGroupEvent> for ProjectGroup {}
+
pub struct MultiWorkspace {
window_id: WindowId,
- project_groups: Vec<ProjectGroup>,
+ project_groups: Vec<Entity<ProjectGroup>>,
active_workspace: Entity<Workspace>,
sidebar: Option<Box<dyn SidebarHandle>>,
sidebar_open: bool,
@@ -442,7 +447,7 @@ impl MultiWorkspace {
self.retain_active_workspace(cx);
let sidebar_focus_handle = self.sidebar.as_ref().map(|s| s.focus_handle(cx));
for group in &self.project_groups {
- for workspace in &group.workspaces {
+ for workspace in &group.read(cx).workspaces.clone() {
workspace.update(cx, |workspace, _cx| {
workspace.set_sidebar_focus_handle(sidebar_focus_handle.clone());
});
@@ -455,7 +460,7 @@ impl MultiWorkspace {
pub fn close_sidebar(&mut self, window: &mut Window, cx: &mut Context<Self>) {
self.sidebar_open = false;
for group in &self.project_groups {
- for workspace in &group.workspaces {
+ for workspace in &group.read(cx).workspaces.clone() {
workspace.update(cx, |workspace, _cx| {
workspace.set_sidebar_focus_handle(None);
});
@@ -491,9 +496,8 @@ impl MultiWorkspace {
pub fn close_window(&mut self, _: &CloseWindow, window: &mut Window, cx: &mut Context<Self>) {
cx.spawn_in(window, async move |this, cx| {
- let workspaces = this.update(cx, |multi_workspace, _cx| {
- multi_workspace.workspaces().cloned().collect::<Vec<_>>()
- })?;
+ let workspaces =
+ this.update(cx, |multi_workspace, cx| multi_workspace.workspaces(cx))?;
for workspace in workspaces {
let should_continue = workspace
@@ -556,18 +560,21 @@ impl MultiWorkspace {
}
// Check if the workspace's key already matches its group
- if let Some(group) = self.group_for_workspace(workspace) {
- if group.key == new_key {
+ if let Some(group) = self.group_for_workspace(workspace, cx) {
+ if group.read(cx).key == new_key {
return;
}
}
// Remove the workspace from its current group
- for group in &mut self.project_groups {
- group.workspaces.retain(|w| w != workspace);
+ for group in &self.project_groups {
+ group.update(cx, |g, _| {
+ g.workspaces.retain(|w| w != workspace);
+ });
}
// Clean up empty groups
- self.project_groups.retain(|g| !g.workspaces.is_empty());
+ self.project_groups
+ .retain(|g| !g.read(cx).workspaces.is_empty());
// Add the workspace to the group matching its new key (or create one)
self.ensure_workspace_in_group(workspace.clone(), new_key, cx);
@@ -581,56 +588,121 @@ impl MultiWorkspace {
workspace: &Entity<Workspace>,
cx: &App,
) -> ProjectGroupKey {
- self.group_for_workspace(workspace)
- .map(|g| g.key.clone())
+ self.group_for_workspace(workspace, cx)
+ .map(|g| g.read(cx).key.clone())
.unwrap_or_else(|| workspace.read(cx).project_group_key(cx))
}
- pub fn restore_project_groups(&mut self, groups: Vec<(ProjectGroupId, ProjectGroupKey)>) {
- let mut restored: Vec<ProjectGroup> = Vec::new();
- for (id, key) in groups {
+ pub fn restore_project_groups(
+ &mut self,
+ groups: Vec<(ProjectGroupId, ProjectGroupKey, bool, Option<usize>)>,
+ cx: &mut Context<Self>,
+ ) {
+ let mut restored: Vec<Entity<ProjectGroup>> = Vec::new();
+ for (id, key, expanded, visible_thread_count) in groups {
if key.path_list().paths().is_empty() {
continue;
}
- if restored.iter().any(|g| g.id == id) {
+ if restored.iter().any(|g| g.read(cx).id == id) {
continue;
}
- restored.push(ProjectGroup {
+ let group = cx.new(|_| ProjectGroup {
id,
key,
workspaces: Vec::new(),
- expanded: true,
- visible_thread_count: None,
+ expanded,
+ visible_thread_count,
});
+ self._subscriptions
+ .push(cx.subscribe(&group, Self::handle_project_group_event));
+ restored.push(group);
}
for existing in &self.project_groups {
- if !restored.iter().any(|g| g.id == existing.id) {
+ if !restored
+ .iter()
+ .any(|g| g.read(cx).id == existing.read(cx).id)
+ {
restored.push(existing.clone());
}
}
self.project_groups = restored;
}
- pub fn project_group_keys(&self) -> impl Iterator<Item = &ProjectGroupKey> {
- self.project_groups.iter().map(|g| &g.key)
+ fn handle_project_group_event(
+ &mut self,
+ changed_group: Entity<ProjectGroup>,
+ event: &ProjectGroupEvent,
+ cx: &mut Context<Self>,
+ ) {
+ match event {
+ ProjectGroupEvent::KeyChanged => self.merge_group_if_duplicate(changed_group, cx),
+ }
}
- pub fn project_groups(&self) -> &[ProjectGroup] {
- &self.project_groups
+ fn merge_group_if_duplicate(
+ &mut self,
+ changed_group: Entity<ProjectGroup>,
+ cx: &mut Context<Self>,
+ ) {
+ let changed_key = changed_group.read(cx).key.clone();
+ let changed_id = changed_group.read(cx).id;
+
+ let merge_target = self
+ .project_groups
+ .iter()
+ .find(|g| {
+ let g_ref = g.read(cx);
+ g_ref.id != changed_id && g_ref.key == changed_key
+ })
+ .cloned();
+
+ let Some(target) = merge_target else {
+ return;
+ };
+
+ // Move all workspaces from the changed group into the target.
+ let workspaces_to_move = changed_group.read(cx).workspaces.clone();
+ target.update(cx, |t, _| {
+ for workspace in workspaces_to_move {
+ if !t.workspaces.contains(&workspace) {
+ t.workspaces.push(workspace);
+ }
+ }
+ });
+ changed_group.update(cx, |g, _| {
+ g.workspaces.clear();
+ });
+
+ // Remove the now-empty changed group.
+ self.project_groups.retain(|g| g.read(cx).id != changed_id);
+
+ self.serialize(cx);
+ cx.notify();
+ }
+
+ pub fn project_group_keys(&self, cx: &App) -> Vec<ProjectGroupKey> {
+ self.project_groups
+ .iter()
+ .map(|g| g.read(cx).key.clone())
+ .collect()
}
- pub fn group(&self, id: ProjectGroupId) -> Option<&ProjectGroup> {
- self.project_groups.iter().find(|g| g.id == id)
+ pub fn project_groups(&self) -> &[Entity<ProjectGroup>] {
+ &self.project_groups
}
- pub fn group_mut(&mut self, id: ProjectGroupId) -> Option<&mut ProjectGroup> {
- self.project_groups.iter_mut().find(|g| g.id == id)
+ pub fn group(&self, id: ProjectGroupId, cx: &App) -> Option<&Entity<ProjectGroup>> {
+ self.project_groups.iter().find(|g| g.read(cx).id == id)
}
- pub fn group_for_workspace(&self, workspace: &Entity<Workspace>) -> Option<&ProjectGroup> {
+ pub fn group_for_workspace(
+ &self,
+ workspace: &Entity<Workspace>,
+ cx: &App,
+ ) -> Option<&Entity<ProjectGroup>> {
self.project_groups
.iter()
- .find(|g| g.workspaces.contains(workspace))
+ .find(|g| g.read(cx).workspaces.contains(workspace))
}
pub(crate) fn ensure_workspace_in_group(
@@ -639,26 +711,40 @@ impl MultiWorkspace {
key: ProjectGroupKey,
cx: &mut Context<Self>,
) {
- if let Some(group) = self.project_groups.iter_mut().find(|g| g.key == key) {
- if !group.workspaces.contains(&workspace) {
- group.workspaces.push(workspace.clone());
+ if let Some(group) = self
+ .project_groups
+ .iter()
+ .find(|g| g.read(cx).key == key)
+ .cloned()
+ {
+ let already_has = group.read(cx).workspaces.contains(&workspace);
+ if !already_has {
+ group.update(cx, |g, _| {
+ g.workspaces.push(workspace.clone());
+ });
cx.emit(MultiWorkspaceEvent::WorkspaceAdded(workspace));
}
return;
}
- let group = ProjectGroup {
+ let group = cx.new(|_| ProjectGroup {
id: ProjectGroupId::new(),
key,
expanded: true,
visible_thread_count: None,
workspaces: vec![workspace.clone()],
- };
+ });
+ self._subscriptions
+ .push(cx.subscribe(&group, Self::handle_project_group_event));
self.project_groups.insert(0, group);
cx.emit(MultiWorkspaceEvent::WorkspaceAdded(workspace));
}
- pub fn workspaces_for_project_group(&self, id: ProjectGroupId) -> Option<&[Entity<Workspace>]> {
- self.group(id).map(|g| g.workspaces.as_slice())
+ pub fn workspaces_for_project_group(
+ &self,
+ id: ProjectGroupId,
+ cx: &App,
+ ) -> Option<Vec<Entity<Workspace>>> {
+ self.group(id, cx).map(|g| g.read(cx).workspaces.clone())
}
pub fn remove_folder_from_project_group(
@@ -667,17 +753,20 @@ impl MultiWorkspace {
path: &Path,
cx: &mut Context<Self>,
) {
- let Some(group) = self.group_mut(group_id) else {
+ let Some(group) = self.group(group_id, cx).cloned() else {
return;
};
- let new_path_list = group.key.path_list().without_path(path);
+ let new_path_list = group.read(cx).key.path_list().without_path(path);
if new_path_list.is_empty() {
return;
}
- group.key = ProjectGroupKey::new(group.key.host(), new_path_list);
- let workspaces: Vec<_> = group.workspaces.clone();
+ let workspaces = group.update(cx, |g, cx| {
+ g.key = ProjectGroupKey::new(g.key.host(), new_path_list);
+ cx.emit(ProjectGroupEvent::KeyChanged);
+ g.workspaces.clone()
+ });
for workspace in workspaces {
let project = workspace.read(cx).project().clone();
@@ -729,15 +818,18 @@ impl MultiWorkspace {
new_paths: Vec<PathBuf>,
cx: &mut Context<Self>,
) {
- let Some(group) = self.group_mut(group_id) else {
+ let Some(group) = self.group(group_id, cx).cloned() else {
return;
};
- let mut all_paths: Vec<PathBuf> = group.key.path_list().paths().to_vec();
- all_paths.extend(new_paths.iter().cloned());
- let new_path_list = PathList::new(&all_paths);
- group.key = ProjectGroupKey::new(group.key.host(), new_path_list);
- let workspaces: Vec<_> = group.workspaces.clone();
+ let workspaces = group.update(cx, |g, cx| {
+ let mut all_paths: Vec<PathBuf> = g.key.path_list().paths().to_vec();
+ all_paths.extend(new_paths.iter().cloned());
+ let new_path_list = PathList::new(&all_paths);
+ g.key = ProjectGroupKey::new(g.key.host(), new_path_list);
+ cx.emit(ProjectGroupEvent::KeyChanged);
+ g.workspaces.clone()
+ });
for workspace in workspaces {
let project = workspace.read(cx).project().clone();
@@ -760,9 +852,12 @@ impl MultiWorkspace {
window: &mut Window,
cx: &mut Context<Self>,
) -> Task<Result<bool>> {
- let pos = self.project_groups.iter().position(|g| g.id == group_id);
+ let pos = self
+ .project_groups
+ .iter()
+ .position(|g| g.read(cx).id == group_id);
let workspaces: Vec<_> = pos
- .map(|p| self.project_groups[p].workspaces.clone())
+ .map(|p| self.project_groups[p].read(cx).workspaces.clone())
.unwrap_or_default();
// Compute the neighbor while the group is still in the list.
@@ -770,11 +865,11 @@ impl MultiWorkspace {
self.project_groups
.get(pos + 1)
.or_else(|| pos.checked_sub(1).and_then(|i| self.project_groups.get(i)))
- .map(|g| g.key.clone())
+ .map(|g| g.read(cx).key.clone())
});
// Now remove the group.
- self.project_groups.retain(|g| g.id != group_id);
+ self.project_groups.retain(|g| g.read(cx).id != group_id);
self.remove(
workspaces,
@@ -817,13 +912,12 @@ impl MultiWorkspace {
) -> Option<Entity<Workspace>> {
self.project_groups
.iter()
- .flat_map(|g| &g.workspaces)
+ .flat_map(|g| g.read(cx).workspaces.clone())
.find(|ws| {
let key = ws.read(cx).project_group_key(cx);
key.host().as_ref() == host
&& PathList::new(&ws.read(cx).root_paths(cx)) == *path_list
})
- .cloned()
}
/// Finds an existing workspace whose paths match, or creates a new one.
@@ -948,13 +1042,20 @@ impl MultiWorkspace {
&self.active_workspace
}
- pub fn workspaces(&self) -> impl Iterator<Item = &Entity<Workspace>> {
- let grouped = self.project_groups.iter().flat_map(|g| &g.workspaces);
- let active = std::iter::once(&self.active_workspace);
+ pub fn workspaces(&self, cx: &App) -> Vec<Entity<Workspace>> {
let mut seen = HashSet::new();
- grouped
- .chain(active)
- .filter(move |ws| seen.insert(ws.entity_id()))
+ let mut result = Vec::new();
+ for group in &self.project_groups {
+ for workspace in &group.read(cx).workspaces {
+ if seen.insert(workspace.entity_id()) {
+ result.push(workspace.clone());
+ }
+ }
+ }
+ if seen.insert(self.active_workspace.entity_id()) {
+ result.push(self.active_workspace.clone());
+ }
+ result
}
/// Adds a workspace to this window as persistent without changing which
@@ -962,7 +1063,7 @@ impl MultiWorkspace {
/// persistent list regardless of sidebar state — it's used for system-
/// initiated additions like deserialization and worktree discovery.
pub fn add(&mut self, workspace: Entity<Workspace>, window: &Window, cx: &mut Context<Self>) {
- if self.group_for_workspace(&workspace).is_some() {
+ if self.group_for_workspace(&workspace, cx).is_some() {
return;
}
let key = workspace.read(cx).project_group_key(cx);
@@ -987,7 +1088,7 @@ impl MultiWorkspace {
}
// If the workspace isn't in any group yet, subscribe and optionally group it
- if self.group_for_workspace(&workspace).is_none() {
+ if self.group_for_workspace(&workspace, cx).is_none() {
Self::subscribe_to_workspace(&workspace, window, cx);
self.sync_sidebar_to_workspace(&workspace, cx);
let weak_self = cx.weak_entity();
@@ -1011,7 +1112,7 @@ impl MultiWorkspace {
/// the sidebar is closed. No-op if the workspace is already persistent.
pub fn retain_active_workspace(&mut self, cx: &mut Context<Self>) {
let workspace = self.active_workspace.clone();
- if self.group_for_workspace(&workspace).is_none() {
+ if self.group_for_workspace(&workspace, cx).is_none() {
let key = workspace.read(cx).project_group_key(cx);
self.ensure_workspace_in_group(workspace, key, cx);
self.serialize(cx);
@@ -1027,7 +1128,7 @@ impl MultiWorkspace {
}
let active = self.active_workspace.clone();
for group in std::mem::take(&mut self.project_groups) {
- for workspace in group.workspaces {
+ for workspace in group.read(cx).workspaces.clone() {
if workspace != active {
self.detach_workspace(&workspace, cx);
}
@@ -1041,11 +1142,14 @@ impl MultiWorkspace {
/// so the workspace still appears in the recent-projects list.
fn detach_workspace(&mut self, workspace: &Entity<Workspace>, cx: &mut Context<Self>) {
// Remove workspace from its group
- for group in &mut self.project_groups {
- group.workspaces.retain(|w| w != workspace);
+ for group in &self.project_groups {
+ group.update(cx, |g, _| {
+ g.workspaces.retain(|w| w != workspace);
+ });
}
// Remove empty groups
- self.project_groups.retain(|g| !g.workspaces.is_empty());
+ self.project_groups
+ .retain(|g| !g.read(cx).workspaces.is_empty());
cx.emit(MultiWorkspaceEvent::WorkspaceRemoved(workspace.entity_id()));
workspace.update(cx, |workspace, _cx| {
workspace.session_id.take();
@@ -1084,8 +1188,12 @@ impl MultiWorkspace {
.project_groups
.iter()
.map(|g| {
+ let g = g.read(cx);
crate::persistence::model::SerializedProjectGroup::from_group(
- g.id, &g.key,
+ g.id,
+ &g.key,
+ g.expanded,
+ g.visible_thread_count,
)
})
.collect::<Vec<_>>(),
@@ -1227,6 +1335,7 @@ impl MultiWorkspace {
#[cfg(any(test, feature = "test-support"))]
pub fn assert_project_group_key_integrity(&self, cx: &App) -> anyhow::Result<()> {
for group in &self.project_groups {
+ let group = group.read(cx);
for workspace in &group.workspaces {
let live_key = workspace.read(cx).project_group_key(cx);
anyhow::ensure!(
@@ -1386,7 +1495,7 @@ impl MultiWorkspace {
for workspace in &workspaces {
// detach_workspace already removes from groups
- let was_in_group = this.group_for_workspace(workspace).is_some();
+ let was_in_group = this.group_for_workspace(workspace, cx).is_some();
if was_in_group {
this.detach_workspace(workspace, cx);
removed_any = true;
@@ -101,10 +101,10 @@ async fn test_project_group_keys_initial(cx: &mut TestAppContext) {
mw.open_sidebar(cx);
});
- multi_workspace.read_with(cx, |mw, _cx| {
- let keys: Vec<&ProjectGroupKey> = mw.project_group_keys().collect();
+ multi_workspace.read_with(cx, |mw, cx| {
+ let keys: Vec<ProjectGroupKey> = mw.project_group_keys(cx);
assert_eq!(keys.len(), 1, "should have exactly one key on creation");
- assert_eq!(*keys[0], expected_key);
+ assert_eq!(keys[0], expected_key);
});
}
@@ -131,8 +131,8 @@ async fn test_project_group_keys_add_workspace(cx: &mut TestAppContext) {
mw.open_sidebar(cx);
});
- multi_workspace.read_with(cx, |mw, _cx| {
- assert_eq!(mw.project_group_keys().count(), 1);
+ multi_workspace.read_with(cx, |mw, cx| {
+ assert_eq!(mw.project_group_keys(cx).len(), 1);
});
// Adding a workspace with a different project root adds a new key.
@@ -140,15 +140,15 @@ async fn test_project_group_keys_add_workspace(cx: &mut TestAppContext) {
mw.test_add_workspace(project_b, window, cx);
});
- multi_workspace.read_with(cx, |mw, _cx| {
- let keys: Vec<&ProjectGroupKey> = mw.project_group_keys().collect();
+ multi_workspace.read_with(cx, |mw, cx| {
+ let keys: Vec<ProjectGroupKey> = mw.project_group_keys(cx);
assert_eq!(
keys.len(),
2,
"should have two keys after adding a second workspace"
);
- assert_eq!(*keys[0], key_b);
- assert_eq!(*keys[1], key_a);
+ assert_eq!(keys[0], key_b);
+ assert_eq!(keys[1], key_a);
});
}
@@ -176,8 +176,8 @@ async fn test_project_group_keys_duplicate_not_added(cx: &mut TestAppContext) {
mw.test_add_workspace(project_a2, window, cx);
});
- multi_workspace.read_with(cx, |mw, _cx| {
- let keys: Vec<&ProjectGroupKey> = mw.project_group_keys().collect();
+ multi_workspace.read_with(cx, |mw, cx| {
+ let keys: Vec<ProjectGroupKey> = mw.project_group_keys(cx);
assert_eq!(
keys.len(),
1,
@@ -185,3 +185,60 @@ async fn test_project_group_keys_duplicate_not_added(cx: &mut TestAppContext) {
);
});
}
+
+#[gpui::test]
+async fn test_groups_with_same_paths_merge(cx: &mut TestAppContext) {
+ init_test(cx);
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree("/a", json!({ "file.txt": "" })).await;
+ fs.insert_tree("/b", json!({ "file.txt": "" })).await;
+ let project_a = Project::test(fs.clone(), ["/a".as_ref()], cx).await;
+ let project_b = Project::test(fs.clone(), ["/b".as_ref()], cx).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a, window, cx));
+
+ // Open the sidebar so workspaces get grouped.
+ multi_workspace.update(cx, |mw, cx| {
+ mw.open_sidebar(cx);
+ });
+ cx.run_until_parked();
+
+ // Add a second workspace, creating group_b with path [/b].
+ let group_a_id = multi_workspace.update_in(cx, |mw, window, cx| {
+ let group_a_id = mw.project_groups()[0].read(cx).id;
+ mw.test_add_workspace(project_b, window, cx);
+ group_a_id
+ });
+ cx.run_until_parked();
+
+ // Now add /b to group_a so it has [/a, /b].
+ multi_workspace.update(cx, |mw, cx| {
+ mw.add_folders_to_project_group(group_a_id, vec!["/b".into()], cx);
+ });
+ cx.run_until_parked();
+
+ // Verify we have two groups.
+ multi_workspace.read_with(cx, |mw, _cx| {
+ assert_eq!(
+ mw.project_groups().len(),
+ 2,
+ "should have two groups before the merge"
+ );
+ });
+
+ // Remove /a from group_a, making its key [/b] — same as group_b.
+ multi_workspace.update(cx, |mw, cx| {
+ mw.remove_folder_from_project_group(group_a_id, Path::new("/a"), cx);
+ });
+ cx.run_until_parked();
+
+ // The two groups now have identical keys [/b] and should have been merged.
+ multi_workspace.read_with(cx, |mw, _cx| {
+ assert_eq!(
+ mw.project_groups().len(),
+ 1,
+ "groups with identical paths should be merged into one"
+ );
+ });
+}
@@ -1051,7 +1051,7 @@ pub fn show_app_notification<V: Notification + 'static>(
if let Some(multi_workspace) = window.downcast::<MultiWorkspace>() {
multi_workspace
.update(cx, |multi_workspace, _window, cx| {
- for workspace in multi_workspace.workspaces() {
+ for workspace in multi_workspace.workspaces(cx) {
workspace.update(cx, |workspace, cx| {
workspace.show_notification_without_handling_dismiss_events(
&id,
@@ -1077,7 +1077,7 @@ pub fn dismiss_app_notification(id: &NotificationId, cx: &mut App) {
let id = id.clone();
multi_workspace
.update(cx, |multi_workspace, _window, cx| {
- for workspace in multi_workspace.workspaces() {
+ for workspace in multi_workspace.workspaces(cx) {
workspace.update(cx, |workspace, cx| {
workspace.dismiss_notification(&id, cx)
});
@@ -2569,7 +2569,7 @@ mod tests {
// --- Remove the first workspace (index 0, which is not the active one) ---
multi_workspace.update_in(cx, |mw, _window, cx| {
- let ws = mw.workspaces().nth(0).unwrap().clone();
+ let ws = mw.workspaces(cx).into_iter().nth(0).unwrap();
mw.remove([ws], |_, _, _| unreachable!(), _window, cx)
.detach_and_log_err(cx);
});
@@ -4240,7 +4240,7 @@ mod tests {
// Remove workspace at index 1 (the second workspace).
multi_workspace.update_in(cx, |mw, window, cx| {
- let ws = mw.workspaces().nth(1).unwrap().clone();
+ let ws = mw.workspaces(cx).into_iter().nth(1).unwrap();
mw.remove([ws], |_, _, _| unreachable!(), window, cx)
.detach_and_log_err(cx);
});
@@ -4350,7 +4350,7 @@ mod tests {
// Remove workspace2 (index 1).
multi_workspace.update_in(cx, |mw, window, cx| {
- let ws = mw.workspaces().nth(1).unwrap().clone();
+ let ws = mw.workspaces(cx).into_iter().nth(1).unwrap();
mw.remove([ws], |_, _, _| unreachable!(), window, cx)
.detach_and_log_err(cx);
});
@@ -4433,7 +4433,7 @@ mod tests {
// Remove workspace2 — this pushes a task to pending_removal_tasks.
multi_workspace.update_in(cx, |mw, window, cx| {
- let ws = mw.workspaces().nth(1).unwrap().clone();
+ let ws = mw.workspaces(cx).into_iter().nth(1).unwrap();
mw.remove([ws], |_, _, _| unreachable!(), window, cx)
.detach_and_log_err(cx);
});
@@ -4442,7 +4442,8 @@ mod tests {
// removal tasks and await them all.
let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
let mut tasks: Vec<Task<()>> = mw
- .workspaces()
+ .workspaces(cx)
+ .iter()
.map(|workspace| {
workspace.update(cx, |workspace, cx| {
workspace.flush_serialization(window, cx)
@@ -4774,7 +4775,7 @@ mod tests {
// Assign database IDs and set up session bindings so serialization
// writes real rows.
multi_workspace.update_in(cx, |mw, _, cx| {
- for workspace in mw.workspaces() {
+ for workspace in mw.workspaces(cx) {
workspace.update(cx, |ws, _cx| {
ws.set_random_database_id();
});
@@ -4788,7 +4789,7 @@ mod tests {
let window_id_u64 = window.window_handle().window_id().as_u64();
let mut tasks: Vec<Task<()>> = Vec::new();
- for workspace in mw.workspaces() {
+ for workspace in mw.workspaces(cx) {
tasks.push(workspace.update(cx, |ws, cx| ws.flush_serialization(window, cx)));
if let Some(db_id) = workspace.read(cx).database_id() {
let db = WorkspaceDb::global(cx);
@@ -4882,9 +4883,7 @@ mod tests {
// The restored window should have the same project group keys.
let restored_keys: Vec<ProjectGroupKey> = restored_handle
- .read_with(cx, |mw: &MultiWorkspace, _cx| {
- mw.project_group_keys().cloned().collect()
- })
+ .read_with(cx, |mw: &MultiWorkspace, cx| mw.project_group_keys(cx))
.unwrap();
assert_eq!(
restored_keys, expected_keys,
@@ -4959,8 +4958,9 @@ mod tests {
let group_id = mw
.project_groups()
.iter()
- .find(|g| g.key == key_b)
+ .find(|g| g.read(cx).key == key_b)
.unwrap()
+ .read(cx)
.id;
mw.remove_project_group(group_id, window, cx)
.detach_and_log_err(cx);
@@ -4982,7 +4982,7 @@ mod tests {
// Activate workspace A (the bottom) so removing it tests the
// "fall back upward" path.
let workspace_a =
- multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
+ multi_workspace.read_with(cx, |mw, cx| mw.workspaces(cx).into_iter().next().unwrap());
multi_workspace.update_in(cx, |mw, window, cx| {
mw.activate(workspace_a.clone(), window, cx);
});
@@ -4994,8 +4994,9 @@ mod tests {
let group_id = mw
.project_groups()
.iter()
- .find(|g| g.key == key_a)
+ .find(|g| g.read(cx).key == key_a)
.unwrap()
+ .read(cx)
.id;
mw.remove_project_group(group_id, window, cx)
.detach_and_log_err(cx);
@@ -5019,8 +5020,9 @@ mod tests {
let group_id = mw
.project_groups()
.iter()
- .find(|g| g.key == key_c)
+ .find(|g| g.read(cx).key == key_c)
.unwrap()
+ .read(cx)
.id;
mw.remove_project_group(group_id, window, cx)
.detach_and_log_err(cx);
@@ -67,10 +67,23 @@ pub struct SerializedProjectGroup {
pub id: Option<ProjectGroupId>,
pub path_list: SerializedPathList,
pub(crate) location: SerializedWorkspaceLocation,
+ #[serde(default = "default_expanded")]
+ pub expanded: bool,
+ #[serde(default)]
+ pub visible_thread_count: Option<usize>,
+}
+
+fn default_expanded() -> bool {
+ true
}
impl SerializedProjectGroup {
- pub fn from_group(id: ProjectGroupId, key: &ProjectGroupKey) -> Self {
+ pub fn from_group(
+ id: ProjectGroupId,
+ key: &ProjectGroupKey,
+ expanded: bool,
+ visible_thread_count: Option<usize>,
+ ) -> Self {
Self {
id: Some(id),
path_list: key.path_list().serialize(),
@@ -78,23 +91,30 @@ impl SerializedProjectGroup {
Some(host) => SerializedWorkspaceLocation::Remote(host),
None => SerializedWorkspaceLocation::Local,
},
+ expanded,
+ visible_thread_count,
}
}
- pub fn into_id_and_key(self) -> (ProjectGroupId, ProjectGroupKey) {
+ pub fn into_id_key_and_state(self) -> (ProjectGroupId, ProjectGroupKey, bool, Option<usize>) {
let id = self.id.unwrap_or_else(ProjectGroupId::new);
let path_list = PathList::deserialize(&self.path_list);
let host = match self.location {
SerializedWorkspaceLocation::Local => None,
SerializedWorkspaceLocation::Remote(opts) => Some(opts),
};
- (id, ProjectGroupKey::new(host, path_list))
+ (
+ id,
+ ProjectGroupKey::new(host, path_list),
+ self.expanded,
+ self.visible_thread_count,
+ )
}
}
impl From<SerializedProjectGroup> for ProjectGroupKey {
fn from(value: SerializedProjectGroup) -> Self {
- let (_, key) = value.into_id_and_key();
+ let (_, key, _, _) = value.into_id_key_and_state();
key
}
}
@@ -8780,9 +8780,14 @@ pub async fn apply_restored_multiworkspace_state(
if !project_group_keys.is_empty() {
// Resolve linked worktree paths to their main repo paths so
// stale keys from previous sessions get normalized and deduped.
- let mut resolved_groups: Vec<(project::ProjectGroupId, ProjectGroupKey)> = Vec::new();
+ let mut resolved_groups: Vec<(
+ project::ProjectGroupId,
+ ProjectGroupKey,
+ bool,
+ Option<usize>,
+ )> = Vec::new();
for serialized in project_group_keys.iter().cloned() {
- let (id, key) = serialized.into_id_and_key();
+ let (id, key, expanded, visible_thread_count) = serialized.into_id_key_and_state();
if key.path_list().paths().is_empty() {
continue;
}
@@ -8799,14 +8804,14 @@ pub async fn apply_restored_multiworkspace_state(
}
}
let resolved = ProjectGroupKey::new(key.host(), PathList::new(&resolved_paths));
- if !resolved_groups.iter().any(|(_, k)| *k == resolved) {
- resolved_groups.push((id, resolved));
+ if !resolved_groups.iter().any(|(_, k, _, _)| *k == resolved) {
+ resolved_groups.push((id, resolved, expanded, visible_thread_count));
}
}
window_handle
- .update(cx, |multi_workspace, _window, _cx| {
- multi_workspace.restore_project_groups(resolved_groups);
+ .update(cx, |multi_workspace, _window, cx| {
+ multi_workspace.restore_project_groups(resolved_groups, cx);
})
.ok();
}
@@ -9197,7 +9202,7 @@ pub fn workspace_windows_for_location(
};
multi_workspace.read(cx).is_ok_and(|multi_workspace| {
- multi_workspace.workspaces().any(|workspace| {
+ multi_workspace.workspaces(cx).iter().any(|workspace| {
match workspace.read(cx).workspace_location(cx) {
WorkspaceLocation::Location(location, _) => {
match (&location, serialized_location) {
@@ -9236,7 +9241,7 @@ pub async fn find_existing_workspace(
cx.update(|cx| {
for window in workspace_windows_for_location(location, cx) {
if let Ok(multi_workspace) = window.read(cx) {
- for workspace in multi_workspace.workspaces() {
+ for workspace in multi_workspace.workspaces(cx) {
let project = workspace.read(cx).project.read(cx);
let m = project.visibility_for_paths(
abs_paths,
@@ -9932,11 +9937,11 @@ pub fn join_in_room_project(
.and_then(|window_handle| {
window_handle
.update(cx, |multi_workspace, _window, cx| {
- for workspace in multi_workspace.workspaces() {
+ for workspace in multi_workspace.workspaces(cx) {
if workspace.read(cx).project().read(cx).remote_id()
== Some(project_id)
{
- return Some((window_handle, workspace.clone()));
+ return Some((window_handle, workspace));
}
}
None
@@ -10890,7 +10895,7 @@ mod tests {
// Activate workspace A
multi_workspace_handle
.update(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
})
.unwrap();
@@ -11014,13 +11019,13 @@ mod tests {
assert!(!removed, "removal should have been cancelled");
multi_workspace_handle
- .read_with(cx, |mw, _| {
+ .read_with(cx, |mw, cx| {
assert_eq!(
mw.workspace(),
&workspace_b,
"user should stay on workspace B after cancelling"
);
- assert_eq!(mw.workspaces().count(), 2, "both workspaces should remain");
+ assert_eq!(mw.workspaces(cx).len(), 2, "both workspaces should remain");
})
.unwrap();
@@ -11042,13 +11047,13 @@ mod tests {
// Should be back on workspace A, and B should be gone.
multi_workspace_handle
- .read_with(cx, |mw, _| {
+ .read_with(cx, |mw, cx| {
assert_eq!(
mw.workspace(),
&workspace_a,
"should be back on workspace A after removing B"
);
- assert_eq!(mw.workspaces().count(), 1, "only workspace A should remain");
+ assert_eq!(mw.workspaces(cx).len(), 1, "only workspace A should remain");
})
.unwrap();
}
@@ -14773,7 +14778,7 @@ mod tests {
// Switch to workspace A
multi_workspace_handle
.update(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
})
.unwrap();
@@ -14819,7 +14824,7 @@ mod tests {
// Switch to workspace B
multi_workspace_handle
.update(cx, |mw, window, cx| {
- let workspace = mw.workspaces().nth(1).unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().nth(1).unwrap();
mw.activate(workspace, window, cx);
})
.unwrap();
@@ -14828,7 +14833,7 @@ mod tests {
// Switch back to workspace A
multi_workspace_handle
.update(cx, |mw, window, cx| {
- let workspace = mw.workspaces().next().unwrap().clone();
+ let workspace = mw.workspaces(cx).into_iter().next().unwrap();
mw.activate(workspace, window, cx);
})
.unwrap();
@@ -1508,9 +1508,7 @@ fn quit(_: &Quit, cx: &mut App) {
for window in &workspace_windows {
let window = *window;
let workspaces = window
- .update(cx, |multi_workspace, _, _| {
- multi_workspace.workspaces().cloned().collect::<Vec<_>>()
- })
+ .update(cx, |multi_workspace, _, cx| multi_workspace.workspaces(cx))
.log_err();
let Some(workspaces) = workspaces else {
@@ -1540,7 +1538,7 @@ fn quit(_: &Quit, cx: &mut App) {
for window in &workspace_windows {
window
.update(cx, |multi_workspace, window, cx| {
- for workspace in multi_workspace.workspaces() {
+ for workspace in multi_workspace.workspaces(cx) {
flush_tasks.push(workspace.update(cx, |workspace, cx| {
workspace.flush_serialization(window, cx)
}));