Detailed changes
@@ -224,6 +224,8 @@ impl Server {
.add_request_handler(forward_project_request::<proto::RenameProjectEntry>)
.add_request_handler(forward_project_request::<proto::CopyProjectEntry>)
.add_request_handler(forward_project_request::<proto::DeleteProjectEntry>)
+ .add_request_handler(forward_project_request::<proto::ExpandProjectEntry>)
+ .add_request_handler(forward_project_request::<proto::CollapseProjectEntry>)
.add_request_handler(forward_project_request::<proto::OnTypeFormatting>)
.add_message_handler(create_buffer_for_peer)
.add_request_handler(update_buffer)
@@ -51,6 +51,7 @@ use lsp_command::*;
use postage::watch;
use project_settings::ProjectSettings;
use rand::prelude::*;
+use rpc::proto::PeerId;
use search::SearchQuery;
use serde::Serialize;
use settings::SettingsStore;
@@ -478,6 +479,8 @@ impl Project {
client.add_model_request_handler(Self::handle_rename_project_entry);
client.add_model_request_handler(Self::handle_copy_project_entry);
client.add_model_request_handler(Self::handle_delete_project_entry);
+ client.add_model_request_handler(Self::handle_expand_project_entry);
+ client.add_model_request_handler(Self::handle_collapse_project_entry);
client.add_model_request_handler(Self::handle_apply_additional_edits_for_completion);
client.add_model_request_handler(Self::handle_apply_code_action);
client.add_model_request_handler(Self::handle_on_type_formatting);
@@ -5403,6 +5406,56 @@ impl Project {
Some(ProjectPath { worktree_id, path })
}
+    /// Marks the directory entry `entry_id` (in worktree `worktree_id`) as
+    /// expanded in this collaborator's project panel.
+    ///
+    /// Local projects record the expansion directly with the worktree's
+    /// background scanner using replica id 0 (the host's own replica).
+    /// Remote projects forward an `ExpandProjectEntry` RPC to the host.
+    ///
+    /// Returns `None` only when the project is local and no worktree with
+    /// `worktree_id` exists; otherwise `Some(())`.
+    pub fn mark_entry_expanded(
+        &mut self,
+        worktree_id: WorktreeId,
+        entry_id: ProjectEntryId,
+        cx: &mut ModelContext<Self>,
+    ) -> Option<()> {
+        if self.is_local() {
+            let worktree = self.worktree_for_id(worktree_id, cx)?;
+            worktree.update(cx, |worktree, cx| {
+                // unwrap is safe: `is_local()` guarantees a LocalWorktree.
+                worktree
+                    .as_local_mut()
+                    .unwrap()
+                    .mark_entry_expanded(entry_id, true, 0, cx);
+            });
+        } else if let Some(project_id) = self.remote_id() {
+            // Best-effort: failures are logged, never surfaced to the caller.
+            // NOTE(review): the spawned task's handle appears to be dropped at
+            // the end of this statement — confirm that `log_err()` detaches it,
+            // otherwise the request may be cancelled before it is sent.
+            cx.background()
+                .spawn(self.client.request(proto::ExpandProjectEntry {
+                    project_id,
+                    entry_id: entry_id.to_proto(),
+                }))
+                .log_err();
+        }
+        Some(())
+    }
+
+    /// Marks the directory entry `entry_id` (in worktree `worktree_id`) as
+    /// collapsed in this collaborator's project panel.
+    ///
+    /// Mirror image of `mark_entry_expanded`: local projects tell the
+    /// worktree's background scanner directly (replica id 0 = host); remote
+    /// projects forward a `CollapseProjectEntry` RPC to the host.
+    ///
+    /// Returns `None` only when the project is local and no worktree with
+    /// `worktree_id` exists; otherwise `Some(())`.
+    pub fn mark_entry_collapsed(
+        &mut self,
+        worktree_id: WorktreeId,
+        entry_id: ProjectEntryId,
+        cx: &mut ModelContext<Self>,
+    ) -> Option<()> {
+        if self.is_local() {
+            let worktree = self.worktree_for_id(worktree_id, cx)?;
+            worktree.update(cx, |worktree, cx| {
+                // unwrap is safe: `is_local()` guarantees a LocalWorktree.
+                worktree
+                    .as_local_mut()
+                    .unwrap()
+                    .mark_entry_expanded(entry_id, false, 0, cx);
+            });
+        } else if let Some(project_id) = self.remote_id() {
+            // Best-effort: failures are logged, never surfaced to the caller.
+            // NOTE(review): as with mark_entry_expanded, confirm the spawned
+            // task is detached rather than cancelled on drop.
+            cx.background()
+                .spawn(self.client.request(proto::CollapseProjectEntry {
+                    project_id,
+                    entry_id: entry_id.to_proto(),
+                }))
+                .log_err();
+        }
+        Some(())
+    }
+
pub fn absolute_path(&self, project_path: &ProjectPath, cx: &AppContext) -> Option<PathBuf> {
let workspace_root = self
.worktree_for_id(project_path.worktree_id, cx)?
@@ -5705,6 +5758,66 @@ impl Project {
})
}
+    /// RPC handler: a collaborator expanded a directory entry in their
+    /// project panel. Thin wrapper that delegates to the shared
+    /// expand/collapse implementation with `is_expanded = true`.
+    async fn handle_expand_project_entry(
+        this: ModelHandle<Self>,
+        envelope: TypedEnvelope<proto::ExpandProjectEntry>,
+        _: Arc<Client>,
+        cx: AsyncAppContext,
+    ) -> Result<proto::Ack> {
+        Self::handle_expand_or_collapse_project_entry(
+            this,
+            envelope.payload.entry_id,
+            envelope.original_sender_id,
+            true,
+            cx,
+        )
+        .await
+    }
+
+    /// RPC handler: a collaborator collapsed a directory entry in their
+    /// project panel. Thin wrapper that delegates to the shared
+    /// expand/collapse implementation with `is_expanded = false`.
+    async fn handle_collapse_project_entry(
+        this: ModelHandle<Self>,
+        envelope: TypedEnvelope<proto::CollapseProjectEntry>,
+        _: Arc<Client>,
+        cx: AsyncAppContext,
+    ) -> Result<proto::Ack> {
+        Self::handle_expand_or_collapse_project_entry(
+            this,
+            envelope.payload.entry_id,
+            envelope.original_sender_id,
+            false,
+            cx,
+        )
+        .await
+    }
+
+    /// Shared implementation behind the expand/collapse RPC handlers.
+    ///
+    /// Looks up the requesting collaborator's replica id and the worktree
+    /// containing `entry_id`, then records the expansion state with the
+    /// local worktree's background scanner and acknowledges the request.
+    ///
+    /// # Errors
+    /// Returns an "invalid request" error when the sender is not a known
+    /// collaborator or the entry does not belong to any worktree.
+    ///
+    /// NOTE(review): `original_sender_id` is only populated on messages
+    /// forwarded through the collab server — confirm that a direct
+    /// guest-to-host request also sets it, otherwise this handler rejects
+    /// every such request; a fallback to the envelope's sender id may be
+    /// needed.
+    async fn handle_expand_or_collapse_project_entry(
+        this: ModelHandle<Self>,
+        entry_id: u64,
+        original_sender_id: Option<PeerId>,
+        is_expanded: bool,
+        mut cx: AsyncAppContext,
+    ) -> Result<proto::Ack> {
+        let entry_id = ProjectEntryId::from_proto(entry_id);
+        let (worktree, replica_id) = this
+            .read_with(&cx, |this, cx| {
+                let replica_id = original_sender_id
+                    .and_then(|peer_id| this.collaborators.get(&peer_id))?
+                    .replica_id;
+                let worktree = this.worktree_for_entry(entry_id, cx)?;
+                Some((worktree, replica_id))
+            })
+            .ok_or_else(|| anyhow!("invalid request"))?;
+        worktree.update(&mut cx, |worktree, cx| {
+            // This handler runs on the host, so the worktree is local.
+            worktree.as_local_mut().unwrap().mark_entry_expanded(
+                entry_id,
+                is_expanded,
+                replica_id,
+                cx,
+            )
+        });
+        Ok(proto::Ack {})
+    }
+
async fn handle_update_diagnostic_summary(
this: ModelHandle<Self>,
envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
@@ -5,7 +5,7 @@ use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client};
use clock::ReplicaId;
-use collections::{HashMap, VecDeque};
+use collections::{HashMap, HashSet, VecDeque};
use fs::{
repository::{GitFileStatus, GitRepository, RepoPath},
Fs, LineEnding,
@@ -67,7 +67,7 @@ pub enum Worktree {
pub struct LocalWorktree {
snapshot: LocalSnapshot,
- path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
+ scan_requests_tx: channel::Sender<ScanRequest>,
is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
_background_scanner_task: Task<()>,
share: Option<ShareState>,
@@ -84,6 +84,18 @@ pub struct LocalWorktree {
visible: bool,
}
+/// Requests the foreground (LocalWorktree) can send to the worktree's
+/// background scanner over `scan_requests_tx`.
+enum ScanRequest {
+    /// Re-scan the given absolute paths. `done` is a barrier sender whose
+    /// drop signals the waiting caller that the rescan completed.
+    RescanPaths {
+        paths: Vec<PathBuf>,
+        done: barrier::Sender,
+    },
+    /// Record whether `replica_id` currently has the directory entry
+    /// `entry_id` expanded in its project panel.
+    SetDirExpanded {
+        entry_id: ProjectEntryId,
+        replica_id: ReplicaId,
+        is_expanded: bool,
+    },
+}
+
pub struct RemoteWorktree {
snapshot: Snapshot,
background_snapshot: Arc<Mutex<Snapshot>>,
@@ -214,6 +226,7 @@ pub struct LocalSnapshot {
struct BackgroundScannerState {
snapshot: LocalSnapshot,
+ expanded_dirs: HashSet<(ProjectEntryId, ReplicaId)>,
/// The ids of all of the entries that were removed from the snapshot
/// as part of the current update. These entry ids may be re-used
/// if the same inode is discovered at a new path, or if the given
@@ -330,7 +343,7 @@ impl Worktree {
);
}
- let (path_changes_tx, path_changes_rx) = channel::unbounded();
+ let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
cx.spawn_weak(|this, mut cx| async move {
@@ -370,7 +383,7 @@ impl Worktree {
fs,
scan_states_tx,
background,
- path_changes_rx,
+ scan_requests_rx,
)
.run(events)
.await;
@@ -381,7 +394,7 @@ impl Worktree {
snapshot,
is_scanning: watch::channel_with(true),
share: None,
- path_changes_tx,
+ scan_requests_tx,
_background_scanner_task: background_scanner_task,
diagnostics: Default::default(),
diagnostic_summaries: Default::default(),
@@ -1068,8 +1081,11 @@ impl LocalWorktree {
this.update(&mut cx, |this, _| {
this.as_local_mut()
.unwrap()
- .path_changes_tx
- .try_send((vec![abs_path], tx))
+ .scan_requests_tx
+ .try_send(ScanRequest::RescanPaths {
+ paths: vec![abs_path],
+ done: tx,
+ })
})?;
rx.recv().await;
Ok(())
@@ -1135,6 +1151,22 @@ impl LocalWorktree {
}))
}
+    /// Informs the background scanner that `replica_id` expanded
+    /// (`is_expanded == true`) or collapsed the directory entry `entry_id`
+    /// in its project panel.
+    ///
+    /// Best-effort: the send error is deliberately discarded via `.ok()`,
+    /// e.g. when the scanner has already shut down. The context parameter is
+    /// currently unused (hence `_cx`) but kept for interface consistency.
+    pub fn mark_entry_expanded(
+        &mut self,
+        entry_id: ProjectEntryId,
+        is_expanded: bool,
+        replica_id: ReplicaId,
+        _cx: &mut ModelContext<Worktree>,
+    ) {
+        self.scan_requests_tx
+            .try_send(ScanRequest::SetDirExpanded {
+                entry_id,
+                replica_id,
+                is_expanded,
+            })
+            .ok();
+    }
+
fn refresh_entry(
&self,
path: Arc<Path>,
@@ -1143,7 +1175,7 @@ impl LocalWorktree {
) -> Task<Result<Entry>> {
let fs = self.fs.clone();
let abs_root_path = self.abs_path.clone();
- let path_changes_tx = self.path_changes_tx.clone();
+ let path_changes_tx = self.scan_requests_tx.clone();
cx.spawn_weak(move |this, mut cx| async move {
let abs_path = fs.canonicalize(&abs_root_path).await?;
let mut paths = Vec::with_capacity(2);
@@ -1161,7 +1193,7 @@ impl LocalWorktree {
}
let (tx, mut rx) = barrier::channel();
- path_changes_tx.try_send((paths, tx))?;
+ path_changes_tx.try_send(ScanRequest::RescanPaths { paths, done: tx })?;
rx.recv().await;
this.upgrade(&cx)
.ok_or_else(|| anyhow!("worktree was dropped"))?
@@ -2784,7 +2816,7 @@ struct BackgroundScanner {
fs: Arc<dyn Fs>,
status_updates_tx: UnboundedSender<ScanState>,
executor: Arc<executor::Background>,
- refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
+ scan_requests_rx: channel::Receiver<ScanRequest>,
next_entry_id: Arc<AtomicUsize>,
phase: BackgroundScannerPhase,
}
@@ -2803,17 +2835,18 @@ impl BackgroundScanner {
fs: Arc<dyn Fs>,
status_updates_tx: UnboundedSender<ScanState>,
executor: Arc<executor::Background>,
- refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
+ scan_requests_rx: channel::Receiver<ScanRequest>,
) -> Self {
Self {
fs,
status_updates_tx,
executor,
- refresh_requests_rx,
+ scan_requests_rx,
next_entry_id,
state: Mutex::new(BackgroundScannerState {
prev_snapshot: snapshot.snapshot.clone(),
snapshot,
+ expanded_dirs: Default::default(),
removed_entry_ids: Default::default(),
changed_paths: Default::default(),
}),
@@ -2898,9 +2931,9 @@ impl BackgroundScanner {
select_biased! {
// Process any path refresh requests from the worktree. Prioritize
// these before handling changes reported by the filesystem.
- request = self.refresh_requests_rx.recv().fuse() => {
- let Ok((paths, barrier)) = request else { break };
- if !self.process_refresh_request(paths.clone(), barrier).await {
+ request = self.scan_requests_rx.recv().fuse() => {
+ let Ok(request) = request else { break };
+ if !self.process_scan_request(request).await {
return;
}
}
@@ -2917,9 +2950,29 @@ impl BackgroundScanner {
}
}
- async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
- self.reload_entries_for_paths(paths, None).await;
- self.send_status_update(false, Some(barrier))
+    /// Handles a single request from the foreground.
+    ///
+    /// Returns `false` when a resulting status update could not be
+    /// delivered, which tells the scanner's event loop to stop.
+    async fn process_scan_request(&self, request: ScanRequest) -> bool {
+        match request {
+            ScanRequest::RescanPaths { paths, done } => {
+                self.reload_entries_for_paths(paths, None).await;
+                self.send_status_update(false, Some(done))
+            }
+            ScanRequest::SetDirExpanded {
+                entry_id,
+                replica_id,
+                is_expanded,
+            } => {
+                let mut state = self.state.lock();
+                if is_expanded {
+                    state.expanded_dirs.insert((entry_id, replica_id));
+                } else {
+                    state.expanded_dirs.remove(&(entry_id, replica_id));
+                }
+
+                // TODO: act on the recorded expansion state (e.g. eagerly
+                // scan newly expanded directories). For now this is pure
+                // bookkeeping and nothing consumes `expanded_dirs` yet.
+
+                true
+            }
+        }
    }
async fn process_events(&mut self, paths: Vec<PathBuf>) {
@@ -2995,9 +3048,9 @@ impl BackgroundScanner {
select_biased! {
// Process any path refresh requests before moving on to process
// the scan queue, so that user operations are prioritized.
- request = self.refresh_requests_rx.recv().fuse() => {
- let Ok((paths, barrier)) = request else { break };
- if !self.process_refresh_request(paths, barrier).await {
+ request = self.scan_requests_rx.recv().fuse() => {
+ let Ok(request) = request else { break };
+ if !self.process_scan_request(request).await {
return;
}
}
@@ -3487,9 +3540,9 @@ impl BackgroundScanner {
select_biased! {
// Process any path refresh requests before moving on to process
// the queue of ignore statuses.
- request = self.refresh_requests_rx.recv().fuse() => {
- let Ok((paths, barrier)) = request else { break };
- if !self.process_refresh_request(paths, barrier).await {
+ request = self.scan_requests_rx.recv().fuse() => {
+ let Ok(request) = request else { break };
+ if !self.process_scan_request(request).await {
return;
}
}
@@ -431,18 +431,23 @@ impl ProjectPanel {
fn collapse_selected_entry(&mut self, _: &CollapseSelectedEntry, cx: &mut ViewContext<Self>) {
if let Some((worktree, mut entry)) = self.selected_entry(cx) {
+ let worktree_id = worktree.id();
let expanded_dir_ids =
- if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree.id()) {
+ if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) {
expanded_dir_ids
} else {
return;
};
loop {
- match expanded_dir_ids.binary_search(&entry.id) {
+ let entry_id = entry.id;
+ match expanded_dir_ids.binary_search(&entry_id) {
Ok(ix) => {
expanded_dir_ids.remove(ix);
- self.update_visible_entries(Some((worktree.id(), entry.id)), cx);
+ self.update_visible_entries(Some((worktree_id, entry_id)), cx);
+ self.project.update(cx, |project, cx| {
+ project.mark_entry_collapsed(worktree_id, entry_id, cx);
+ });
cx.notify();
break;
}
@@ -938,10 +943,19 @@ impl ProjectPanel {
}
fn selected_entry<'a>(&self, cx: &'a AppContext) -> Option<(&'a Worktree, &'a project::Entry)> {
+ let (worktree, entry) = self.selected_entry_handle(cx)?;
+ Some((worktree.read(cx), entry))
+ }
+
+ fn selected_entry_handle<'a>(
+ &self,
+ cx: &'a AppContext,
+ ) -> Option<(ModelHandle<Worktree>, &'a project::Entry)> {
let selection = self.selection?;
let project = self.project.read(cx);
- let worktree = project.worktree_for_id(selection.worktree_id, cx)?.read(cx);
- Some((worktree, worktree.entry_for_id(selection.entry_id)?))
+ let worktree = project.worktree_for_id(selection.worktree_id, cx)?;
+ let entry = worktree.read(cx).entry_for_id(selection.entry_id)?;
+ Some((worktree, entry))
}
fn update_visible_entries(
@@ -1058,29 +1072,31 @@ impl ProjectPanel {
entry_id: ProjectEntryId,
cx: &mut ViewContext<Self>,
) {
- let project = self.project.read(cx);
- if let Some((worktree, expanded_dir_ids)) = project
- .worktree_for_id(worktree_id, cx)
- .zip(self.expanded_dir_ids.get_mut(&worktree_id))
- {
- let worktree = worktree.read(cx);
+ self.project.update(cx, |project, cx| {
+ if let Some((worktree, expanded_dir_ids)) = project
+ .worktree_for_id(worktree_id, cx)
+ .zip(self.expanded_dir_ids.get_mut(&worktree_id))
+ {
+ project.mark_entry_expanded(worktree_id, entry_id, cx);
+ let worktree = worktree.read(cx);
- if let Some(mut entry) = worktree.entry_for_id(entry_id) {
- loop {
- if let Err(ix) = expanded_dir_ids.binary_search(&entry.id) {
- expanded_dir_ids.insert(ix, entry.id);
- }
+ if let Some(mut entry) = worktree.entry_for_id(entry_id) {
+ loop {
+ if let Err(ix) = expanded_dir_ids.binary_search(&entry.id) {
+ expanded_dir_ids.insert(ix, entry.id);
+ }
- if let Some(parent_entry) =
- entry.path.parent().and_then(|p| worktree.entry_for_path(p))
- {
- entry = parent_entry;
- } else {
- break;
+ if let Some(parent_entry) =
+ entry.path.parent().and_then(|p| worktree.entry_for_path(p))
+ {
+ entry = parent_entry;
+ } else {
+ break;
+ }
}
}
}
- }
+ });
}
fn for_each_visible_entry(
@@ -62,6 +62,8 @@ message Envelope {
RenameProjectEntry rename_project_entry = 46;
CopyProjectEntry copy_project_entry = 47;
DeleteProjectEntry delete_project_entry = 48;
+ ExpandProjectEntry expand_project_entry = 114;
+ CollapseProjectEntry collapse_project_entry = 115;
ProjectEntryResponse project_entry_response = 49;
UpdateDiagnosticSummary update_diagnostic_summary = 50;
@@ -372,6 +374,16 @@ message DeleteProjectEntry {
uint64 entry_id = 2;
}
+// Sent to the host when a collaborator expands a directory entry in their
+// project panel.
+message ExpandProjectEntry {
+    uint64 project_id = 1;
+    uint64 entry_id = 2;
+}
+
+// Counterpart of ExpandProjectEntry, sent when a directory is collapsed.
+message CollapseProjectEntry {
+    uint64 project_id = 1;
+    uint64 entry_id = 2;
+}
+
message ProjectEntryResponse {
Entry entry = 1;
uint64 worktree_scan_id = 2;
@@ -150,6 +150,8 @@ messages!(
(DeclineCall, Foreground),
(DeleteProjectEntry, Foreground),
(Error, Foreground),
+ (ExpandProjectEntry, Foreground),
+ (CollapseProjectEntry, Foreground),
(Follow, Foreground),
(FollowResponse, Foreground),
(FormatBuffers, Foreground),
@@ -255,6 +257,8 @@ request_messages!(
(CreateRoom, CreateRoomResponse),
(DeclineCall, Ack),
(DeleteProjectEntry, ProjectEntryResponse),
+ (ExpandProjectEntry, Ack),
+ (CollapseProjectEntry, Ack),
(Follow, FollowResponse),
(FormatBuffers, FormatBuffersResponse),
(GetChannelMessages, GetChannelMessagesResponse),
@@ -311,6 +315,8 @@ entity_messages!(
CreateBufferForPeer,
CreateProjectEntry,
DeleteProjectEntry,
+ ExpandProjectEntry,
+ CollapseProjectEntry,
Follow,
FormatBuffers,
GetCodeActions,
@@ -6,4 +6,4 @@ pub use conn::Connection;
pub use peer::*;
mod macros;
-pub const PROTOCOL_VERSION: u32 = 58;
+pub const PROTOCOL_VERSION: u32 = 59;