@@ -8,7 +8,9 @@ use clock::ReplicaId;
use collections::HashMap;
use futures::Future;
use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
-use gpui::{AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
+use gpui::{
+ AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
+};
use language::{Buffer, DiagnosticEntry, LanguageRegistry};
use lsp::DiagnosticSeverity;
use postage::{prelude::Stream, watch};
@@ -42,6 +44,7 @@ enum ProjectClientState {
_maintain_remote_id_task: Task<Option<()>>,
},
Remote {
+ sharing_has_stopped: bool,
remote_id: u64,
replica_id: ReplicaId,
},
@@ -106,59 +109,61 @@ pub struct ProjectEntry {
impl Project {
pub fn local(
- languages: Arc<LanguageRegistry>,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
+ languages: Arc<LanguageRegistry>,
fs: Arc<dyn Fs>,
- cx: &mut ModelContext<Self>,
- ) -> Self {
- let (remote_id_tx, remote_id_rx) = watch::channel();
- let _maintain_remote_id_task = cx.spawn_weak({
- let rpc = client.clone();
- move |this, mut cx| {
- async move {
- let mut status = rpc.status();
- while let Some(status) = status.recv().await {
- if let Some(this) = this.upgrade(&cx) {
- let remote_id = if let client::Status::Connected { .. } = status {
- let response = rpc.request(proto::RegisterProject {}).await?;
- Some(response.project_id)
- } else {
- None
- };
- this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
+ cx: &mut MutableAppContext,
+ ) -> ModelHandle<Self> {
+ cx.add_model(|cx: &mut ModelContext<Self>| {
+ let (remote_id_tx, remote_id_rx) = watch::channel();
+ let _maintain_remote_id_task = cx.spawn_weak({
+ let rpc = client.clone();
+ move |this, mut cx| {
+ async move {
+ let mut status = rpc.status();
+ while let Some(status) = status.recv().await {
+ if let Some(this) = this.upgrade(&cx) {
+ let remote_id = if let client::Status::Connected { .. } = status {
+ let response = rpc.request(proto::RegisterProject {}).await?;
+ Some(response.project_id)
+ } else {
+ None
+ };
+ this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
+ }
}
+ Ok(())
}
- Ok(())
+ .log_err()
}
- .log_err()
- }
- });
+ });
- Self {
- worktrees: Default::default(),
- collaborators: Default::default(),
- client_state: ProjectClientState::Local {
- is_shared: false,
- remote_id_tx,
- remote_id_rx,
- _maintain_remote_id_task,
- },
- subscriptions: Vec::new(),
- active_worktree: None,
- active_entry: None,
- languages,
- client,
- user_store,
- fs,
- }
+ Self {
+ worktrees: Default::default(),
+ collaborators: Default::default(),
+ client_state: ProjectClientState::Local {
+ is_shared: false,
+ remote_id_tx,
+ remote_id_rx,
+ _maintain_remote_id_task,
+ },
+ subscriptions: Vec::new(),
+ active_worktree: None,
+ active_entry: None,
+ languages,
+ client,
+ user_store,
+ fs,
+ }
+ })
}
- pub async fn open_remote(
+ pub async fn remote(
remote_id: u64,
- languages: Arc<LanguageRegistry>,
client: Arc<Client>,
user_store: ModelHandle<UserStore>,
+ languages: Arc<LanguageRegistry>,
fs: Arc<dyn Fs>,
cx: &mut AsyncAppContext,
) -> Result<ModelHandle<Self>> {
@@ -211,6 +216,7 @@ impl Project {
user_store,
fs,
subscriptions: vec![
+ client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
@@ -221,6 +227,7 @@ impl Project {
],
client,
client_state: ProjectClientState::Remote {
+ sharing_has_stopped: false,
remote_id,
replica_id,
},
@@ -252,6 +259,27 @@ impl Project {
}
}
+ /// Returns a future that resolves to this project's server-assigned id.
+ /// For a remote project the id is already known and resolves immediately;
+ /// for a local project we wait on the `remote_id_rx` watch until the
+ /// registration task has obtained an id from the server.
+ pub fn next_remote_id(&self) -> impl Future<Output = u64> {
+ let mut id = None;
+ let mut watch = None;
+ match &self.client_state {
+ ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
+ ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
+ }
+
+ async move {
+ if let Some(id) = id {
+ return id;
+ }
+ // Local project: loop until the watch yields Some(id). The watch
+ // carries Option<u64>, hence the nested Some(Some(id)) pattern.
+ let mut watch = watch.unwrap();
+ loop {
+ if let Some(Some(id)) = watch.recv().await {
+ return id;
+ }
+ }
+ }
+ }
+
+
pub fn replica_id(&self) -> ReplicaId {
match &self.client_state {
ProjectClientState::Local { .. } => 0,
@@ -277,7 +305,7 @@ impl Project {
pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
let rpc = self.client.clone();
cx.spawn(|this, mut cx| async move {
- let remote_id = this.update(&mut cx, |this, _| {
+ let project_id = this.update(&mut cx, |this, _| {
if let ProjectClientState::Local {
is_shared,
remote_id_rx,
@@ -285,25 +313,22 @@ impl Project {
} = &mut this.client_state
{
*is_shared = true;
- Ok(*remote_id_rx.borrow())
+ remote_id_rx
+ .borrow()
+ .ok_or_else(|| anyhow!("no project id"))
} else {
Err(anyhow!("can't share a remote project"))
}
})?;
- let remote_id = remote_id.ok_or_else(|| anyhow!("no project id"))?;
- rpc.send(proto::ShareProject {
- project_id: remote_id,
- })
- .await?;
-
+ rpc.send(proto::ShareProject { project_id }).await?;
this.update(&mut cx, |this, cx| {
for worktree in &this.worktrees {
worktree.update(cx, |worktree, cx| {
worktree
.as_local_mut()
.unwrap()
- .share(remote_id, cx)
+ .share(project_id, cx)
.detach();
});
}
@@ -312,6 +337,41 @@ impl Project {
})
}
+ /// Stops sharing this local project, notifying the server so guests are
+ /// disconnected. Errors if called on a remote project or before the
+ /// project has been assigned an id.
+ pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
+ let rpc = self.client.clone();
+ cx.spawn(|this, mut cx| async move {
+ let project_id = this.update(&mut cx, |this, _| {
+ if let ProjectClientState::Local {
+ is_shared,
+ remote_id_rx,
+ ..
+ } = &mut this.client_state
+ {
+ // Unsharing: clear the flag. (Setting it to `true` here was a
+ // copy-paste of `share` and left the project marked as shared.)
+ *is_shared = false;
+ remote_id_rx
+ .borrow()
+ .ok_or_else(|| anyhow!("no project id"))
+ } else {
+ Err(anyhow!("can't unshare a remote project"))
+ }
+ })?;
+
+ rpc.send(proto::UnshareProject { project_id }).await?;
+
+ Ok(())
+ })
+ }
+
+ /// A remote project becomes read-only once the host stops sharing it
+ /// (see `handle_unshare_project`); local projects are never read-only.
+ pub fn is_read_only(&self) -> bool {
+ match &self.client_state {
+ ProjectClientState::Local { .. } => false,
+ ProjectClientState::Remote {
+ sharing_has_stopped,
+ ..
+ } => *sharing_has_stopped,
+ }
+ }
+
+
pub fn open_buffer(
&self,
path: ProjectPath,
@@ -333,14 +393,14 @@ impl Project {
pub fn add_local_worktree(
&mut self,
- abs_path: &Path,
+ abs_path: impl AsRef<Path>,
cx: &mut ModelContext<Self>,
) -> Task<Result<ModelHandle<Worktree>>> {
let fs = self.fs.clone();
let client = self.client.clone();
let user_store = self.user_store.clone();
let languages = self.languages.clone();
- let path = Arc::from(abs_path);
+ let path = Arc::from(abs_path.as_ref());
cx.spawn(|project, mut cx| async move {
let worktree =
Worktree::open_local(client.clone(), user_store, path, fs, languages, &mut cx)
@@ -352,10 +412,12 @@ impl Project {
});
if let Some(project_id) = remote_project_id {
+ let worktree_id = worktree.id() as u64;
let register_message = worktree.update(&mut cx, |worktree, _| {
let worktree = worktree.as_local_mut().unwrap();
proto::RegisterWorktree {
project_id,
+ worktree_id,
root_name: worktree.root_name().to_string(),
authorized_logins: worktree.authorized_logins(),
}
@@ -432,6 +494,25 @@ impl Project {
// RPC message handlers
+ /// Handles the server telling a guest that the host stopped sharing:
+ /// marks the remote project read-only and notifies observers.
+ fn handle_unshare_project(
+ &mut self,
+ _: TypedEnvelope<proto::UnshareProject>,
+ _: Arc<Client>,
+ cx: &mut ModelContext<Self>,
+ ) -> Result<()> {
+ if let ProjectClientState::Remote {
+ sharing_has_stopped,
+ ..
+ } = &mut self.client_state
+ {
+ *sharing_has_stopped = true;
+ cx.notify();
+ Ok(())
+ } else {
+ // A local project should never receive this message, but the message
+ // originates from the network — return an error rather than panicking
+ // via unreachable!() on a misrouted peer message.
+ Err(anyhow!("received unshare message for a local project"))
+ }
+ }
+
fn handle_add_collaborator(
&mut self,
mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
@@ -812,6 +893,6 @@ mod tests {
let client = client::Client::new();
let http_client = FakeHttpClient::new(|_| async move { Ok(ServerResponse::new(404)) });
let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
- cx.add_model(|cx| Project::local(languages, client, user_store, fs, cx))
+ cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
}
}
@@ -43,6 +43,7 @@ pub struct Server {
const MESSAGE_COUNT_PER_PAGE: usize = 100;
const MAX_MESSAGE_LEN: usize = 1024;
+// Shared error message for lookups of unknown project ids.
+const NO_SUCH_PROJECT: &str = "no such project"; // 'static is implied for consts
impl Server {
pub fn new(
@@ -60,12 +61,15 @@ impl Server {
server
.add_handler(Server::ping)
+ .add_handler(Server::register_project)
+ .add_handler(Server::unregister_project)
+ .add_handler(Server::share_project)
+ .add_handler(Server::unshare_project)
+ .add_handler(Server::join_project)
+ .add_handler(Server::leave_project)
.add_handler(Server::register_worktree)
.add_handler(Server::unregister_worktree)
.add_handler(Server::share_worktree)
- .add_handler(Server::unshare_worktree)
- .add_handler(Server::join_worktree)
- .add_handler(Server::leave_worktree)
.add_handler(Server::update_worktree)
.add_handler(Server::open_buffer)
.add_handler(Server::close_buffer)
@@ -207,162 +211,85 @@ impl Server {
Ok(())
}
- async fn register_worktree(
+ async fn register_project(
mut self: Arc<Server>,
- request: TypedEnvelope<proto::RegisterWorktree>,
+ request: TypedEnvelope<proto::RegisterProject>,
) -> tide::Result<()> {
- let receipt = request.receipt();
- let host_user_id = self.state().user_id_for_connection(request.sender_id)?;
-
- let mut contact_user_ids = HashSet::default();
- contact_user_ids.insert(host_user_id);
- for github_login in request.payload.authorized_logins {
- match self.app_state.db.create_user(&github_login, false).await {
- Ok(contact_user_id) => {
- contact_user_ids.insert(contact_user_id);
- }
- Err(err) => {
- let message = err.to_string();
- self.peer
- .respond_with_error(receipt, proto::Error { message })
- .await?;
- return Ok(());
- }
- }
- }
-
- let contact_user_ids = contact_user_ids.into_iter().collect::<Vec<_>>();
- let ok = self.state_mut().register_worktree(
- request.project_id,
- request.worktree_id,
- Worktree {
- authorized_user_ids: contact_user_ids.clone(),
- root_name: request.payload.root_name,
- },
- );
-
- if ok {
- self.peer.respond(receipt, proto::Ack {}).await?;
- self.update_contacts_for_users(&contact_user_ids).await?;
- } else {
- self.peer
- .respond_with_error(
- receipt,
- proto::Error {
- message: "no such project".to_string(),
- },
- )
- .await?;
- }
-
+ let receipt = request.receipt();
+ let host_user_id = self.state().user_id_for_connection(request.sender_id)?;
+ let project_id = self
+ .state_mut()
+ .register_project(request.sender_id, host_user_id);
+ // RegisterProject is sent via `rpc.request(...)` on the client, which
+ // awaits a response carrying the new project id — without this respond
+ // call the client's request would hang forever. (The state_mut() guard
+ // is dropped before awaiting.)
+ self.peer
+ .respond(receipt, proto::RegisterProjectResponse { project_id })
+ .await?;
Ok(())
}
- async fn unregister_worktree(
+ async fn unregister_project(
mut self: Arc<Server>,
- request: TypedEnvelope<proto::UnregisterWorktree>,
+ request: TypedEnvelope<proto::UnregisterProject>,
) -> tide::Result<()> {
- let project_id = request.payload.project_id;
- let worktree_id = request.payload.worktree_id;
- let worktree =
- self.state_mut()
- .unregister_worktree(project_id, worktree_id, request.sender_id)?;
-
- if let Some(share) = worktree.share {
- broadcast(
- request.sender_id,
- share.guests.keys().copied().collect(),
- |conn_id| {
- self.peer.send(
- conn_id,
- proto::UnregisterWorktree {
- project_id,
- worktree_id,
- },
- )
- },
- )
- .await?;
- }
- self.update_contacts_for_users(&worktree.authorized_user_ids)
- .await?;
+ self.state_mut()
+ .unregister_project(request.payload.project_id, request.sender_id);
Ok(())
}
- async fn share_worktree(
+ async fn share_project(
mut self: Arc<Server>,
- mut request: TypedEnvelope<proto::ShareWorktree>,
+ request: TypedEnvelope<proto::ShareProject>,
) -> tide::Result<()> {
- let worktree = request
- .payload
- .worktree
- .as_mut()
- .ok_or_else(|| anyhow!("missing worktree"))?;
- let entries = mem::take(&mut worktree.entries)
- .into_iter()
- .map(|entry| (entry.id, entry))
- .collect();
-
- let contact_user_ids =
- self.state_mut()
- .share_worktree(worktree.id, request.sender_id, entries);
- if let Some(contact_user_ids) = contact_user_ids {
- self.peer
- .respond(request.receipt(), proto::ShareWorktreeResponse {})
- .await?;
- self.update_contacts_for_users(&contact_user_ids).await?;
- } else {
- self.peer
- .respond_with_error(
- request.receipt(),
- proto::Error {
- message: "no such worktree".to_string(),
- },
- )
- .await?;
- }
+ self.state_mut()
+ .share_project(request.payload.project_id, request.sender_id);
Ok(())
}
- async fn unshare_worktree(
+ async fn unshare_project(
mut self: Arc<Server>,
- request: TypedEnvelope<proto::UnshareWorktree>,
+ request: TypedEnvelope<proto::UnshareProject>,
) -> tide::Result<()> {
- let worktree_id = request.payload.worktree_id;
- let worktree = self
+ let project_id = request.payload.project_id;
+ let project = self
.state_mut()
- .unshare_worktree(worktree_id, request.sender_id)?;
+ .unshare_project(project_id, request.sender_id)?;
- broadcast(request.sender_id, worktree.connection_ids, |conn_id| {
+ broadcast(request.sender_id, project.connection_ids, |conn_id| {
self.peer
- .send(conn_id, proto::UnshareWorktree { worktree_id })
+ .send(conn_id, proto::UnshareProject { project_id })
})
.await?;
- self.update_contacts_for_users(&worktree.authorized_user_ids)
+ self.update_contacts_for_users(&project.authorized_user_ids)
.await?;
Ok(())
}
- async fn join_worktree(
+ async fn join_project(
mut self: Arc<Server>,
- request: TypedEnvelope<proto::JoinWorktree>,
+ request: TypedEnvelope<proto::JoinProject>,
) -> tide::Result<()> {
- let worktree_id = request.payload.worktree_id;
+ let project_id = request.payload.project_id;
let user_id = self.state().user_id_for_connection(request.sender_id)?;
let response_data = self
.state_mut()
- .join_worktree(request.sender_id, user_id, worktree_id)
+ .join_project(request.sender_id, user_id, project_id)
.and_then(|joined| {
- let share = joined.worktree.share()?;
+ let share = joined.project.share()?;
let peer_count = share.guests.len();
let mut collaborators = Vec::with_capacity(peer_count);
collaborators.push(proto::Collaborator {
- peer_id: joined.worktree.host_connection_id.0,
+ peer_id: joined.project.host_connection_id.0,
replica_id: 0,
- user_id: joined.worktree.host_user_id.to_proto(),
+ user_id: joined.project.host_user_id.to_proto(),
});
+ let worktrees = joined
+ .project
+ .worktrees
+ .iter()
+ .filter_map(|(worktree_id, worktree)| {
+ worktree.share.as_ref().map(|share| proto::Worktree {
+ // Each proto::Worktree must carry its own id (the map key),
+ // not the enclosing project's id — otherwise every worktree
+ // the guest receives is labeled with the same project_id.
+ id: *worktree_id,
+ root_name: worktree.root_name.clone(),
+ entries: share.entries.values().cloned().collect(),
+ })
+ })
+ .collect();
for (peer_conn_id, (peer_replica_id, peer_user_id)) in &share.guests {
if *peer_conn_id != request.sender_id {
collaborators.push(proto::Collaborator {
@@ -372,17 +299,13 @@ impl Server {
});
}
}
- let response = proto::JoinWorktreeResponse {
- worktree: Some(proto::Worktree {
- id: worktree_id,
- root_name: joined.worktree.root_name.clone(),
- entries: share.entries.values().cloned().collect(),
- }),
+ let response = proto::JoinProjectResponse {
+ worktrees,
replica_id: joined.replica_id as u32,
collaborators,
};
- let connection_ids = joined.worktree.connection_ids();
- let contact_user_ids = joined.worktree.authorized_user_ids.clone();
+ let connection_ids = joined.project.connection_ids();
+ let contact_user_ids = joined.project.authorized_user_ids();
Ok((response, connection_ids, contact_user_ids))
});
@@ -391,8 +314,8 @@ impl Server {
broadcast(request.sender_id, connection_ids, |conn_id| {
self.peer.send(
conn_id,
- proto::AddCollaborator {
- worktree_id,
+ proto::AddProjectCollaborator {
+ project_id: project_id,
collaborator: Some(proto::Collaborator {
peer_id: request.sender_id.0,
replica_id: response.replica_id,
@@ -420,19 +343,19 @@ impl Server {
Ok(())
}
- async fn leave_worktree(
+ async fn leave_project(
mut self: Arc<Server>,
- request: TypedEnvelope<proto::LeaveWorktree>,
+ request: TypedEnvelope<proto::LeaveProject>,
) -> tide::Result<()> {
let sender_id = request.sender_id;
- let worktree_id = request.payload.worktree_id;
- let worktree = self.state_mut().leave_worktree(sender_id, worktree_id);
+ let project_id = request.payload.project_id;
+ let worktree = self.state_mut().leave_project(sender_id, project_id);
if let Some(worktree) = worktree {
broadcast(sender_id, worktree.connection_ids, |conn_id| {
self.peer.send(
conn_id,
- proto::RemoveCollaborator {
- worktree_id,
+ proto::RemoveProjectCollaborator {
+ project_id,
peer_id: sender_id.0,
},
)
@@ -444,16 +367,133 @@ impl Server {
Ok(())
}
+ async fn register_worktree(
+ mut self: Arc<Server>,
+ request: TypedEnvelope<proto::RegisterWorktree>,
+ ) -> tide::Result<()> {
+ let receipt = request.receipt();
+ let host_user_id = self.state().user_id_for_connection(request.sender_id)?;
+
+ let mut contact_user_ids = HashSet::default();
+ contact_user_ids.insert(host_user_id);
+ for github_login in request.payload.authorized_logins {
+ match self.app_state.db.create_user(&github_login, false).await {
+ Ok(contact_user_id) => {
+ contact_user_ids.insert(contact_user_id);
+ }
+ Err(err) => {
+ let message = err.to_string();
+ self.peer
+ .respond_with_error(receipt, proto::Error { message })
+ .await?;
+ return Ok(());
+ }
+ }
+ }
+
+ let contact_user_ids = contact_user_ids.into_iter().collect::<Vec<_>>();
+ let ok = self.state_mut().register_worktree(
+ request.payload.project_id,
+ request.payload.worktree_id,
+ Worktree {
+ authorized_user_ids: contact_user_ids.clone(),
+ root_name: request.payload.root_name,
+ share: None,
+ },
+ );
+
+ if ok {
+ self.peer.respond(receipt, proto::Ack {}).await?;
+ self.update_contacts_for_users(&contact_user_ids).await?;
+ } else {
+ self.peer
+ .respond_with_error(
+ receipt,
+ proto::Error {
+ message: NO_SUCH_PROJECT.to_string(),
+ },
+ )
+ .await?;
+ }
+
+ Ok(())
+ }
+
+ async fn unregister_worktree(
+ mut self: Arc<Server>,
+ request: TypedEnvelope<proto::UnregisterWorktree>,
+ ) -> tide::Result<()> {
+ let project_id = request.payload.project_id;
+ let worktree_id = request.payload.worktree_id;
+ let (worktree, guest_connection_ids) =
+ self.state_mut()
+ .unregister_worktree(project_id, worktree_id, request.sender_id)?;
+
+ broadcast(request.sender_id, guest_connection_ids, |conn_id| {
+ self.peer.send(
+ conn_id,
+ proto::UnregisterWorktree {
+ project_id,
+ worktree_id,
+ },
+ )
+ })
+ .await?;
+ self.update_contacts_for_users(&worktree.authorized_user_ids)
+ .await?;
+ Ok(())
+ }
+
+ async fn share_worktree(
+ mut self: Arc<Server>,
+ mut request: TypedEnvelope<proto::ShareWorktree>,
+ ) -> tide::Result<()> {
+ let worktree = request
+ .payload
+ .worktree
+ .as_mut()
+ .ok_or_else(|| anyhow!("missing worktree"))?;
+ let entries = mem::take(&mut worktree.entries)
+ .into_iter()
+ .map(|entry| (entry.id, entry))
+ .collect();
+
+ let contact_user_ids = self.state_mut().share_worktree(
+ request.payload.project_id,
+ worktree.id,
+ request.sender_id,
+ entries,
+ );
+ if let Some(contact_user_ids) = contact_user_ids {
+ self.peer.respond(request.receipt(), proto::Ack {}).await?;
+ self.update_contacts_for_users(&contact_user_ids).await?;
+ } else {
+ self.peer
+ .respond_with_error(
+ request.receipt(),
+ proto::Error {
+ message: "no such worktree".to_string(),
+ },
+ )
+ .await?;
+ }
+ Ok(())
+ }
+
async fn update_worktree(
mut self: Arc<Server>,
request: TypedEnvelope<proto::UpdateWorktree>,
) -> tide::Result<()> {
- let connection_ids = self.state_mut().update_worktree(
- request.sender_id,
- request.payload.worktree_id,
- &request.payload.removed_entries,
- &request.payload.updated_entries,
- )?;
+ let connection_ids = self
+ .state_mut()
+ .update_worktree(
+ request.sender_id,
+ request.payload.project_id,
+ request.payload.worktree_id,
+ &request.payload.removed_entries,
+ &request.payload.updated_entries,
+ )
+ .ok_or_else(|| anyhow!("no such worktree"))?;
broadcast(request.sender_id, connection_ids, |connection_id| {
self.peer
@@ -471,7 +511,9 @@ impl Server {
let receipt = request.receipt();
let host_connection_id = self
.state()
- .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
+ .read_project(request.payload.project_id, request.sender_id)
+ .ok_or_else(|| anyhow!(NO_SUCH_PROJECT))?
+ .host_connection_id;
let response = self
.peer
.forward_request(request.sender_id, host_connection_id, request.payload)
@@ -486,7 +528,9 @@ impl Server {
) -> tide::Result<()> {
let host_connection_id = self
.state()
- .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
+ .read_project(request.payload.project_id, request.sender_id)
+ .ok_or_else(|| anyhow!(NO_SUCH_PROJECT))?
+ .host_connection_id;
self.peer
.forward_send(request.sender_id, host_connection_id, request.payload)
.await?;
@@ -501,10 +545,11 @@ impl Server {
let guests;
{
let state = self.state();
- host = state
- .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
- guests = state
- .worktree_guest_connection_ids(request.sender_id, request.payload.worktree_id)?;
+ let project = state
+ .read_project(request.payload.project_id, request.sender_id)
+ .ok_or_else(|| anyhow!(NO_SUCH_PROJECT))?;
+ host = project.host_connection_id;
+ guests = project.guest_connection_ids()
}
let sender = request.sender_id;
@@ -536,7 +581,8 @@ impl Server {
) -> tide::Result<()> {
let receiver_ids = self
.state()
- .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?;
+ .project_connection_ids(request.payload.project_id, request.sender_id)
+ .ok_or_else(|| anyhow!(NO_SUCH_PROJECT))?;
broadcast(request.sender_id, receiver_ids, |connection_id| {
self.peer
.forward_send(request.sender_id, connection_id, request.payload.clone())
@@ -552,7 +598,8 @@ impl Server {
) -> tide::Result<()> {
let receiver_ids = self
.state()
- .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?;
+ .project_connection_ids(request.payload.project_id, request.sender_id)
+ .ok_or_else(|| anyhow!(NO_SUCH_PROJECT))?;
broadcast(request.sender_id, receiver_ids, |connection_id| {
self.peer
.forward_send(request.sender_id, connection_id, request.payload.clone())
@@ -959,7 +1006,6 @@ mod tests {
self, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Credentials,
EstablishConnectionError, UserStore,
},
- contacts_panel::JoinWorktree,
editor::{Editor, EditorSettings, Input, MultiBuffer},
fs::{FakeFs, Fs as _},
language::{
@@ -967,25 +1013,22 @@ mod tests {
LanguageRegistry, LanguageServerConfig, Point,
},
lsp,
- project::{ProjectPath, Worktree},
- test::test_app_state,
- workspace::Workspace,
+ project::Project,
};
#[gpui::test]
- async fn test_share_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
+ async fn test_share_project(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
let (window_b, _) = cx_b.add_window(|_| EmptyView);
let lang_registry = Arc::new(LanguageRegistry::new());
+ let fs = Arc::new(FakeFs::new());
+ cx_a.foreground().forbid_parking();
// Connect to a server as 2 clients.
let mut server = TestServer::start().await;
let client_a = server.create_client(&mut cx_a, "user_a").await;
let client_b = server.create_client(&mut cx_b, "user_b").await;
- cx_a.foreground().forbid_parking();
-
- // Share a local worktree as client A
- let fs = Arc::new(FakeFs::new());
+ // Share a project as client A
fs.insert_tree(
"/a",
json!({
@@ -995,47 +1038,56 @@ mod tests {
}),
)
.await;
- let worktree_a = Worktree::open_local(
- client_a.clone(),
- client_a.user_store.clone(),
- "/a".as_ref(),
- fs,
- lang_registry.clone(),
- &mut cx_a.to_async(),
- )
- .await
- .unwrap();
+ let project_a = cx_a.update(|cx| {
+ Project::local(
+ client_a.clone(),
+ client_a.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ cx,
+ )
+ });
+ let worktree_a = project_a
+ .update(&mut cx_a, |p, cx| p.add_local_worktree("/a", cx))
+ .await
+ .unwrap();
worktree_a
.read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
- let worktree_id = worktree_a
- .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
+ let project_id = project_a
+ .update(&mut cx_a, |project, _| project.next_remote_id())
+ .await;
+ project_a
+ .update(&mut cx_a, |project, cx| project.share(cx))
.await
.unwrap();
- // Join that worktree as client B, and see that a guest has joined as client A.
- let worktree_b = Worktree::open_remote(
+ // Join that project as client B, and see that a guest has joined as client A.
+ let project_b = Project::remote(
+ project_id,
client_b.clone(),
- worktree_id,
- lang_registry.clone(),
client_b.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
&mut cx_b.to_async(),
)
.await
.unwrap();
+ let worktree_b = project_b.update(&mut cx_b, |p, _| p.worktrees()[0].clone());
- let replica_id_b = worktree_b.read_with(&cx_b, |tree, _| {
+ let replica_id_b = project_b.read_with(&cx_b, |project, _| {
assert_eq!(
- tree.collaborators()
+ project
+ .collaborators()
.get(&client_a.peer_id)
.unwrap()
.user
.github_login,
"user_a"
);
- tree.replica_id()
+ project.replica_id()
});
- worktree_a
+ project_a
.condition(&cx_a, |tree, _| {
tree.collaborators()
.get(&client_b.peer_id)
@@ -1093,30 +1145,24 @@ mod tests {
// Dropping the worktree removes client B from client A's collaborators.
cx_b.update(move |_| drop(worktree_b));
- worktree_a
- .condition(&cx_a, |tree, _| tree.collaborators().is_empty())
+ project_a
+ .condition(&cx_a, |project, _| project.collaborators().is_empty())
.await;
}
#[gpui::test]
- async fn test_unshare_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
+ async fn test_unshare_project(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
cx_b.update(zed::contacts_panel::init);
- let mut app_state_a = cx_a.update(test_app_state);
- let mut app_state_b = cx_b.update(test_app_state);
+ let lang_registry = Arc::new(LanguageRegistry::new());
+ let fs = Arc::new(FakeFs::new());
// Connect to a server as 2 clients.
let mut server = TestServer::start().await;
let client_a = server.create_client(&mut cx_a, "user_a").await;
let client_b = server.create_client(&mut cx_b, "user_b").await;
- Arc::get_mut(&mut app_state_a).unwrap().client = client_a.clone();
- Arc::get_mut(&mut app_state_a).unwrap().user_store = client_a.user_store.clone();
- Arc::get_mut(&mut app_state_b).unwrap().client = client_b.clone();
- Arc::get_mut(&mut app_state_b).unwrap().user_store = client_b.user_store.clone();
-
cx_a.foreground().forbid_parking();
- // Share a local worktree as client A
- let fs = Arc::new(FakeFs::new());
+ // Share a project as client A
fs.insert_tree(
"/a",
json!({
@@ -1126,71 +1172,55 @@ mod tests {
}),
)
.await;
- let worktree_a = Worktree::open_local(
- app_state_a.client.clone(),
- app_state_a.user_store.clone(),
- "/a".as_ref(),
- fs,
- app_state_a.languages.clone(),
- &mut cx_a.to_async(),
- )
- .await
- .unwrap();
+ let project_a = cx_a.update(|cx| {
+ Project::local(
+ client_a.clone(),
+ client_a.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ cx,
+ )
+ });
+ let worktree_a = project_a
+ .update(&mut cx_a, |p, cx| p.add_local_worktree("/a", cx))
+ .await
+ .unwrap();
worktree_a
.read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
-
- let remote_worktree_id = worktree_a
- .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
+ let project_id = project_a
+ .update(&mut cx_a, |project, _| project.next_remote_id())
+ .await;
+ project_a
+ .update(&mut cx_a, |project, cx| project.share(cx))
.await
.unwrap();
- let (window_b, workspace_b) =
- cx_b.add_window(|cx| Workspace::new(&app_state_b.as_ref().into(), cx));
- cx_b.update(|cx| {
- cx.dispatch_action(
- window_b,
- vec![workspace_b.id()],
- &JoinWorktree(remote_worktree_id),
- );
- });
- workspace_b
- .condition(&cx_b, |workspace, cx| workspace.worktrees(cx).len() == 1)
- .await;
+ // Join that project as client B
+ let project_b = Project::remote(
+ project_id,
+ client_b.clone(),
+ client_b.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ &mut cx_b.to_async(),
+ )
+ .await
+ .unwrap();
- let local_worktree_id_b = workspace_b.read_with(&cx_b, |workspace, cx| {
- let active_pane = workspace.active_pane().read(cx);
- assert!(active_pane.active_item().is_none());
- workspace.worktrees(cx).first().unwrap().id()
- });
- workspace_b
- .update(&mut cx_b, |workspace, cx| {
- workspace.open_entry(
- ProjectPath {
- worktree_id: local_worktree_id_b,
- path: Path::new("a.txt").into(),
- },
- cx,
- )
- })
- .unwrap()
+ let worktree_b = project_b.read_with(&cx_b, |p, _| p.worktrees()[0].clone());
+ worktree_b
+ .update(&mut cx_b, |tree, cx| tree.open_buffer("a.txt", cx))
.await
.unwrap();
- workspace_b.read_with(&cx_b, |workspace, cx| {
- let active_pane = workspace.active_pane().read(cx);
- assert!(active_pane.active_item().is_some());
- });
- worktree_a.update(&mut cx_a, |tree, cx| {
- tree.as_local_mut().unwrap().unshare(cx);
- });
- workspace_b
- .condition(&cx_b, |workspace, cx| workspace.worktrees(cx).len() == 0)
+ project_a
+ .update(&mut cx_a, |project, cx| project.unshare(cx))
+ .await
+ .unwrap();
+ project_b
+ .condition(&mut cx_b, |project, _| project.is_read_only())
.await;
- workspace_b.read_with(&cx_b, |workspace, cx| {
- let active_pane = workspace.active_pane().read(cx);
- assert!(active_pane.active_item().is_none());
- });
}
#[gpui::test]
@@ -1201,6 +1231,7 @@ mod tests {
) {
cx_a.foreground().forbid_parking();
let lang_registry = Arc::new(LanguageRegistry::new());
+ let fs = Arc::new(FakeFs::new());
// Connect to a server as 3 clients.
let mut server = TestServer::start().await;
@@ -1208,8 +1239,6 @@ mod tests {
let client_b = server.create_client(&mut cx_b, "user_b").await;
let client_c = server.create_client(&mut cx_c, "user_c").await;
- let fs = Arc::new(FakeFs::new());
-
// Share a worktree as client A.
fs.insert_tree(
"/a",
@@ -1220,46 +1249,55 @@ mod tests {
}),
)
.await;
-
- let worktree_a = Worktree::open_local(
- client_a.clone(),
- client_a.user_store.clone(),
- "/a".as_ref(),
- fs.clone(),
- lang_registry.clone(),
- &mut cx_a.to_async(),
- )
- .await
- .unwrap();
+ let project_a = cx_a.update(|cx| {
+ Project::local(
+ client_a.clone(),
+ client_a.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ cx,
+ )
+ });
+ let worktree_a = project_a
+ .update(&mut cx_a, |p, cx| p.add_local_worktree("/a", cx))
+ .await
+ .unwrap();
worktree_a
.read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
- let worktree_id = worktree_a
- .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
+ let project_id = project_a
+ .update(&mut cx_a, |project, _| project.next_remote_id())
+ .await;
+ project_a
+ .update(&mut cx_a, |project, cx| project.share(cx))
.await
.unwrap();
// Join that worktree as clients B and C.
- let worktree_b = Worktree::open_remote(
+ let project_b = Project::remote(
+ project_id,
client_b.clone(),
- worktree_id,
- lang_registry.clone(),
client_b.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
&mut cx_b.to_async(),
)
.await
.unwrap();
- let worktree_c = Worktree::open_remote(
+ let project_c = Project::remote(
+ project_id,
client_c.clone(),
- worktree_id,
- lang_registry.clone(),
client_c.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
&mut cx_c.to_async(),
)
.await
.unwrap();
// Open and edit a buffer as both guests B and C.
+ let worktree_b = project_b.read_with(&cx_b, |p, _| p.worktrees()[0].clone());
+ let worktree_c = project_c.read_with(&cx_c, |p, _| p.worktrees()[0].clone());
let buffer_b = worktree_b
.update(&mut cx_b, |tree, cx| tree.open_buffer("file1", cx))
.await
@@ -1343,14 +1381,14 @@ mod tests {
async fn test_buffer_conflict_after_save(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
cx_a.foreground().forbid_parking();
let lang_registry = Arc::new(LanguageRegistry::new());
+ let fs = Arc::new(FakeFs::new());
// Connect to a server as 2 clients.
let mut server = TestServer::start().await;
let client_a = server.create_client(&mut cx_a, "user_a").await;
let client_b = server.create_client(&mut cx_b, "user_b").await;
- // Share a local worktree as client A
- let fs = Arc::new(FakeFs::new());
+ // Share a project as client A
fs.insert_tree(
"/dir",
json!({
@@ -1360,35 +1398,44 @@ mod tests {
)
.await;
- let worktree_a = Worktree::open_local(
- client_a.clone(),
- client_a.user_store.clone(),
- "/dir".as_ref(),
- fs,
- lang_registry.clone(),
- &mut cx_a.to_async(),
- )
- .await
- .unwrap();
+ let project_a = cx_a.update(|cx| {
+ Project::local(
+ client_a.clone(),
+ client_a.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ cx,
+ )
+ });
+ let worktree_a = project_a
+ .update(&mut cx_a, |p, cx| p.add_local_worktree("/dir", cx))
+ .await
+ .unwrap();
worktree_a
.read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
- let worktree_id = worktree_a
- .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
+ let project_id = project_a
+ .update(&mut cx_a, |project, _| project.next_remote_id())
+ .await;
+ project_a
+ .update(&mut cx_a, |project, cx| project.share(cx))
.await
.unwrap();
- // Join that worktree as client B, and see that a guest has joined as client A.
- let worktree_b = Worktree::open_remote(
+ // Join that project as client B
+ let project_b = Project::remote(
+ project_id,
client_b.clone(),
- worktree_id,
- lang_registry.clone(),
client_b.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
&mut cx_b.to_async(),
)
.await
.unwrap();
+ let worktree_b = project_b.update(&mut cx_b, |p, _| p.worktrees()[0].clone());
+ // Open a buffer as client B
let buffer_b = worktree_b
.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx))
.await
@@ -1430,14 +1477,14 @@ mod tests {
) {
cx_a.foreground().forbid_parking();
let lang_registry = Arc::new(LanguageRegistry::new());
+ let fs = Arc::new(FakeFs::new());
// Connect to a server as 2 clients.
let mut server = TestServer::start().await;
let client_a = server.create_client(&mut cx_a, "user_a").await;
let client_b = server.create_client(&mut cx_b, "user_b").await;
- // Share a local worktree as client A
- let fs = Arc::new(FakeFs::new());
+ // Share a project as client A
fs.insert_tree(
"/dir",
json!({
@@ -1446,44 +1493,56 @@ mod tests {
}),
)
.await;
- let worktree_a = Worktree::open_local(
- client_a.clone(),
- client_a.user_store.clone(),
- "/dir".as_ref(),
- fs,
- lang_registry.clone(),
- &mut cx_a.to_async(),
- )
- .await
- .unwrap();
+ let project_a = cx_a.update(|cx| {
+ Project::local(
+ client_a.clone(),
+ client_a.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ cx,
+ )
+ });
+ let worktree_a = project_a
+ .update(&mut cx_a, |p, cx| p.add_local_worktree("/dir", cx))
+ .await
+ .unwrap();
worktree_a
.read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
.await;
- let worktree_id = worktree_a
- .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
+ let project_id = project_a
+ .update(&mut cx_a, |project, _| project.next_remote_id())
+ .await;
+ project_a
+ .update(&mut cx_a, |project, cx| project.share(cx))
.await
.unwrap();
- // Join that worktree as client B, and see that a guest has joined as client A.
- let worktree_b = Worktree::open_remote(
+ // Join that project as client B
+ let project_b = Project::remote(
+ project_id,
client_b.clone(),
- worktree_id,
- lang_registry.clone(),
client_b.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
&mut cx_b.to_async(),
)
.await
.unwrap();
+ let worktree_b = project_b.update(&mut cx_b, |p, _| p.worktrees()[0].clone());
+ // Open a buffer as client A
let buffer_a = worktree_a
.update(&mut cx_a, |tree, cx| tree.open_buffer("a.txt", cx))
.await
.unwrap();
+
+ // Start opening the same buffer as client B
let buffer_b = cx_b
.background()
.spawn(worktree_b.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx)));
-
task::yield_now().await;
+
+ // Edit the buffer as client A while client B is still opening it.
buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "z", cx));
let text = buffer_a.read_with(&cx_a, |buf, _| buf.text());
@@ -11,7 +11,7 @@ pub struct Store {
projects: HashMap<u64, Project>,
visible_projects_by_user_id: HashMap<UserId, HashSet<u64>>,
channels: HashMap<ChannelId, Channel>,
- next_worktree_id: u64,
+ next_project_id: u64,
}
struct ConnectionState {
@@ -24,19 +24,19 @@ pub struct Project {
pub host_connection_id: ConnectionId,
pub host_user_id: UserId,
pub share: Option<ProjectShare>,
- worktrees: HashMap<u64, Worktree>,
+ pub worktrees: HashMap<u64, Worktree>,
}
pub struct Worktree {
pub authorized_user_ids: Vec<UserId>,
pub root_name: String,
+ pub share: Option<WorktreeShare>,
}
#[derive(Default)]
pub struct ProjectShare {
pub guests: HashMap<ConnectionId, (ReplicaId, UserId)>,
pub active_replica_ids: HashSet<ReplicaId>,
- pub worktrees: HashMap<u64, WorktreeShare>,
}
pub struct WorktreeShare {
@@ -57,9 +57,9 @@ pub struct RemovedConnectionState {
pub contact_ids: HashSet<UserId>,
}
-pub struct JoinedWorktree<'a> {
+pub struct JoinedProject<'a> {
pub replica_id: ReplicaId,
- pub worktree: &'a Worktree,
+ pub project: &'a Project,
}
pub struct UnsharedWorktree {
@@ -67,7 +67,7 @@ pub struct UnsharedWorktree {
pub authorized_user_ids: Vec<UserId>,
}
-pub struct LeftWorktree {
+pub struct LeftProject {
pub connection_ids: Vec<ConnectionId>,
pub authorized_user_ids: Vec<UserId>,
}
@@ -114,17 +114,17 @@ impl Store {
}
let mut result = RemovedConnectionState::default();
- for worktree_id in connection.worktrees.clone() {
- if let Ok(worktree) = self.unregister_worktree(worktree_id, connection_id) {
- result
- .contact_ids
- .extend(worktree.authorized_user_ids.iter().copied());
- result.hosted_worktrees.insert(worktree_id, worktree);
- } else if let Some(worktree) = self.leave_worktree(connection_id, worktree_id) {
+ for project_id in connection.projects.clone() {
+ if let Some((project, authorized_user_ids)) =
+ self.unregister_project(project_id, connection_id)
+ {
+ result.contact_ids.extend(authorized_user_ids);
+ result.hosted_projects.insert(project_id, project);
+ } else if let Some(project) = self.leave_project(connection_id, project_id) {
result
- .guest_worktree_ids
- .insert(worktree_id, worktree.connection_ids);
- result.contact_ids.extend(worktree.authorized_user_ids);
+ .guest_project_ids
+ .insert(project_id, project.connection_ids);
+ result.contact_ids.extend(project.authorized_user_ids);
}
}
@@ -191,7 +191,7 @@ impl Store {
let project = &self.projects[project_id];
let mut guests = HashSet::default();
- if let Ok(share) = worktree.share() {
+ if let Ok(share) = project.share() {
for guest_connection_id in share.guests.keys() {
if let Ok(user_id) = self.user_id_for_connection(*guest_connection_id) {
guests.insert(user_id.to_proto());
@@ -200,6 +200,12 @@ impl Store {
}
if let Ok(host_user_id) = self.user_id_for_connection(project.host_connection_id) {
+ let mut worktree_root_names = project
+ .worktrees
+ .values()
+ .map(|worktree| worktree.root_name.clone())
+ .collect::<Vec<_>>();
+ worktree_root_names.sort_unstable();
contacts
.entry(host_user_id)
.or_insert_with(|| proto::Contact {
@@ -209,11 +215,7 @@ impl Store {
.projects
.push(proto::ProjectMetadata {
id: *project_id,
- worktree_root_names: project
- .worktrees
- .iter()
- .map(|worktree| worktree.root_name.clone())
- .collect(),
+ worktree_root_names,
is_shared: project.share.is_some(),
guests: guests.into_iter().collect(),
});
@@ -268,7 +270,20 @@ impl Store {
}
}
- pub fn unregister_project(&mut self, project_id: u64) {
+ pub fn unregister_project(
+ &mut self,
+ project_id: u64,
+ connection_id: ConnectionId,
+ ) -> Option<(Project, Vec<UserId>)> {
+ match self.projects.entry(project_id) {
+ hash_map::Entry::Occupied(e) => {
+ if e.get().host_connection_id != connection_id {
+ return None;
+ }
+ }
+ hash_map::Entry::Vacant(_) => return None,
+ }
+
todo!()
}
@@ -277,7 +292,7 @@ impl Store {
project_id: u64,
worktree_id: u64,
acting_connection_id: ConnectionId,
- ) -> tide::Result<Worktree> {
+ ) -> tide::Result<(Worktree, Vec<ConnectionId>)> {
let project = self
.projects
.get_mut(&project_id)
@@ -291,31 +306,25 @@ impl Store {
.remove(&worktree_id)
.ok_or_else(|| anyhow!("no such worktree"))?;
- if let Some(connection) = self.connections.get_mut(&project.host_connection_id) {
- connection.worktrees.remove(&worktree_id);
- }
-
- if let Some(share) = &worktree.share {
- for connection_id in share.guests.keys() {
- if let Some(connection) = self.connections.get_mut(connection_id) {
- connection.worktrees.remove(&worktree_id);
- }
- }
+ let mut guest_connection_ids = Vec::new();
+ if let Some(share) = &project.share {
+ guest_connection_ids.extend(share.guests.keys());
}
for authorized_user_id in &worktree.authorized_user_ids {
- if let Some(visible_worktrees) = self
- .visible_worktrees_by_user_id
- .get_mut(&authorized_user_id)
+ if let Some(visible_projects) =
+ self.visible_projects_by_user_id.get_mut(authorized_user_id)
{
- visible_worktrees.remove(&worktree_id);
+ if !project.has_authorized_user_id(*authorized_user_id) {
+ visible_projects.remove(&project_id);
+ }
}
}
#[cfg(test)]
self.check_invariants();
- Ok(worktree)
+ Ok((worktree, guest_connection_ids))
}
pub fn share_project(&mut self, project_id: u64, connection_id: ConnectionId) -> bool {
@@ -328,47 +337,27 @@ impl Store {
false
}
- pub fn share_worktree(
+ pub fn unshare_project(
&mut self,
project_id: u64,
- worktree_id: u64,
- connection_id: ConnectionId,
- entries: HashMap<u64, proto::Entry>,
- ) -> Option<Vec<UserId>> {
- if let Some(project) = self.projects.get_mut(&project_id) {
- if project.host_connection_id == connection_id {
- if let Some(share) = project.share.as_mut() {
- share
- .worktrees
- .insert(worktree_id, WorktreeShare { entries });
- return Some(project.authorized_user_ids());
- }
- }
- }
- None
- }
-
- pub fn unshare_worktree(
- &mut self,
- worktree_id: u64,
acting_connection_id: ConnectionId,
) -> tide::Result<UnsharedWorktree> {
- let worktree = if let Some(worktree) = self.worktrees.get_mut(&worktree_id) {
- worktree
+ let project = if let Some(project) = self.projects.get_mut(&project_id) {
+ project
} else {
- return Err(anyhow!("no such worktree"))?;
+ return Err(anyhow!("no such project"))?;
};
- if worktree.host_connection_id != acting_connection_id {
- return Err(anyhow!("not your worktree"))?;
+ if project.host_connection_id != acting_connection_id {
+ return Err(anyhow!("not your project"))?;
}
- let connection_ids = worktree.connection_ids();
- let authorized_user_ids = worktree.authorized_user_ids.clone();
- if let Some(share) = worktree.share.take() {
+ let connection_ids = project.connection_ids();
+ let authorized_user_ids = project.authorized_user_ids();
+ if let Some(share) = project.share.take() {
for connection_id in share.guests.into_keys() {
if let Some(connection) = self.connections.get_mut(&connection_id) {
- connection.worktrees.remove(&worktree_id);
+ connection.projects.remove(&project_id);
}
}
@@ -380,34 +369,51 @@ impl Store {
authorized_user_ids,
})
} else {
- Err(anyhow!("worktree is not shared"))?
+ Err(anyhow!("project is not shared"))?
+ }
+ }
+
+ pub fn share_worktree(
+ &mut self,
+ project_id: u64,
+ worktree_id: u64,
+ connection_id: ConnectionId,
+ entries: HashMap<u64, proto::Entry>,
+ ) -> Option<Vec<UserId>> {
+ let project = self.projects.get_mut(&project_id)?;
+ let worktree = project.worktrees.get_mut(&worktree_id)?;
+ if project.host_connection_id == connection_id && project.share.is_some() {
+ worktree.share = Some(WorktreeShare { entries });
+ Some(project.authorized_user_ids())
+ } else {
+ None
}
}
- pub fn join_worktree(
+ pub fn join_project(
&mut self,
connection_id: ConnectionId,
user_id: UserId,
- worktree_id: u64,
- ) -> tide::Result<JoinedWorktree> {
+ project_id: u64,
+ ) -> tide::Result<JoinedProject> {
let connection = self
.connections
.get_mut(&connection_id)
.ok_or_else(|| anyhow!("no such connection"))?;
- let worktree = self
- .worktrees
- .get_mut(&worktree_id)
- .and_then(|worktree| {
- if worktree.authorized_user_ids.contains(&user_id) {
- Some(worktree)
+ let project = self
+ .projects
+ .get_mut(&project_id)
+ .and_then(|project| {
+ if project.has_authorized_user_id(user_id) {
+ Some(project)
} else {
None
}
})
- .ok_or_else(|| anyhow!("no such worktree"))?;
+ .ok_or_else(|| anyhow!("no such project"))?;
- let share = worktree.share_mut()?;
- connection.worktrees.insert(worktree_id);
+ let share = project.share_mut()?;
+ connection.projects.insert(project_id);
let mut replica_id = 1;
while share.active_replica_ids.contains(&replica_id) {
@@ -419,33 +425,33 @@ impl Store {
#[cfg(test)]
self.check_invariants();
- Ok(JoinedWorktree {
+ Ok(JoinedProject {
replica_id,
- worktree: &self.worktrees[&worktree_id],
+ project: &self.projects[&project_id],
})
}
- pub fn leave_worktree(
+ pub fn leave_project(
&mut self,
connection_id: ConnectionId,
- worktree_id: u64,
- ) -> Option<LeftWorktree> {
- let worktree = self.worktrees.get_mut(&worktree_id)?;
- let share = worktree.share.as_mut()?;
+ project_id: u64,
+ ) -> Option<LeftProject> {
+ let project = self.projects.get_mut(&project_id)?;
+ let share = project.share.as_mut()?;
let (replica_id, _) = share.guests.remove(&connection_id)?;
share.active_replica_ids.remove(&replica_id);
if let Some(connection) = self.connections.get_mut(&connection_id) {
- connection.worktrees.remove(&worktree_id);
+ connection.projects.remove(&project_id);
}
- let connection_ids = worktree.connection_ids();
- let authorized_user_ids = worktree.authorized_user_ids.clone();
+ let connection_ids = project.connection_ids();
+ let authorized_user_ids = project.authorized_user_ids();
#[cfg(test)]
self.check_invariants();
- Some(LeftWorktree {
+ Some(LeftProject {
connection_ids,
authorized_user_ids,
})
@@ -454,115 +460,75 @@ impl Store {
pub fn update_worktree(
&mut self,
connection_id: ConnectionId,
+ project_id: u64,
worktree_id: u64,
removed_entries: &[u64],
updated_entries: &[proto::Entry],
- ) -> tide::Result<Vec<ConnectionId>> {
- let worktree = self.write_worktree(worktree_id, connection_id)?;
- let share = worktree.share_mut()?;
+ ) -> Option<Vec<ConnectionId>> {
+ let project = self.write_project(project_id, connection_id)?;
+ let share = project.worktrees.get_mut(&worktree_id)?.share.as_mut()?;
for entry_id in removed_entries {
share.entries.remove(&entry_id);
}
for entry in updated_entries {
share.entries.insert(entry.id, entry.clone());
}
- Ok(worktree.connection_ids())
+ Some(project.connection_ids())
}
- pub fn worktree_host_connection_id(
+ pub fn project_connection_ids(
&self,
- connection_id: ConnectionId,
- worktree_id: u64,
- ) -> tide::Result<ConnectionId> {
- Ok(self
- .read_worktree(worktree_id, connection_id)?
- .host_connection_id)
- }
-
- pub fn worktree_guest_connection_ids(
- &self,
- connection_id: ConnectionId,
- worktree_id: u64,
- ) -> tide::Result<Vec<ConnectionId>> {
- Ok(self
- .read_worktree(worktree_id, connection_id)?
- .share()?
- .guests
- .keys()
- .copied()
- .collect())
- }
-
- pub fn worktree_connection_ids(
- &self,
- connection_id: ConnectionId,
- worktree_id: u64,
- ) -> tide::Result<Vec<ConnectionId>> {
- Ok(self
- .read_worktree(worktree_id, connection_id)?
- .connection_ids())
+ project_id: u64,
+ acting_connection_id: ConnectionId,
+ ) -> Option<Vec<ConnectionId>> {
+ Some(
+ self.read_project(project_id, acting_connection_id)?
+ .connection_ids(),
+ )
}
pub fn channel_connection_ids(&self, channel_id: ChannelId) -> Option<Vec<ConnectionId>> {
Some(self.channels.get(&channel_id)?.connection_ids())
}
- fn read_worktree(
- &self,
- worktree_id: u64,
- connection_id: ConnectionId,
- ) -> tide::Result<&Worktree> {
- let worktree = self
- .worktrees
- .get(&worktree_id)
- .ok_or_else(|| anyhow!("worktree not found"))?;
-
- if worktree.host_connection_id == connection_id
- || worktree.share()?.guests.contains_key(&connection_id)
+ pub fn read_project(&self, project_id: u64, connection_id: ConnectionId) -> Option<&Project> {
+ let project = self.projects.get(&project_id)?;
+ if project.host_connection_id == connection_id
+ || project.share.as_ref()?.guests.contains_key(&connection_id)
{
- Ok(worktree)
+ Some(project)
} else {
- Err(anyhow!(
- "{} is not a member of worktree {}",
- connection_id,
- worktree_id
- ))?
+ None
}
}
- fn write_worktree(
+ fn write_project(
&mut self,
- worktree_id: u64,
+ project_id: u64,
connection_id: ConnectionId,
- ) -> tide::Result<&mut Worktree> {
- let worktree = self
- .worktrees
- .get_mut(&worktree_id)
- .ok_or_else(|| anyhow!("worktree not found"))?;
-
- if worktree.host_connection_id == connection_id
- || worktree
- .share
- .as_ref()
- .map_or(false, |share| share.guests.contains_key(&connection_id))
+ ) -> Option<&mut Project> {
+ let project = self.projects.get_mut(&project_id)?;
+ if project.host_connection_id == connection_id
+ || project.share.as_ref()?.guests.contains_key(&connection_id)
{
- Ok(worktree)
+ Some(project)
} else {
- Err(anyhow!(
- "{} is not a member of worktree {}",
- connection_id,
- worktree_id
- ))?
+ None
}
}
#[cfg(test)]
fn check_invariants(&self) {
for (connection_id, connection) in &self.connections {
- for worktree_id in &connection.worktrees {
- let worktree = &self.worktrees.get(&worktree_id).unwrap();
- if worktree.host_connection_id != *connection_id {
- assert!(worktree.share().unwrap().guests.contains_key(connection_id));
+ for project_id in &connection.projects {
+ let project = &self.projects.get(&project_id).unwrap();
+ if project.host_connection_id != *connection_id {
+ assert!(project
+ .share
+ .as_ref()
+ .unwrap()
+ .guests
+ .contains_key(connection_id));
}
}
for channel_id in &connection.channels {
@@ -585,22 +551,22 @@ impl Store {
}
}
- for (worktree_id, worktree) in &self.worktrees {
- let host_connection = self.connections.get(&worktree.host_connection_id).unwrap();
- assert!(host_connection.worktrees.contains(worktree_id));
+ for (project_id, project) in &self.projects {
+ let host_connection = self.connections.get(&project.host_connection_id).unwrap();
+ assert!(host_connection.projects.contains(project_id));
- for authorized_user_ids in &worktree.authorized_user_ids {
- let visible_worktree_ids = self
- .visible_worktrees_by_user_id
- .get(authorized_user_ids)
+ for authorized_user_ids in project.authorized_user_ids() {
+ let visible_project_ids = self
+ .visible_projects_by_user_id
+ .get(&authorized_user_ids)
.unwrap();
- assert!(visible_worktree_ids.contains(worktree_id));
+ assert!(visible_project_ids.contains(project_id));
}
- if let Some(share) = &worktree.share {
+ if let Some(share) = &project.share {
for guest_connection_id in share.guests.keys() {
let guest_connection = self.connections.get(guest_connection_id).unwrap();
- assert!(guest_connection.worktrees.contains(worktree_id));
+ assert!(guest_connection.projects.contains(project_id));
}
assert_eq!(share.active_replica_ids.len(), share.guests.len(),);
assert_eq!(
@@ -614,10 +580,10 @@ impl Store {
}
}
- for (user_id, visible_worktree_ids) in &self.visible_worktrees_by_user_id {
- for worktree_id in visible_worktree_ids {
- let worktree = self.worktrees.get(worktree_id).unwrap();
- assert!(worktree.authorized_user_ids.contains(user_id));
+ for (user_id, visible_project_ids) in &self.visible_projects_by_user_id {
+ for project_id in visible_project_ids {
+ let project = self.projects.get(project_id).unwrap();
+ assert!(project.authorized_user_ids().contains(user_id));
}
}
@@ -630,7 +596,33 @@ impl Store {
}
}
-impl Worktree {
+impl Project {
+ pub fn has_authorized_user_id(&self, user_id: UserId) -> bool {
+ self.worktrees
+ .values()
+ .any(|worktree| worktree.authorized_user_ids.contains(&user_id))
+ }
+
+ pub fn authorized_user_ids(&self) -> Vec<UserId> {
+ let mut ids = self
+ .worktrees
+ .values()
+ .flat_map(|worktree| worktree.authorized_user_ids.iter())
+ .copied()
+ .collect::<Vec<_>>();
+ ids.sort_unstable();
+ ids.dedup();
+ ids
+ }
+
+ pub fn guest_connection_ids(&self) -> Vec<ConnectionId> {
+ if let Some(share) = &self.share {
+ share.guests.keys().copied().collect()
+ } else {
+ Vec::new()
+ }
+ }
+
pub fn connection_ids(&self) -> Vec<ConnectionId> {
if let Some(share) = &self.share {
share