From 447c1d2f7148c913daf0d2be7b8f85c96cb6cd7a Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Fri, 22 Apr 2022 13:33:19 -0600 Subject: [PATCH 01/11] WIP: Get compiling with Tokio by commenting almost everything Co-Authored-By: Antonio Scandurra --- Cargo.lock | 456 +- crates/collab/Cargo.toml | 10 +- crates/collab/src/api.rs | 350 +- crates/collab/src/auth.rs | 184 +- crates/collab/src/db.rs | 5 +- crates/collab/src/main.rs | 55 +- crates/collab/src/rpc.rs | 12730 +++++++++++++++---------------- crates/collab/src/rpc/store.rs | 42 +- 8 files changed, 6889 insertions(+), 6943 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15b9d1706fdd4722d3ddf40f8b18ad23d3aa8c49..0feeb2fa9848594cf740dc411cf494d2aea7e75a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,21 +97,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192ec435945d87bc2f70992b4d818154b5feede43c09fb7592146374eac90a6" - -[[package]] -name = "alloc-stdlib" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "ansi_term" version = "0.11.0" @@ -160,16 +145,6 @@ dependencies = [ "rust-embed", ] -[[package]] -name = "async-attributes" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "async-broadcast" version = "0.3.4" @@ -198,7 +173,6 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" dependencies = [ - "brotli", "flate2", "futures-core", "futures-io", @@ -206,16 +180,6 @@ dependencies = [ "pin-project-lite 0.2.4", ] -[[package]] 
-name = "async-dup" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7427a12b8dc09291528cfb1da2447059adb4a257388c2acd6497a79d55cf6f7c" -dependencies = [ - "futures-io", - "simple-mutex", -] - [[package]] name = "async-executor" version = "1.4.0" @@ -257,24 +221,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "async-h1" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5142de15b549749cce62923a50714b0d7b77f5090ced141599e78899865451" -dependencies = [ - "async-channel", - "async-dup", - "async-std", - "byte-pool", - "futures-core", - "http-types", - "httparse", - "lazy_static", - "log", - "pin-project", -] - [[package]] name = "async-io" version = "1.3.1" @@ -372,48 +318,12 @@ dependencies = [ "webpki", ] -[[package]] -name = "async-session" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345022a2eed092cd105cc1b26fd61c341e100bd5fcbbd792df4baf31c2cc631f" -dependencies = [ - "anyhow", - "async-std", - "async-trait", - "base64 0.12.3", - "bincode", - "blake3", - "chrono", - "hmac 0.8.1", - "kv-log-macro", - "rand 0.7.3", - "serde", - "serde_json", - "sha2 0.9.5", -] - -[[package]] -name = "async-sse" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53bba003996b8fd22245cd0c59b869ba764188ed435392cf2796d03b805ade10" -dependencies = [ - "async-channel", - "async-std", - "http-types", - "log", - "memchr", - "pin-project-lite 0.1.12", -] - [[package]] name = "async-std" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" dependencies = [ - "async-attributes", "async-channel", "async-global-executor", "async-io", @@ -476,7 +386,7 @@ dependencies = [ "futures-util", "log", "pin-project-lite 0.2.4", - "tungstenite", + "tungstenite 0.16.0", ] [[package]] @@ -634,21 
+544,6 @@ dependencies = [ "wyz", ] -[[package]] -name = "blake3" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "cc", - "cfg-if 0.1.10", - "constant_time_eq", - "crypto-mac 0.8.0", - "digest 0.9.0", -] - [[package]] name = "block" version = "0.1.6" @@ -702,27 +597,6 @@ dependencies = [ "workspace", ] -[[package]] -name = "brotli" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f29919120f08613aadcd4383764e00526fc9f18b6c0895814faeed0dd78613e" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1052e1c3b8d4d80eb84a8b94f0a1498797b5fb96314c001156a1c761940ef4ec" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bstr" version = "0.2.15" @@ -744,16 +618,6 @@ version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" -[[package]] -name = "byte-pool" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c7230ddbb427b1094d477d821a99f3f54d36333178eeb806e279bcdcecf0ca" -dependencies = [ - "crossbeam-queue", - "stable_deref_trait", -] - [[package]] name = "bytemuck" version = "1.5.1" @@ -838,7 +702,6 @@ dependencies = [ "libc", "num-integer", "num-traits", - "serde", "time 0.1.44", "winapi 0.3.9", ] @@ -1012,8 +875,6 @@ name = "collab" version = "0.1.0" dependencies = [ "anyhow", - "async-io", - "async-std", "async-trait", "async-tungstenite", "base64 0.13.0", @@ -1025,6 +886,7 @@ dependencies = [ "envy", "futures", "gpui", + "hyper", "json_env_logger", "language", "lazy_static", @@ -1039,13 +901,12 @@ 
dependencies = [ "serde", "serde_json", "settings", - "sha-1", + "sha-1 0.9.6", "sqlx", - "surf", "theme", - "tide", - "tide-compress", "time 0.2.27", + "tokio", + "tokio-tungstenite", "toml", "util", "workspace", @@ -1096,12 +957,6 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - [[package]] name = "contacts_panel" version = "0.1.0" @@ -1319,16 +1174,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "crypto-mac" version = "0.10.0" @@ -1713,22 +1558,6 @@ dependencies = [ "instant", ] -[[package]] -name = "femme" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af1a24f391a5a94d756db5092c6576aad494b88a71a5a36b20c67b63e0df034" -dependencies = [ - "cfg-if 0.1.10", - "js-sys", - "log", - "serde", - "serde_derive", - "serde_json", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "file_finder" version = "0.1.0" @@ -2215,6 +2044,25 @@ dependencies = [ "syn", ] +[[package]] +name = "h2" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" version = "0.9.1" @@ -2279,16 +2127,6 @@ dependencies = [ "hmac 0.10.1", ] -[[package]] -name = "hmac" 
-version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.10.1" @@ -2329,6 +2167,17 @@ dependencies = [ "base64 0.12.3", ] +[[package]] +name = "http-body" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +dependencies = [ + "bytes 1.0.1", + "http", + "pin-project-lite 0.2.4", +] + [[package]] name = "http-client" version = "6.4.1" @@ -2372,12 +2221,42 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "hyper" +version = "0.14.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa 0.4.7", + "pin-project-lite 0.2.4", + "socket2 0.4.0", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "idna" version = "0.2.3" @@ -2480,7 +2359,7 @@ dependencies = [ "fnv", "lazy_static", "libc", - "mio", + "mio 0.6.23", "rand 0.7.3", "serde", "tempfile", @@ -2955,12 +2834,25 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow", + "miow 0.2.2", "net2", "slab", "winapi 
0.2.8", ] +[[package]] +name = "mio" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +dependencies = [ + "libc", + "log", + "miow 0.3.7", + "ntapi", + "winapi 0.3.9", +] + [[package]] name = "miow" version = "0.2.2" @@ -2973,6 +2865,15 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "miow" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "multimap" version = "0.8.3" @@ -3023,6 +2924,15 @@ dependencies = [ "version_check", ] +[[package]] +name = "ntapi" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-bigint" version = "0.4.0" @@ -3882,12 +3792,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "route-recognizer" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56770675ebc04927ded3e60633437841581c285dc6236109ea25fbf3beb7b59e" - [[package]] name = "roxmltree" version = "0.14.1" @@ -4333,6 +4237,17 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures 0.2.1", + "digest 0.10.3", +] + [[package]] name = "sha1" version = "0.6.0" @@ -4394,15 +4309,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ad1d488a557b235fc46dae55512ffbfc429d2482b08b4d9435ab07384ca8aec" -[[package]] -name = "simple-mutex" -version = "1.1.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38aabbeafa6f6dead8cebf246fe9fae1f9215c8d29b3a69f93bd62a9e4a3dcd6" -dependencies = [ - "event-listener", -] - [[package]] name = "simple_asn1" version = "0.5.3" @@ -4595,7 +4501,7 @@ dependencies = [ "rustls", "serde", "serde_json", - "sha-1", + "sha-1 0.9.6", "sha2 0.9.5", "smallvec", "sqlformat", @@ -4640,12 +4546,6 @@ dependencies = [ "async-std", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "standback" version = "0.2.17" @@ -4970,41 +4870,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "tide" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c459573f0dd2cc734b539047f57489ea875af8ee950860ded20cf93a79a1dee0" -dependencies = [ - "async-h1", - "async-session", - "async-sse", - "async-std", - "async-trait", - "femme", - "futures-util", - "http-client", - "http-types", - "kv-log-macro", - "log", - "pin-project-lite 0.2.4", - "route-recognizer", - "serde", - "serde_json", -] - -[[package]] -name = "tide-compress" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d59e3885ecbc547a611d81e501b51bb5f52abd44c3eb3b733ac3c44ff2f2619" -dependencies = [ - "async-compression", - "futures-lite", - "http-types", - "tide", -] - [[package]] name = "tiff" version = "0.6.1" @@ -5118,6 +4983,62 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +[[package]] +name = "tokio" +version = "1.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" +dependencies = [ + "bytes 1.0.1", + "libc", + "memchr", + "mio 0.7.14", + "num_cpus", + 
"once_cell", + "parking_lot", + "pin-project-lite 0.2.4", + "signal-hook-registry", + "tokio-macros", + "winapi 0.3.9", +] + +[[package]] +name = "tokio-macros" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite 0.17.2", +] + +[[package]] +name = "tokio-util" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +dependencies = [ + "bytes 1.0.1", + "futures-core", + "futures-sink", + "pin-project-lite 0.2.4", + "tokio", + "tracing", +] + [[package]] name = "toml" version = "0.5.8" @@ -5127,6 +5048,12 @@ dependencies = [ "serde", ] +[[package]] +name = "tower-service" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" + [[package]] name = "tracing" version = "0.1.26" @@ -5247,6 +5174,12 @@ dependencies = [ "tree-sitter", ] +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + [[package]] name = "ttf-parser" version = "0.9.0" @@ -5272,7 +5205,26 @@ dependencies = [ "httparse", "log", "rand 0.8.3", - "sha-1", + "sha-1 0.9.6", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "tungstenite" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" 
+dependencies = [ + "base64 0.13.0", + "byteorder", + "bytes 1.0.1", + "http", + "httparse", + "log", + "rand 0.8.3", + "sha-1 0.10.0", "thiserror", "url", "utf-8", @@ -5528,6 +5480,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -5547,8 +5509,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" dependencies = [ "cfg-if 1.0.0", - "serde", - "serde_json", "wasm-bindgen-macro", ] diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 44b551cd05de4249653c603895e3c3b96bcda7ad..e675e07e3f048ba42bc9c35c5f2c11d4af539a30 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -16,14 +16,15 @@ required-features = ["seed-support"] collections = { path = "../collections" } rpc = { path = "../rpc" } util = { path = "../util" } + anyhow = "1.0.40" -async-io = "1.3" -async-std = { version = "1.8.0", features = ["attributes"] } async-trait = "0.1.50" async-tungstenite = "0.16" base64 = "0.13" envy = "0.4.2" +env_logger = "0.8" futures = "0.3" +hyper = { version = "0.14", features = ["full"] } json_env_logger = "0.1" lipsum = { version = "0.8", optional = true } log = { version = "0.4.16", features = ["kv_unstable_serde"] } @@ -33,9 +34,8 @@ scrypt = "0.7" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" sha-1 = "0.9" -surf = "2.2.0" -tide = "0.16.0" -tide-compress = "0.9.0" +tokio = { version = "1", features = ["full"] } +tokio-tungstenite = "0.17" time = "0.2" toml = "0.5.8" diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index c909650f26e7466e09ab845b6174da062747da95..10a2e0a5fa85be6d228a13f5e216d8b77c63f6e8 100644 --- 
a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -1,179 +1,179 @@ -use crate::{auth, db::UserId, AppState, Request, RequestExt as _}; +// use crate::{auth, db::UserId, AppState, Request, RequestExt as _}; use async_trait::async_trait; use serde::Deserialize; use serde_json::json; use std::sync::Arc; -use surf::StatusCode; - -pub fn add_routes(app: &mut tide::Server>) { - app.at("/users").get(get_users); - app.at("/users").post(create_user); - app.at("/users/:id").put(update_user); - app.at("/users/:id").delete(destroy_user); - app.at("/users/:github_login").get(get_user); - app.at("/users/:github_login/access_tokens") - .post(create_access_token); -} - -async fn get_user(request: Request) -> tide::Result { - request.require_token().await?; - - let user = request - .db() - .get_user_by_github_login(request.param("github_login")?) - .await? - .ok_or_else(|| surf::Error::from_str(404, "user not found"))?; - - Ok(tide::Response::builder(StatusCode::Ok) - .body(tide::Body::from_json(&user)?) - .build()) -} - -async fn get_users(request: Request) -> tide::Result { - request.require_token().await?; - - let users = request.db().get_all_users().await?; - - Ok(tide::Response::builder(StatusCode::Ok) - .body(tide::Body::from_json(&users)?) - .build()) -} - -async fn create_user(mut request: Request) -> tide::Result { - request.require_token().await?; - - #[derive(Deserialize)] - struct Params { - github_login: String, - admin: bool, - } - let params = request.body_json::().await?; - - let user_id = request - .db() - .create_user(¶ms.github_login, params.admin) - .await?; - - let user = request.db().get_user_by_id(user_id).await?.ok_or_else(|| { - surf::Error::from_str( - StatusCode::InternalServerError, - "couldn't find the user we just created", - ) - })?; - - Ok(tide::Response::builder(StatusCode::Ok) - .body(tide::Body::from_json(&user)?) 
- .build()) -} - -async fn update_user(mut request: Request) -> tide::Result { - request.require_token().await?; - - #[derive(Deserialize)] - struct Params { - admin: bool, - } - let user_id = UserId( - request - .param("id")? - .parse::() - .map_err(|error| surf::Error::from_str(StatusCode::BadRequest, error.to_string()))?, - ); - let params = request.body_json::().await?; - - request - .db() - .set_user_is_admin(user_id, params.admin) - .await?; - - Ok(tide::Response::builder(StatusCode::Ok).build()) -} - -async fn destroy_user(request: Request) -> tide::Result { - request.require_token().await?; - let user_id = UserId( - request - .param("id")? - .parse::() - .map_err(|error| surf::Error::from_str(StatusCode::BadRequest, error.to_string()))?, - ); - - request.db().destroy_user(user_id).await?; - - Ok(tide::Response::builder(StatusCode::Ok).build()) -} - -async fn create_access_token(request: Request) -> tide::Result { - request.require_token().await?; - - let user = request - .db() - .get_user_by_github_login(request.param("github_login")?) - .await? - .ok_or_else(|| surf::Error::from_str(StatusCode::NotFound, "user not found"))?; - - #[derive(Deserialize)] - struct QueryParams { - public_key: String, - impersonate: Option, - } - - let query_params: QueryParams = request.query().map_err(|_| { - surf::Error::from_str(StatusCode::UnprocessableEntity, "invalid query params") - })?; - - let mut user_id = user.id; - if let Some(impersonate) = query_params.impersonate { - if user.admin { - if let Some(impersonated_user) = - request.db().get_user_by_github_login(&impersonate).await? 
- { - user_id = impersonated_user.id; - } else { - return Ok(tide::Response::builder(StatusCode::UnprocessableEntity) - .body(format!( - "Can't impersonate non-existent user {}", - impersonate - )) - .build()); - } - } else { - return Ok(tide::Response::builder(StatusCode::Unauthorized) - .body(format!( - "Can't impersonate user {} because the real user isn't an admin", - impersonate - )) - .build()); - } - } - - let access_token = auth::create_access_token(request.db().as_ref(), user_id).await?; - let encrypted_access_token = - auth::encrypt_access_token(&access_token, query_params.public_key.clone())?; - - Ok(tide::Response::builder(StatusCode::Ok) - .body(json!({"user_id": user_id, "encrypted_access_token": encrypted_access_token})) - .build()) -} - -#[async_trait] -pub trait RequestExt { - async fn require_token(&self) -> tide::Result<()>; -} - -#[async_trait] -impl RequestExt for Request { - async fn require_token(&self) -> tide::Result<()> { - let token = self - .header("Authorization") - .and_then(|header| header.get(0)) - .and_then(|header| header.as_str().strip_prefix("token ")) - .ok_or_else(|| surf::Error::from_str(403, "invalid authorization header"))?; - - if token == self.state().config.api_token { - Ok(()) - } else { - Err(tide::Error::from_str(403, "invalid authorization token")) - } - } -} +// use surf::StatusCode; + +// pub fn add_routes(app: &mut tide::Server>) { +// app.at("/users").get(get_users); +// app.at("/users").post(create_user); +// app.at("/users/:id").put(update_user); +// app.at("/users/:id").delete(destroy_user); +// app.at("/users/:github_login").get(get_user); +// app.at("/users/:github_login/access_tokens") +// .post(create_access_token); +// } + +// async fn get_user(request: Request) -> tide::Result { +// request.require_token().await?; + +// let user = request +// .db() +// .get_user_by_github_login(request.param("github_login")?) +// .await? 
+// .ok_or_else(|| surf::Error::from_str(404, "user not found"))?; + +// Ok(tide::Response::builder(StatusCode::Ok) +// .body(tide::Body::from_json(&user)?) +// .build()) +// } + +// async fn get_users(request: Request) -> tide::Result { +// request.require_token().await?; + +// let users = request.db().get_all_users().await?; + +// Ok(tide::Response::builder(StatusCode::Ok) +// .body(tide::Body::from_json(&users)?) +// .build()) +// } + +// async fn create_user(mut request: Request) -> tide::Result { +// request.require_token().await?; + +// #[derive(Deserialize)] +// struct Params { +// github_login: String, +// admin: bool, +// } +// let params = request.body_json::().await?; + +// let user_id = request +// .db() +// .create_user(¶ms.github_login, params.admin) +// .await?; + +// let user = request.db().get_user_by_id(user_id).await?.ok_or_else(|| { +// surf::Error::from_str( +// StatusCode::InternalServerError, +// "couldn't find the user we just created", +// ) +// })?; + +// Ok(tide::Response::builder(StatusCode::Ok) +// .body(tide::Body::from_json(&user)?) +// .build()) +// } + +// async fn update_user(mut request: Request) -> tide::Result { +// request.require_token().await?; + +// #[derive(Deserialize)] +// struct Params { +// admin: bool, +// } +// let user_id = UserId( +// request +// .param("id")? +// .parse::() +// .map_err(|error| surf::Error::from_str(StatusCode::BadRequest, error.to_string()))?, +// ); +// let params = request.body_json::().await?; + +// request +// .db() +// .set_user_is_admin(user_id, params.admin) +// .await?; + +// Ok(tide::Response::builder(StatusCode::Ok).build()) +// } + +// async fn destroy_user(request: Request) -> tide::Result { +// request.require_token().await?; +// let user_id = UserId( +// request +// .param("id")? 
+// .parse::() +// .map_err(|error| surf::Error::from_str(StatusCode::BadRequest, error.to_string()))?, +// ); + +// request.db().destroy_user(user_id).await?; + +// Ok(tide::Response::builder(StatusCode::Ok).build()) +// } + +// async fn create_access_token(request: Request) -> tide::Result { +// request.require_token().await?; + +// let user = request +// .db() +// .get_user_by_github_login(request.param("github_login")?) +// .await? +// .ok_or_else(|| surf::Error::from_str(StatusCode::NotFound, "user not found"))?; + +// #[derive(Deserialize)] +// struct QueryParams { +// public_key: String, +// impersonate: Option, +// } + +// let query_params: QueryParams = request.query().map_err(|_| { +// surf::Error::from_str(StatusCode::UnprocessableEntity, "invalid query params") +// })?; + +// let mut user_id = user.id; +// if let Some(impersonate) = query_params.impersonate { +// if user.admin { +// if let Some(impersonated_user) = +// request.db().get_user_by_github_login(&impersonate).await? 
+// { +// user_id = impersonated_user.id; +// } else { +// return Ok(tide::Response::builder(StatusCode::UnprocessableEntity) +// .body(format!( +// "Can't impersonate non-existent user {}", +// impersonate +// )) +// .build()); +// } +// } else { +// return Ok(tide::Response::builder(StatusCode::Unauthorized) +// .body(format!( +// "Can't impersonate user {} because the real user isn't an admin", +// impersonate +// )) +// .build()); +// } +// } + +// let access_token = auth::create_access_token(request.db().as_ref(), user_id).await?; +// let encrypted_access_token = +// auth::encrypt_access_token(&access_token, query_params.public_key.clone())?; + +// Ok(tide::Response::builder(StatusCode::Ok) +// .body(json!({"user_id": user_id, "encrypted_access_token": encrypted_access_token})) +// .build()) +// } + +// #[async_trait] +// pub trait RequestExt { +// async fn require_token(&self) -> tide::Result<()>; +// } + +// #[async_trait] +// impl RequestExt for Request { +// async fn require_token(&self) -> tide::Result<()> { +// let token = self +// .header("Authorization") +// .and_then(|header| header.get(0)) +// .and_then(|header| header.as_str().strip_prefix("token ")) +// .ok_or_else(|| surf::Error::from_str(403, "invalid authorization header"))?; + +// if token == self.state().config.api_token { +// Ok(()) +// } else { +// Err(tide::Error::from_str(403, "invalid authorization token")) +// } +// } +// } diff --git a/crates/collab/src/auth.rs b/crates/collab/src/auth.rs index 0d2bb045d7cb74c6f37adde0808de237fd118829..9bbd9496416c3917ac39488aa42be812a488258a 100644 --- a/crates/collab/src/auth.rs +++ b/crates/collab/src/auth.rs @@ -1,102 +1,102 @@ -use super::{ - db::{self, UserId}, - errors::TideResultExt, -}; -use crate::Request; -use anyhow::{anyhow, Context}; -use rand::thread_rng; -use rpc::auth as zed_auth; -use scrypt::{ - password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, - Scrypt, -}; -use std::convert::TryFrom; -use surf::StatusCode; 
-use tide::Error; +// use super::{ +// db::{self, UserId}, +// errors::TideResultExt, +// }; +// use crate::Request; +// use anyhow::{anyhow, Context}; +// use rand::thread_rng; +// use rpc::auth as zed_auth; +// use scrypt::{ +// password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, +// Scrypt, +// }; +// use std::convert::TryFrom; +// use surf::StatusCode; +// use tide::Error; -pub async fn process_auth_header(request: &Request) -> tide::Result { - let mut auth_header = request - .header("Authorization") - .ok_or_else(|| { - Error::new( - StatusCode::BadRequest, - anyhow!("missing authorization header"), - ) - })? - .last() - .as_str() - .split_whitespace(); - let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| { - Error::new( - StatusCode::BadRequest, - anyhow!("missing user id in authorization header"), - ) - })?); - let access_token = auth_header.next().ok_or_else(|| { - Error::new( - StatusCode::BadRequest, - anyhow!("missing access token in authorization header"), - ) - })?; +// pub async fn process_auth_header(request: &Request) -> tide::Result { +// let mut auth_header = request +// .header("Authorization") +// .ok_or_else(|| { +// Error::new( +// StatusCode::BadRequest, +// anyhow!("missing authorization header"), +// ) +// })? +// .last() +// .as_str() +// .split_whitespace(); +// let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| { +// Error::new( +// StatusCode::BadRequest, +// anyhow!("missing user id in authorization header"), +// ) +// })?); +// let access_token = auth_header.next().ok_or_else(|| { +// Error::new( +// StatusCode::BadRequest, +// anyhow!("missing access token in authorization header"), +// ) +// })?; - let state = request.state().clone(); - let mut credentials_valid = false; - for password_hash in state.db.get_access_token_hashes(user_id).await? { - if verify_access_token(&access_token, &password_hash)? 
{ - credentials_valid = true; - break; - } - } +// let state = request.state().clone(); +// let mut credentials_valid = false; +// for password_hash in state.db.get_access_token_hashes(user_id).await? { +// if verify_access_token(&access_token, &password_hash)? { +// credentials_valid = true; +// break; +// } +// } - if !credentials_valid { - Err(Error::new( - StatusCode::Unauthorized, - anyhow!("invalid credentials"), - ))?; - } +// if !credentials_valid { +// Err(Error::new( +// StatusCode::Unauthorized, +// anyhow!("invalid credentials"), +// ))?; +// } - Ok(user_id) -} +// Ok(user_id) +// } -const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; +// const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; -pub async fn create_access_token(db: &dyn db::Db, user_id: UserId) -> tide::Result { - let access_token = zed_auth::random_token(); - let access_token_hash = - hash_access_token(&access_token).context("failed to hash access token")?; - db.create_access_token_hash(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE) - .await?; - Ok(access_token) -} +// pub async fn create_access_token(db: &dyn db::Db, user_id: UserId) -> tide::Result { +// let access_token = zed_auth::random_token(); +// let access_token_hash = +// hash_access_token(&access_token).context("failed to hash access token")?; +// db.create_access_token_hash(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE) +// .await?; +// Ok(access_token) +// } -fn hash_access_token(token: &str) -> tide::Result { - // Avoid slow hashing in debug mode. - let params = if cfg!(debug_assertions) { - scrypt::Params::new(1, 1, 1).unwrap() - } else { - scrypt::Params::recommended() - }; +// fn hash_access_token(token: &str) -> tide::Result { +// // Avoid slow hashing in debug mode. +// let params = if cfg!(debug_assertions) { +// scrypt::Params::new(1, 1, 1).unwrap() +// } else { +// scrypt::Params::recommended() +// }; - Ok(Scrypt - .hash_password( - token.as_bytes(), - None, - params, - &SaltString::generate(thread_rng()), - )? 
- .to_string()) -} +// Ok(Scrypt +// .hash_password( +// token.as_bytes(), +// None, +// params, +// &SaltString::generate(thread_rng()), +// )? +// .to_string()) +// } -pub fn encrypt_access_token(access_token: &str, public_key: String) -> tide::Result { - let native_app_public_key = - zed_auth::PublicKey::try_from(public_key).context("failed to parse app public key")?; - let encrypted_access_token = native_app_public_key - .encrypt_string(&access_token) - .context("failed to encrypt access token with public key")?; - Ok(encrypted_access_token) -} +// pub fn encrypt_access_token(access_token: &str, public_key: String) -> tide::Result { +// let native_app_public_key = +// zed_auth::PublicKey::try_from(public_key).context("failed to parse app public key")?; +// let encrypted_access_token = native_app_public_key +// .encrypt_string(&access_token) +// .context("failed to encrypt access token with public key")?; +// Ok(encrypted_access_token) +// } -pub fn verify_access_token(token: &str, hash: &str) -> tide::Result { - let hash = PasswordHash::new(hash)?; - Ok(Scrypt.verify_password(token.as_bytes(), &hash).is_ok()) -} +// pub fn verify_access_token(token: &str, hash: &str) -> tide::Result { +// let hash = PasswordHash::new(hash)?; +// Ok(Scrypt.verify_password(token.as_bytes(), &hash).is_ok()) +// } diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1ae85559e0f115649ebe192024c0b3c4170ea657..283e691211bef8779515e8e6454149dc18f3931c 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1,11 +1,12 @@ use anyhow::Context; use anyhow::Result; -use async_std::task::{block_on, yield_now}; use async_trait::async_trait; +use futures::executor::block_on; use serde::Serialize; pub use sqlx::postgres::PgPoolOptions as DbOptions; use sqlx::{types::Uuid, FromRow}; use time::OffsetDateTime; +use tokio::task::yield_now; macro_rules! 
test_support { ($self:ident, { $($token:tt)* }) => {{ @@ -81,7 +82,7 @@ pub struct PostgresDb { } impl PostgresDb { - pub async fn new(url: &str, max_connections: u32) -> tide::Result { + pub async fn new(url: &str, max_connections: u32) -> Result { let pool = DbOptions::new() .max_connections(max_connections) .connect(url) diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index d5f7a570ae5e8456e424df3c5f31c3e2606701ac..210eeb422044b1c7bdece7eb24affc183772f0ee 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -2,18 +2,17 @@ mod api; mod auth; mod db; mod env; -mod errors; mod rpc; use ::rpc::Peer; -use async_std::net::TcpListener; +use anyhow::Result; use async_trait::async_trait; use db::{Db, PostgresDb}; use serde::Deserialize; use std::sync::Arc; -use tide_compress::CompressMiddleware; +use tokio::net::TcpListener; -type Request = tide::Request>; +// type Request = tide::Request>; #[derive(Default, Deserialize)] pub struct Config { @@ -28,7 +27,7 @@ pub struct AppState { } impl AppState { - async fn new(config: Config) -> tide::Result> { + async fn new(config: Config) -> Result> { let db = PostgresDb::new(&config.database_url, 5).await?; let this = Self { @@ -39,24 +38,24 @@ impl AppState { } } -#[async_trait] -trait RequestExt { - fn db(&self) -> &Arc; -} +// #[async_trait] +// trait RequestExt { +// fn db(&self) -> &Arc; +// } -#[async_trait] -impl RequestExt for Request { - fn db(&self) -> &Arc { - &self.state().db - } -} +// #[async_trait] +// impl RequestExt for Request { +// fn db(&self) -> &Arc { +// &self.state().db +// } +// } -#[async_std::main] -async fn main() -> tide::Result<()> { +#[tokio::main] +async fn main() -> Result<()> { if std::env::var("LOG_JSON").is_ok() { json_env_logger::init(); } else { - tide::log::start(); + env_logger::init(); } if let Err(error) = env::load_dotenv() { @@ -78,21 +77,17 @@ async fn main() -> tide::Result<()> { Ok(()) } -pub async fn run_server( - state: Arc, - rpc: Arc, - 
listener: TcpListener, -) -> tide::Result<()> { - let mut app = tide::with_state(state.clone()); - rpc::add_routes(&mut app, &rpc); +pub async fn run_server(state: Arc, rpc: Arc, listener: TcpListener) -> Result<()> { + // let mut app = tide::with_state(state.clone()); + // rpc::add_routes(&mut app, &rpc); - let mut web = tide::with_state(state.clone()); - web.with(CompressMiddleware::new()); - api::add_routes(&mut web); + // let mut web = tide::with_state(state.clone()); + // web.with(CompressMiddleware::new()); + // api::add_routes(&mut web); - app.at("/").nest(web); + // app.at("/").nest(web); - app.listen(listener).await?; + // app.listen(listener).await?; Ok(()) } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 47b16e2633d6ac9e262cb0035bb6891edcdae33e..1cc163d633fe690f3fd0b6a6212e019847895b94 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1,6368 +1,6362 @@ -mod store; - -use super::{ - auth::process_auth_header, - db::{ChannelId, MessageId, UserId}, - AppState, -}; -use anyhow::anyhow; -use async_io::Timer; -use async_std::{ - sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, - task, -}; -use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream}; -use collections::{HashMap, HashSet}; -use futures::{channel::mpsc, future::BoxFuture, FutureExt, SinkExt, StreamExt}; -use log::{as_debug, as_display}; -use rpc::{ - proto::{self, AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}, - Connection, ConnectionId, Peer, TypedEnvelope, -}; -use sha1::{Digest as _, Sha1}; -use std::{ - any::TypeId, - future::Future, - marker::PhantomData, - ops::{Deref, DerefMut}, - rc::Rc, - sync::Arc, - time::{Duration, Instant}, -}; -use store::{Store, Worktree}; -use surf::StatusCode; -use tide::{ - http::headers::{HeaderName, CONNECTION, UPGRADE}, - Request, Response, -}; -use time::OffsetDateTime; -use util::ResultExt; - -type MessageHandler = Box< - dyn Send - + Sync - + Fn(Arc, Box) -> BoxFuture<'static, 
tide::Result<()>>, ->; - -pub struct Server { - peer: Arc, - store: RwLock, - app_state: Arc, - handlers: HashMap, - notifications: Option>, -} - -pub trait Executor: Send + Clone { - type Timer: Send + Future; - fn spawn_detached>(&self, future: F); - fn timer(&self, duration: Duration) -> Self::Timer; -} - -#[derive(Clone)] -pub struct RealExecutor; - -const MESSAGE_COUNT_PER_PAGE: usize = 100; -const MAX_MESSAGE_LEN: usize = 1024; - -struct StoreReadGuard<'a> { - guard: RwLockReadGuard<'a, Store>, - _not_send: PhantomData>, -} - -struct StoreWriteGuard<'a> { - guard: RwLockWriteGuard<'a, Store>, - _not_send: PhantomData>, -} - -impl Server { - pub fn new( - app_state: Arc, - peer: Arc, - notifications: Option>, - ) -> Arc { - let mut server = Self { - peer, - app_state, - store: Default::default(), - handlers: Default::default(), - notifications, - }; - - server - .add_request_handler(Server::ping) - .add_request_handler(Server::register_project) - .add_message_handler(Server::unregister_project) - .add_request_handler(Server::share_project) - .add_message_handler(Server::unshare_project) - .add_sync_request_handler(Server::join_project) - .add_message_handler(Server::leave_project) - .add_request_handler(Server::register_worktree) - .add_message_handler(Server::unregister_worktree) - .add_request_handler(Server::update_worktree) - .add_message_handler(Server::start_language_server) - .add_message_handler(Server::update_language_server) - .add_message_handler(Server::update_diagnostic_summary) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - 
.add_request_handler(Server::forward_project_request::) - .add_request_handler( - Server::forward_project_request::, - ) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::forward_project_request::) - .add_request_handler(Server::update_buffer) - .add_message_handler(Server::update_buffer_file) - .add_message_handler(Server::buffer_reloaded) - .add_message_handler(Server::buffer_saved) - .add_request_handler(Server::save_buffer) - .add_request_handler(Server::get_channels) - .add_request_handler(Server::get_users) - .add_request_handler(Server::join_channel) - .add_message_handler(Server::leave_channel) - .add_request_handler(Server::send_channel_message) - .add_request_handler(Server::follow) - .add_message_handler(Server::unfollow) - .add_message_handler(Server::update_followers) - .add_request_handler(Server::get_channel_messages); - - Arc::new(server) - } - - fn add_message_handler(&mut self, handler: F) -> &mut Self - where - F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, - Fut: 'static + Send + Future>, - M: EnvelopedMessage, - { - let prev_handler = self.handlers.insert( - TypeId::of::(), - Box::new(move |server, envelope| { - let envelope = envelope.into_any().downcast::>().unwrap(); - (handler)(server, *envelope).boxed() - }), - ); - if prev_handler.is_some() { - panic!("registered a handler for the same message twice"); - } - self - } - - fn add_request_handler(&mut self, handler: F) -> &mut Self - where - F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, - Fut: 'static + Send + Future>, - M: RequestMessage, - { - self.add_message_handler(move |server, envelope| { - let receipt = envelope.receipt(); - let response = (handler)(server.clone(), envelope); - async move { - 
match response.await { - Ok(response) => { - server.peer.respond(receipt, response)?; - Ok(()) - } - Err(error) => { - server.peer.respond_with_error( - receipt, - proto::Error { - message: error.to_string(), - }, - )?; - Err(error) - } - } - } - }) - } - - /// Handle a request while holding a lock to the store. This is useful when we're registering - /// a connection but we want to respond on the connection before anybody else can send on it. - fn add_sync_request_handler(&mut self, handler: F) -> &mut Self - where - F: 'static - + Send - + Sync - + Fn(Arc, &mut Store, TypedEnvelope) -> tide::Result, - M: RequestMessage, - { - let handler = Arc::new(handler); - self.add_message_handler(move |server, envelope| { - let receipt = envelope.receipt(); - let handler = handler.clone(); - async move { - let mut store = server.store.write().await; - let response = (handler)(server.clone(), &mut *store, envelope); - match response { - Ok(response) => { - server.peer.respond(receipt, response)?; - Ok(()) - } - Err(error) => { - server.peer.respond_with_error( - receipt, - proto::Error { - message: error.to_string(), - }, - )?; - Err(error) - } - } - } - }) - } - - pub fn handle_connection( - self: &Arc, - connection: Connection, - addr: String, - user_id: UserId, - mut send_connection_id: Option>, - executor: E, - ) -> impl Future { - let mut this = self.clone(); - async move { - let (connection_id, handle_io, mut incoming_rx) = this - .peer - .add_connection(connection, { - let executor = executor.clone(); - move |duration| { - let timer = executor.timer(duration); - async move { - timer.await; - } - } - }) - .await; - - if let Some(send_connection_id) = send_connection_id.as_mut() { - let _ = send_connection_id.send(connection_id).await; - } - - { - let mut state = this.state_mut().await; - state.add_connection(connection_id, user_id); - this.update_contacts_for_users(&*state, &[user_id]); - } - - let handle_io = handle_io.fuse(); - futures::pin_mut!(handle_io); - loop { - 
let next_message = incoming_rx.next().fuse(); - futures::pin_mut!(next_message); - futures::select_biased! { - result = handle_io => { - if let Err(err) = result { - log::error!("error handling rpc connection {:?} - {:?}", addr, err); - } - break; - } - message = next_message => { - if let Some(message) = message { - let start_time = Instant::now(); - let type_name = message.payload_type_name(); - log::info!(connection_id = connection_id.0, type_name = type_name; "rpc message received"); - if let Some(handler) = this.handlers.get(&message.payload_type_id()) { - let notifications = this.notifications.clone(); - let is_background = message.is_background(); - let handle_message = (handler)(this.clone(), message); - let handle_message = async move { - if let Err(err) = handle_message.await { - log::error!(connection_id = connection_id.0, type = type_name, error = as_display!(err); "rpc message error"); - } else { - log::info!(connection_id = connection_id.0, type = type_name, duration = as_debug!(start_time.elapsed()); "rpc message handled"); - } - if let Some(mut notifications) = notifications { - let _ = notifications.send(()).await; - } - }; - if is_background { - executor.spawn_detached(handle_message); - } else { - handle_message.await; - } - } else { - log::warn!("unhandled message: {}", type_name); - } - } else { - log::info!(address = as_debug!(addr); "rpc connection closed"); - break; - } - } - } - } - - if let Err(err) = this.sign_out(connection_id).await { - log::error!("error signing out connection {:?} - {:?}", addr, err); - } - } - } - - async fn sign_out(self: &mut Arc, connection_id: ConnectionId) -> tide::Result<()> { - self.peer.disconnect(connection_id); - let mut state = self.state_mut().await; - let removed_connection = state.remove_connection(connection_id)?; - - for (project_id, project) in removed_connection.hosted_projects { - if let Some(share) = project.share { - broadcast( - connection_id, - share.guests.keys().copied().collect(), - 
|conn_id| { - self.peer - .send(conn_id, proto::UnshareProject { project_id }) - }, - ); - } - } - - for (project_id, peer_ids) in removed_connection.guest_project_ids { - broadcast(connection_id, peer_ids, |conn_id| { - self.peer.send( - conn_id, - proto::RemoveProjectCollaborator { - project_id, - peer_id: connection_id.0, - }, - ) - }); - } - - self.update_contacts_for_users(&*state, removed_connection.contact_ids.iter()); - Ok(()) - } - - async fn ping(self: Arc, _: TypedEnvelope) -> tide::Result { - Ok(proto::Ack {}) - } - - async fn register_project( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let project_id = { - let mut state = self.state_mut().await; - let user_id = state.user_id_for_connection(request.sender_id)?; - state.register_project(request.sender_id, user_id) - }; - Ok(proto::RegisterProjectResponse { project_id }) - } - - async fn unregister_project( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let mut state = self.state_mut().await; - let project = state.unregister_project(request.payload.project_id, request.sender_id)?; - self.update_contacts_for_users(&*state, &project.authorized_user_ids()); - Ok(()) - } - - async fn share_project( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let mut state = self.state_mut().await; - let project = state.share_project(request.payload.project_id, request.sender_id)?; - self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); - Ok(proto::Ack {}) - } - - async fn unshare_project( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let project_id = request.payload.project_id; - let mut state = self.state_mut().await; - let project = state.unshare_project(project_id, request.sender_id)?; - broadcast(request.sender_id, project.connection_ids, |conn_id| { - self.peer - .send(conn_id, proto::UnshareProject { project_id }) - }); - self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); - Ok(()) - } - - fn 
join_project( - self: Arc, - state: &mut Store, - request: TypedEnvelope, - ) -> tide::Result { - let project_id = request.payload.project_id; - - let user_id = state.user_id_for_connection(request.sender_id)?; - let (response, connection_ids, contact_user_ids) = state - .join_project(request.sender_id, user_id, project_id) - .and_then(|joined| { - let share = joined.project.share()?; - let peer_count = share.guests.len(); - let mut collaborators = Vec::with_capacity(peer_count); - collaborators.push(proto::Collaborator { - peer_id: joined.project.host_connection_id.0, - replica_id: 0, - user_id: joined.project.host_user_id.to_proto(), - }); - let worktrees = share - .worktrees - .iter() - .filter_map(|(id, shared_worktree)| { - let worktree = joined.project.worktrees.get(&id)?; - Some(proto::Worktree { - id: *id, - root_name: worktree.root_name.clone(), - entries: shared_worktree.entries.values().cloned().collect(), - diagnostic_summaries: shared_worktree - .diagnostic_summaries - .values() - .cloned() - .collect(), - visible: worktree.visible, - }) - }) - .collect(); - for (peer_conn_id, (peer_replica_id, peer_user_id)) in &share.guests { - if *peer_conn_id != request.sender_id { - collaborators.push(proto::Collaborator { - peer_id: peer_conn_id.0, - replica_id: *peer_replica_id as u32, - user_id: peer_user_id.to_proto(), - }); - } - } - let response = proto::JoinProjectResponse { - worktrees, - replica_id: joined.replica_id as u32, - collaborators, - language_servers: joined.project.language_servers.clone(), - }; - let connection_ids = joined.project.connection_ids(); - let contact_user_ids = joined.project.authorized_user_ids(); - Ok((response, connection_ids, contact_user_ids)) - })?; - - broadcast(request.sender_id, connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::AddProjectCollaborator { - project_id, - collaborator: Some(proto::Collaborator { - peer_id: request.sender_id.0, - replica_id: response.replica_id, - user_id: user_id.to_proto(), - 
}), - }, - ) - }); - self.update_contacts_for_users(state, &contact_user_ids); - Ok(response) - } - - async fn leave_project( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let sender_id = request.sender_id; - let project_id = request.payload.project_id; - let mut state = self.state_mut().await; - let worktree = state.leave_project(sender_id, project_id)?; - broadcast(sender_id, worktree.connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::RemoveProjectCollaborator { - project_id, - peer_id: sender_id.0, - }, - ) - }); - self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); - Ok(()) - } - - async fn register_worktree( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let mut contact_user_ids = HashSet::default(); - for github_login in &request.payload.authorized_logins { - let contact_user_id = self.app_state.db.create_user(github_login, false).await?; - contact_user_ids.insert(contact_user_id); - } - - let mut state = self.state_mut().await; - let host_user_id = state.user_id_for_connection(request.sender_id)?; - contact_user_ids.insert(host_user_id); - - let contact_user_ids = contact_user_ids.into_iter().collect::>(); - let guest_connection_ids = state - .read_project(request.payload.project_id, request.sender_id)? 
- .guest_connection_ids(); - state.register_worktree( - request.payload.project_id, - request.payload.worktree_id, - request.sender_id, - Worktree { - authorized_user_ids: contact_user_ids.clone(), - root_name: request.payload.root_name.clone(), - visible: request.payload.visible, - }, - )?; - - broadcast(request.sender_id, guest_connection_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - self.update_contacts_for_users(&*state, &contact_user_ids); - Ok(proto::Ack {}) - } - - async fn unregister_worktree( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let project_id = request.payload.project_id; - let worktree_id = request.payload.worktree_id; - let mut state = self.state_mut().await; - let (worktree, guest_connection_ids) = - state.unregister_worktree(project_id, worktree_id, request.sender_id)?; - broadcast(request.sender_id, guest_connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::UnregisterWorktree { - project_id, - worktree_id, - }, - ) - }); - self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); - Ok(()) - } - - async fn update_worktree( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let connection_ids = self.state_mut().await.update_worktree( - request.sender_id, - request.payload.project_id, - request.payload.worktree_id, - &request.payload.removed_entries, - &request.payload.updated_entries, - )?; - - broadcast(request.sender_id, connection_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - - Ok(proto::Ack {}) - } - - async fn update_diagnostic_summary( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let summary = request - .payload - .summary - .clone() - .ok_or_else(|| anyhow!("invalid summary"))?; - let receiver_ids = self.state_mut().await.update_diagnostic_summary( - request.payload.project_id, - request.payload.worktree_id, - 
request.sender_id, - summary, - )?; - - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(()) - } - - async fn start_language_server( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let receiver_ids = self.state_mut().await.start_language_server( - request.payload.project_id, - request.sender_id, - request - .payload - .server - .clone() - .ok_or_else(|| anyhow!("invalid language server"))?, - )?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(()) - } - - async fn update_language_server( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let receiver_ids = self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(()) - } - - async fn forward_project_request( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result - where - T: EntityMessage + RequestMessage, - { - let host_connection_id = self - .state() - .await - .read_project(request.payload.remote_entity_id(), request.sender_id)? - .host_connection_id; - Ok(self - .peer - .forward_request(request.sender_id, host_connection_id, request.payload) - .await?) - } - - async fn save_buffer( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let host = self - .state() - .await - .read_project(request.payload.project_id, request.sender_id)? - .host_connection_id; - let response = self - .peer - .forward_request(request.sender_id, host, request.payload.clone()) - .await?; - - let mut guests = self - .state() - .await - .read_project(request.payload.project_id, request.sender_id)? 
- .connection_ids(); - guests.retain(|guest_connection_id| *guest_connection_id != request.sender_id); - broadcast(host, guests, |conn_id| { - self.peer.forward_send(host, conn_id, response.clone()) - }); - - Ok(response) - } - - async fn update_buffer( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let receiver_ids = self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(proto::Ack {}) - } - - async fn update_buffer_file( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let receiver_ids = self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(()) - } - - async fn buffer_reloaded( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let receiver_ids = self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(()) - } - - async fn buffer_saved( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let receiver_ids = self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)?; - broadcast(request.sender_id, receiver_ids, |connection_id| { - self.peer - .forward_send(request.sender_id, connection_id, request.payload.clone()) - }); - Ok(()) - } - - async fn follow( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let leader_id = ConnectionId(request.payload.leader_id); - let follower_id = request.sender_id; - if !self - .state() - .await - 
.project_connection_ids(request.payload.project_id, follower_id)? - .contains(&leader_id) - { - Err(anyhow!("no such peer"))?; - } - let mut response = self - .peer - .forward_request(request.sender_id, leader_id, request.payload) - .await?; - response - .views - .retain(|view| view.leader_id != Some(follower_id.0)); - Ok(response) - } - - async fn unfollow( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let leader_id = ConnectionId(request.payload.leader_id); - if !self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)? - .contains(&leader_id) - { - Err(anyhow!("no such peer"))?; - } - self.peer - .forward_send(request.sender_id, leader_id, request.payload)?; - Ok(()) - } - - async fn update_followers( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let connection_ids = self - .state() - .await - .project_connection_ids(request.payload.project_id, request.sender_id)?; - let leader_id = request - .payload - .variant - .as_ref() - .and_then(|variant| match variant { - proto::update_followers::Variant::CreateView(payload) => payload.leader_id, - proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, - proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, - }); - for follower_id in &request.payload.follower_ids { - let follower_id = ConnectionId(*follower_id); - if connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { - self.peer - .forward_send(request.sender_id, follower_id, request.payload.clone())?; - } - } - Ok(()) - } - - async fn get_channels( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let user_id = self - .state() - .await - .user_id_for_connection(request.sender_id)?; - let channels = self.app_state.db.get_accessible_channels(user_id).await?; - Ok(proto::GetChannelsResponse { - channels: channels - .into_iter() - .map(|chan| proto::Channel { - id: chan.id.to_proto(), - name: chan.name, - 
}) - .collect(), - }) - } - - async fn get_users( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let user_ids = request - .payload - .user_ids - .into_iter() - .map(UserId::from_proto) - .collect(); - let users = self - .app_state - .db - .get_users_by_ids(user_ids) - .await? - .into_iter() - .map(|user| proto::User { - id: user.id.to_proto(), - avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), - github_login: user.github_login, - }) - .collect(); - Ok(proto::GetUsersResponse { users }) - } - - fn update_contacts_for_users<'a>( - self: &Arc, - state: &Store, - user_ids: impl IntoIterator, - ) { - for user_id in user_ids { - let contacts = state.contacts_for_user(*user_id); - for connection_id in state.connection_ids_for_user(*user_id) { - self.peer - .send( - connection_id, - proto::UpdateContacts { - contacts: contacts.clone(), - }, - ) - .log_err(); - } - } - } - - async fn join_channel( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let user_id = self - .state() - .await - .user_id_for_connection(request.sender_id)?; - let channel_id = ChannelId::from_proto(request.payload.channel_id); - if !self - .app_state - .db - .can_user_access_channel(user_id, channel_id) - .await? - { - Err(anyhow!("access denied"))?; - } - - self.state_mut() - .await - .join_channel(request.sender_id, channel_id); - let messages = self - .app_state - .db - .get_channel_messages(channel_id, MESSAGE_COUNT_PER_PAGE, None) - .await? 
- .into_iter() - .map(|msg| proto::ChannelMessage { - id: msg.id.to_proto(), - body: msg.body, - timestamp: msg.sent_at.unix_timestamp() as u64, - sender_id: msg.sender_id.to_proto(), - nonce: Some(msg.nonce.as_u128().into()), - }) - .collect::>(); - Ok(proto::JoinChannelResponse { - done: messages.len() < MESSAGE_COUNT_PER_PAGE, - messages, - }) - } - - async fn leave_channel( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result<()> { - let user_id = self - .state() - .await - .user_id_for_connection(request.sender_id)?; - let channel_id = ChannelId::from_proto(request.payload.channel_id); - if !self - .app_state - .db - .can_user_access_channel(user_id, channel_id) - .await? - { - Err(anyhow!("access denied"))?; - } - - self.state_mut() - .await - .leave_channel(request.sender_id, channel_id); - - Ok(()) - } - - async fn send_channel_message( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let channel_id = ChannelId::from_proto(request.payload.channel_id); - let user_id; - let connection_ids; - { - let state = self.state().await; - user_id = state.user_id_for_connection(request.sender_id)?; - connection_ids = state.channel_connection_ids(channel_id)?; - } - - // Validate the message body. - let body = request.payload.body.trim().to_string(); - if body.len() > MAX_MESSAGE_LEN { - return Err(anyhow!("message is too long"))?; - } - if body.is_empty() { - return Err(anyhow!("message can't be blank"))?; - } - - let timestamp = OffsetDateTime::now_utc(); - let nonce = request - .payload - .nonce - .ok_or_else(|| anyhow!("nonce can't be blank"))?; - - let message_id = self - .app_state - .db - .create_channel_message(channel_id, user_id, &body, timestamp, nonce.clone().into()) - .await? 
- .to_proto(); - let message = proto::ChannelMessage { - sender_id: user_id.to_proto(), - id: message_id, - body, - timestamp: timestamp.unix_timestamp() as u64, - nonce: Some(nonce), - }; - broadcast(request.sender_id, connection_ids, |conn_id| { - self.peer.send( - conn_id, - proto::ChannelMessageSent { - channel_id: channel_id.to_proto(), - message: Some(message.clone()), - }, - ) - }); - Ok(proto::SendChannelMessageResponse { - message: Some(message), - }) - } - - async fn get_channel_messages( - self: Arc, - request: TypedEnvelope, - ) -> tide::Result { - let user_id = self - .state() - .await - .user_id_for_connection(request.sender_id)?; - let channel_id = ChannelId::from_proto(request.payload.channel_id); - if !self - .app_state - .db - .can_user_access_channel(user_id, channel_id) - .await? - { - Err(anyhow!("access denied"))?; - } - - let messages = self - .app_state - .db - .get_channel_messages( - channel_id, - MESSAGE_COUNT_PER_PAGE, - Some(MessageId::from_proto(request.payload.before_message_id)), - ) - .await? 
- .into_iter() - .map(|msg| proto::ChannelMessage { - id: msg.id.to_proto(), - body: msg.body, - timestamp: msg.sent_at.unix_timestamp() as u64, - sender_id: msg.sender_id.to_proto(), - nonce: Some(msg.nonce.as_u128().into()), - }) - .collect::>(); - - Ok(proto::GetChannelMessagesResponse { - done: messages.len() < MESSAGE_COUNT_PER_PAGE, - messages, - }) - } - - async fn state<'a>(self: &'a Arc) -> StoreReadGuard<'a> { - #[cfg(test)] - async_std::task::yield_now().await; - let guard = self.store.read().await; - #[cfg(test)] - async_std::task::yield_now().await; - StoreReadGuard { - guard, - _not_send: PhantomData, - } - } - - async fn state_mut<'a>(self: &'a Arc) -> StoreWriteGuard<'a> { - #[cfg(test)] - async_std::task::yield_now().await; - let guard = self.store.write().await; - #[cfg(test)] - async_std::task::yield_now().await; - StoreWriteGuard { - guard, - _not_send: PhantomData, - } - } -} - -impl<'a> Deref for StoreReadGuard<'a> { - type Target = Store; - - fn deref(&self) -> &Self::Target { - &*self.guard - } -} - -impl<'a> Deref for StoreWriteGuard<'a> { - type Target = Store; - - fn deref(&self) -> &Self::Target { - &*self.guard - } -} - -impl<'a> DerefMut for StoreWriteGuard<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut *self.guard - } -} - -impl<'a> Drop for StoreWriteGuard<'a> { - fn drop(&mut self) { - #[cfg(test)] - self.check_invariants(); - } -} - -impl Executor for RealExecutor { - type Timer = Timer; - - fn spawn_detached>(&self, future: F) { - task::spawn(future); - } - - fn timer(&self, duration: Duration) -> Self::Timer { - Timer::after(duration) - } -} - -fn broadcast(sender_id: ConnectionId, receiver_ids: Vec, mut f: F) -where - F: FnMut(ConnectionId) -> anyhow::Result<()>, -{ - for receiver_id in receiver_ids { - if receiver_id != sender_id { - f(receiver_id).log_err(); - } - } -} - -pub fn add_routes(app: &mut tide::Server>, rpc: &Arc) { - let server = Server::new(app.state().clone(), rpc.clone(), None); - 
app.at("/rpc").get(move |request: Request>| { - let server = server.clone(); - async move { - const WEBSOCKET_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; - - let connection_upgrade = header_contains_ignore_case(&request, CONNECTION, "upgrade"); - let upgrade_to_websocket = header_contains_ignore_case(&request, UPGRADE, "websocket"); - let upgrade_requested = connection_upgrade && upgrade_to_websocket; - let client_protocol_version: Option = request - .header("X-Zed-Protocol-Version") - .and_then(|v| v.as_str().parse().ok()); - - if !upgrade_requested || client_protocol_version != Some(rpc::PROTOCOL_VERSION) { - return Ok(Response::new(StatusCode::UpgradeRequired)); - } - - let header = match request.header("Sec-Websocket-Key") { - Some(h) => h.as_str(), - None => return Err(anyhow!("expected sec-websocket-key"))?, - }; - - let user_id = process_auth_header(&request).await?; - - let mut response = Response::new(StatusCode::SwitchingProtocols); - response.insert_header(UPGRADE, "websocket"); - response.insert_header(CONNECTION, "Upgrade"); - let hash = Sha1::new().chain(header).chain(WEBSOCKET_GUID).finalize(); - response.insert_header("Sec-Websocket-Accept", base64::encode(&hash[..])); - response.insert_header("Sec-Websocket-Version", "13"); - - let http_res: &mut tide::http::Response = response.as_mut(); - let upgrade_receiver = http_res.recv_upgrade().await; - let addr = request.remote().unwrap_or("unknown").to_string(); - task::spawn(async move { - if let Some(stream) = upgrade_receiver.await { - server - .handle_connection( - Connection::new( - WebSocketStream::from_raw_socket(stream, Role::Server, None).await, - ), - addr, - user_id, - None, - RealExecutor, - ) - .await; - } - }); - - Ok(response) - } - }); -} - -fn header_contains_ignore_case( - request: &tide::Request, - header_name: HeaderName, - value: &str, -) -> bool { - request - .header(header_name) - .map(|h| { - h.as_str() - .split(',') - .any(|s| s.trim().eq_ignore_ascii_case(value.trim())) 
- }) - .unwrap_or(false) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - db::{tests::TestDb, UserId}, - AppState, Config, - }; - use ::rpc::Peer; - use client::{ - self, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Credentials, - EstablishConnectionError, UserStore, RECEIVE_TIMEOUT, - }; - use collections::BTreeMap; - use editor::{ - self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Input, Redo, Rename, - ToOffset, ToggleCodeActions, Undo, - }; - use gpui::{ - executor::{self, Deterministic}, - geometry::vector::vec2f, - ModelHandle, TestAppContext, ViewHandle, - }; - use language::{ - range_to_lsp, tree_sitter_rust, Diagnostic, DiagnosticEntry, FakeLspAdapter, Language, - LanguageConfig, LanguageRegistry, OffsetRangeExt, Point, Rope, - }; - use lsp::{self, FakeLanguageServer}; - use parking_lot::Mutex; - use project::{ - fs::{FakeFs, Fs as _}, - search::SearchQuery, - worktree::WorktreeHandle, - DiagnosticSummary, Project, ProjectPath, WorktreeId, - }; - use rand::prelude::*; - use rpc::PeerId; - use serde_json::json; - use settings::Settings; - use sqlx::types::time::OffsetDateTime; - use std::{ - env, - ops::Deref, - path::{Path, PathBuf}, - rc::Rc, - sync::{ - atomic::{AtomicBool, Ordering::SeqCst}, - Arc, - }, - time::Duration, - }; - use theme::ThemeRegistry; - use workspace::{Item, SplitDirection, ToggleFollow, Workspace, WorkspaceParams}; - - #[cfg(test)] - #[ctor::ctor] - fn init_logger() { - if std::env::var("RUST_LOG").is_ok() { - env_logger::init(); - } - } - - #[gpui::test(iterations = 10)] - async fn test_share_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - let (window_b, _) = cx_b.add_window(|_| EmptyView); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - cx_a.foreground().forbid_parking(); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.txt": "a-contents", - "b.txt": "b-contents", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - let replica_id_b = project_b.read_with(cx_b, |project, _| { - assert_eq!( - project - .collaborators() - .get(&client_a.peer_id) - .unwrap() - .user - .github_login, - "user_a" - ); - project.replica_id() - }); - project_a - .condition(&cx_a, |tree, _| { - tree.collaborators() - .get(&client_b.peer_id) - .map_or(false, |collaborator| { - collaborator.replica_id == replica_id_b - && collaborator.user.github_login == "user_b" - }) - }) - .await; - - // Open the same file as client B and client A. 
- let buffer_b = project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) - .await - .unwrap(); - buffer_b.read_with(cx_b, |buf, _| assert_eq!(buf.text(), "b-contents")); - project_a.read_with(cx_a, |project, cx| { - assert!(project.has_open_buffer((worktree_id, "b.txt"), cx)) - }); - let buffer_a = project_a - .update(cx_a, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) - .await - .unwrap(); - - let editor_b = cx_b.add_view(window_b, |cx| Editor::for_buffer(buffer_b, None, cx)); - - // TODO - // // Create a selection set as client B and see that selection set as client A. - // buffer_a - // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1) - // .await; - - // Edit the buffer as client B and see that edit as client A. - editor_b.update(cx_b, |editor, cx| { - editor.handle_input(&Input("ok, ".into()), cx) - }); - buffer_a - .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents") - .await; - - // TODO - // // Remove the selection set as client B, see those selections disappear as client A. - cx_b.update(move |_| drop(editor_b)); - // buffer_a - // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0) - // .await; - - // Dropping the client B's project removes client B from client A's collaborators. - cx_b.update(move |_| drop(project_b)); - project_a - .condition(&cx_a, |project, _| project.collaborators().is_empty()) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_unshare_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - cx_a.foreground().forbid_parking(); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.txt": "a-contents", - "b.txt": "b-contents", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - - // Unshare the project as client A - project_a.update(cx_a, |project, cx| project.unshare(cx)); - project_b - .condition(cx_b, |project, _| project.is_read_only()) - .await; - assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); - cx_b.update(|_| { - drop(project_b); - }); - - // Share the project again and ensure guests can still join. 
- project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - - let project_b2 = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - project_b2 - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - } - - #[gpui::test(iterations = 10)] - async fn test_host_disconnect(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - cx_a.foreground().forbid_parking(); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.txt": "a-contents", - "b.txt": "b-contents", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - 
lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - - // Drop client A's connection. Collaborators should disappear and the project should not be shown as shared. - server.disconnect_client(client_a.current_user_id(cx_a)); - cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT); - project_a - .condition(cx_a, |project, _| project.collaborators().is_empty()) - .await; - project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); - project_b - .condition(cx_b, |project, _| project.is_read_only()) - .await; - assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); - cx_b.update(|_| { - drop(project_b); - }); - - // Await reconnection - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - - // Share the project again and ensure guests can still join. - project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - - let project_b2 = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - project_b2 - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - } - - #[gpui::test(iterations = 10)] - async fn test_propagate_saves_and_fs_changes( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - cx_c: &mut TestAppContext, - ) { - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - cx_a.foreground().forbid_parking(); - - // Connect to a server as 3 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - let client_c = server.create_client(cx_c, "user_c").await; - - // Share a worktree as client A. - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, - "file1": "", - "file2": "" - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join that worktree as clients B and C. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - let project_c = Project::remote( - project_id, - client_c.clone(), - client_c.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_c.to_async(), - ) - .await - .unwrap(); - let worktree_b = project_b.read_with(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); - let worktree_c = project_c.read_with(cx_c, |p, cx| p.worktrees(cx).next().unwrap()); - - // Open and edit a buffer as both guests B and C. 
- let buffer_b = project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) - .await - .unwrap(); - let buffer_c = project_c - .update(cx_c, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) - .await - .unwrap(); - buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx)); - buffer_c.update(cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx)); - - // Open and edit that buffer as the host. - let buffer_a = project_a - .update(cx_a, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) - .await - .unwrap(); - - buffer_a - .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, ") - .await; - buffer_a.update(cx_a, |buf, cx| { - buf.edit([buf.len()..buf.len()], "i-am-a", cx) - }); - - // Wait for edits to propagate - buffer_a - .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") - .await; - buffer_b - .condition(cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") - .await; - buffer_c - .condition(cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") - .await; - - // Edit the buffer as the host and concurrently save as guest B. - let save_b = buffer_b.update(cx_b, |buf, cx| buf.save(cx)); - buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx)); - save_b.await.unwrap(); - assert_eq!( - fs.load("/a/file1".as_ref()).await.unwrap(), - "hi-a, i-am-c, i-am-b, i-am-a" - ); - buffer_a.read_with(cx_a, |buf, _| assert!(!buf.is_dirty())); - buffer_b.read_with(cx_b, |buf, _| assert!(!buf.is_dirty())); - buffer_c.condition(cx_c, |buf, _| !buf.is_dirty()).await; - - worktree_a.flush_fs_events(cx_a).await; - - // Make changes on host's file system, see those changes on guest worktrees. 
- fs.rename( - "/a/file1".as_ref(), - "/a/file1-renamed".as_ref(), - Default::default(), - ) - .await - .unwrap(); - - fs.rename("/a/file2".as_ref(), "/a/file3".as_ref(), Default::default()) - .await - .unwrap(); - fs.insert_file(Path::new("/a/file4"), "4".into()).await; - - worktree_a - .condition(&cx_a, |tree, _| { - tree.paths() - .map(|p| p.to_string_lossy()) - .collect::>() - == [".zed.toml", "file1-renamed", "file3", "file4"] - }) - .await; - worktree_b - .condition(&cx_b, |tree, _| { - tree.paths() - .map(|p| p.to_string_lossy()) - .collect::>() - == [".zed.toml", "file1-renamed", "file3", "file4"] - }) - .await; - worktree_c - .condition(&cx_c, |tree, _| { - tree.paths() - .map(|p| p.to_string_lossy()) - .collect::>() - == [".zed.toml", "file1-renamed", "file3", "file4"] - }) - .await; - - // Ensure buffer files are updated as well. - buffer_a - .condition(&cx_a, |buf, _| { - buf.file().unwrap().path().to_str() == Some("file1-renamed") - }) - .await; - buffer_b - .condition(&cx_b, |buf, _| { - buf.file().unwrap().path().to_str() == Some("file1-renamed") - }) - .await; - buffer_c - .condition(&cx_c, |buf, _| { - buf.file().unwrap().path().to_str() == Some("file1-renamed") - }) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_buffer_conflict_after_save(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/dir", - json!({ - ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, - "a.txt": "a-contents", - }), - ) - .await; - - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/dir", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Open a buffer as client B - let buffer_b = project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - - buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "world ", cx)); - buffer_b.read_with(cx_b, |buf, _| { - assert!(buf.is_dirty()); - assert!(!buf.has_conflict()); - }); - - buffer_b.update(cx_b, |buf, cx| buf.save(cx)).await.unwrap(); - buffer_b - .condition(&cx_b, |buffer_b, _| !buffer_b.is_dirty()) - .await; - buffer_b.read_with(cx_b, |buf, _| { - assert!(!buf.has_conflict()); - }); - - buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "hello ", cx)); - buffer_b.read_with(cx_b, |buf, _| { - assert!(buf.is_dirty()); - assert!(!buf.has_conflict()); - }); - } - - #[gpui::test(iterations = 10)] - async fn test_buffer_reloading(cx_a: 
&mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/dir", - json!({ - ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, - "a.txt": "a-contents", - }), - ) - .await; - - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/dir", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - let _worktree_b = project_b.update(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); - - // Open a buffer as client B - let buffer_b = project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - buffer_b.read_with(cx_b, |buf, _| { - assert!(!buf.is_dirty()); - assert!(!buf.has_conflict()); - }); - - fs.save(Path::new("/dir/a.txt"), &"new contents".into()) - .await - .unwrap(); - buffer_b - .condition(&cx_b, |buf, _| { - buf.text() == "new contents" && !buf.is_dirty() - }) - .await; - buffer_b.read_with(cx_b, |buf, 
_| { - assert!(!buf.has_conflict()); - }); - } - - #[gpui::test(iterations = 10)] - async fn test_editing_while_guest_opens_buffer( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/dir", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.txt": "a-contents", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/dir", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Open a buffer as client A - let buffer_a = project_a - .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) - .await - .unwrap(); - - // Start opening the same buffer as client B - let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); - - // Edit the buffer as client A while client B is still opening it. 
- cx_b.background().simulate_random_delay().await; - buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "X", cx)); - cx_b.background().simulate_random_delay().await; - buffer_a.update(cx_a, |buf, cx| buf.edit([1..1], "Y", cx)); - - let text = buffer_a.read_with(cx_a, |buf, _| buf.text()); - let buffer_b = buffer_b.await.unwrap(); - buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await; - } - - #[gpui::test(iterations = 10)] - async fn test_leaving_worktree_while_opening_buffer( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/dir", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.txt": "a-contents", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/dir", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join that project as client B - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // See that a guest has joined as client A. 
- project_a - .condition(&cx_a, |p, _| p.collaborators().len() == 1) - .await; - - // Begin opening a buffer as client B, but leave the project before the open completes. - let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); - cx_b.update(|_| drop(project_b)); - drop(buffer_b); - - // See that the guest has left. - project_a - .condition(&cx_a, |p, _| p.collaborators().len() == 0) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_leaving_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.txt": "a-contents", - "b.txt": "b-contents", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a - .update(cx_a, |project, _| project.next_remote_id()) - .await; - project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - - // Join that project as client B - let _project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Client A sees that a guest has 
joined. - project_a - .condition(cx_a, |p, _| p.collaborators().len() == 1) - .await; - - // Drop client B's connection and ensure client A observes client B leaving the project. - client_b.disconnect(&cx_b.to_async()).unwrap(); - project_a - .condition(cx_a, |p, _| p.collaborators().len() == 0) - .await; - - // Rejoin the project as client B - let _project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Client A sees that a guest has re-joined. - project_a - .condition(cx_a, |p, _| p.collaborators().len() == 1) - .await; - - // Simulate connection loss for client B and ensure client A observes client B leaving the project. - client_b.wait_for_current_user(cx_b).await; - server.disconnect_client(client_b.current_user_id(cx_b)); - cx_a.foreground().advance_clock(Duration::from_secs(3)); - project_a - .condition(cx_a, |p, _| p.collaborators().len() == 0) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_collaborating_with_diagnostics( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.rs": "let one = two", - "other.rs": "", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Cause the language server to start. - let _ = cx_a - .background() - .spawn(project_a.update(cx_a, |project, cx| { - project.open_buffer( - ProjectPath { - worktree_id, - path: Path::new("other.rs").into(), - }, - cx, - ) - })) - .await - .unwrap(); - - // Simulate a language server reporting errors for a file. - let mut fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server - .receive_notification::() - .await; - fake_language_server.notify::( - lsp::PublishDiagnosticsParams { - uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), - version: None, - diagnostics: vec![lsp::Diagnostic { - severity: Some(lsp::DiagnosticSeverity::ERROR), - range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)), - message: "message 1".to_string(), - ..Default::default() - }], - }, - ); - - // Wait for server to see the diagnostics update. 
- server - .condition(|store| { - let worktree = store - .project(project_id) - .unwrap() - .share - .as_ref() - .unwrap() - .worktrees - .get(&worktree_id.to_proto()) - .unwrap(); - - !worktree.diagnostic_summaries.is_empty() - }) - .await; - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - project_b.read_with(cx_b, |project, cx| { - assert_eq!( - project.diagnostic_summaries(cx).collect::>(), - &[( - ProjectPath { - worktree_id, - path: Arc::from(Path::new("a.rs")), - }, - DiagnosticSummary { - error_count: 1, - warning_count: 0, - ..Default::default() - }, - )] - ) - }); - - // Simulate a language server reporting more errors for a file. - fake_language_server.notify::( - lsp::PublishDiagnosticsParams { - uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), - version: None, - diagnostics: vec![ - lsp::Diagnostic { - severity: Some(lsp::DiagnosticSeverity::ERROR), - range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)), - message: "message 1".to_string(), - ..Default::default() - }, - lsp::Diagnostic { - severity: Some(lsp::DiagnosticSeverity::WARNING), - range: lsp::Range::new( - lsp::Position::new(0, 10), - lsp::Position::new(0, 13), - ), - message: "message 2".to_string(), - ..Default::default() - }, - ], - }, - ); - - // Client b gets the updated summaries - project_b - .condition(&cx_b, |project, cx| { - project.diagnostic_summaries(cx).collect::>() - == &[( - ProjectPath { - worktree_id, - path: Arc::from(Path::new("a.rs")), - }, - DiagnosticSummary { - error_count: 1, - warning_count: 1, - ..Default::default() - }, - )] - }) - .await; - - // Open the file with the errors on client B. They should be present. 
- let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) - .await - .unwrap(); - - buffer_b.read_with(cx_b, |buffer, _| { - assert_eq!( - buffer - .snapshot() - .diagnostics_in_range::<_, Point>(0..buffer.len(), false) - .map(|entry| entry) - .collect::>(), - &[ - DiagnosticEntry { - range: Point::new(0, 4)..Point::new(0, 7), - diagnostic: Diagnostic { - group_id: 0, - message: "message 1".to_string(), - severity: lsp::DiagnosticSeverity::ERROR, - is_primary: true, - ..Default::default() - } - }, - DiagnosticEntry { - range: Point::new(0, 10)..Point::new(0, 13), - diagnostic: Diagnostic { - group_id: 1, - severity: lsp::DiagnosticSeverity::WARNING, - message: "message 2".to_string(), - is_primary: true, - ..Default::default() - } - } - ] - ); - }); - - // Simulate a language server reporting no errors for a file. - fake_language_server.notify::( - lsp::PublishDiagnosticsParams { - uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), - version: None, - diagnostics: vec![], - }, - ); - project_a - .condition(cx_a, |project, cx| { - project.diagnostic_summaries(cx).collect::>() == &[] - }) - .await; - project_b - .condition(cx_b, |project, cx| { - project.diagnostic_summaries(cx).collect::>() == &[] - }) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_collaborating_with_completion( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Set up a fake language server. 
- let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { - capabilities: lsp::ServerCapabilities { - completion_provider: Some(lsp::CompletionOptions { - trigger_characters: Some(vec![".".to_string()]), - ..Default::default() - }), - ..Default::default() - }, - ..Default::default() - }); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "main.rs": "fn main() { a }", - "other.rs": "", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Open a file in an editor as the guest. 
- let buffer_b = project_b - .update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx)) - .await - .unwrap(); - let (window_b, _) = cx_b.add_window(|_| EmptyView); - let editor_b = cx_b.add_view(window_b, |cx| { - Editor::for_buffer(buffer_b.clone(), Some(project_b.clone()), cx) - }); - - let fake_language_server = fake_language_servers.next().await.unwrap(); - buffer_b - .condition(&cx_b, |buffer, _| !buffer.completion_triggers().is_empty()) - .await; - - // Type a completion trigger character as the guest. - editor_b.update(cx_b, |editor, cx| { - editor.select_ranges([13..13], None, cx); - editor.handle_input(&Input(".".into()), cx); - cx.focus(&editor_b); - }); - - // Receive a completion request as the host's language server. - // Return some completions from the host's language server. - cx_a.foreground().start_waiting(); - fake_language_server - .handle_request::(|params, _| async move { - assert_eq!( - params.text_document_position.text_document.uri, - lsp::Url::from_file_path("/a/main.rs").unwrap(), - ); - assert_eq!( - params.text_document_position.position, - lsp::Position::new(0, 14), - ); - - Ok(Some(lsp::CompletionResponse::Array(vec![ - lsp::CompletionItem { - label: "first_method(…)".into(), - detail: Some("fn(&mut self, B) -> C".into()), - text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { - new_text: "first_method($1)".to_string(), - range: lsp::Range::new( - lsp::Position::new(0, 14), - lsp::Position::new(0, 14), - ), - })), - insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), - ..Default::default() - }, - lsp::CompletionItem { - label: "second_method(…)".into(), - detail: Some("fn(&mut self, C) -> D".into()), - text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { - new_text: "second_method()".to_string(), - range: lsp::Range::new( - lsp::Position::new(0, 14), - lsp::Position::new(0, 14), - ), - })), - insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), - ..Default::default() - }, - ]))) - }) - .next() - 
.await - .unwrap(); - cx_a.foreground().finish_waiting(); - - // Open the buffer on the host. - let buffer_a = project_a - .update(cx_a, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx)) - .await - .unwrap(); - buffer_a - .condition(&cx_a, |buffer, _| buffer.text() == "fn main() { a. }") - .await; - - // Confirm a completion on the guest. - editor_b - .condition(&cx_b, |editor, _| editor.context_menu_visible()) - .await; - editor_b.update(cx_b, |editor, cx| { - editor.confirm_completion(&ConfirmCompletion { item_ix: Some(0) }, cx); - assert_eq!(editor.text(cx), "fn main() { a.first_method() }"); - }); - - // Return a resolved completion from the host's language server. - // The resolved completion has an additional text edit. - fake_language_server.handle_request::( - |params, _| async move { - assert_eq!(params.label, "first_method(…)"); - Ok(lsp::CompletionItem { - label: "first_method(…)".into(), - detail: Some("fn(&mut self, B) -> C".into()), - text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { - new_text: "first_method($1)".to_string(), - range: lsp::Range::new( - lsp::Position::new(0, 14), - lsp::Position::new(0, 14), - ), - })), - additional_text_edits: Some(vec![lsp::TextEdit { - new_text: "use d::SomeTrait;\n".to_string(), - range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 0)), - }]), - insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), - ..Default::default() - }) - }, - ); - - // The additional edit is applied. 
- buffer_a - .condition(&cx_a, |buffer, _| { - buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" - }) - .await; - buffer_b - .condition(&cx_b, |buffer, _| { - buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" - }) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_reloading_buffer_manually(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.rs": "let one = 1;", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - let buffer_a = project_a - .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx)) - .await - .unwrap(); - - // Join the worktree as client B. 
- let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) - .await - .unwrap(); - buffer_b.update(cx_b, |buffer, cx| { - buffer.edit([4..7], "six", cx); - buffer.edit([10..11], "6", cx); - assert_eq!(buffer.text(), "let six = 6;"); - assert!(buffer.is_dirty()); - assert!(!buffer.has_conflict()); - }); - buffer_a - .condition(cx_a, |buffer, _| buffer.text() == "let six = 6;") - .await; - - fs.save(Path::new("/a/a.rs"), &Rope::from("let seven = 7;")) - .await - .unwrap(); - buffer_a - .condition(cx_a, |buffer, _| buffer.has_conflict()) - .await; - buffer_b - .condition(cx_b, |buffer, _| buffer.has_conflict()) - .await; - - project_b - .update(cx_b, |project, cx| { - project.reload_buffers(HashSet::from_iter([buffer_b.clone()]), true, cx) - }) - .await - .unwrap(); - buffer_a.read_with(cx_a, |buffer, _| { - assert_eq!(buffer.text(), "let seven = 7;"); - assert!(!buffer.is_dirty()); - assert!(!buffer.has_conflict()); - }); - buffer_b.read_with(cx_b, |buffer, _| { - assert_eq!(buffer.text(), "let seven = 7;"); - assert!(!buffer.is_dirty()); - assert!(!buffer.has_conflict()); - }); - - buffer_a.update(cx_a, |buffer, cx| { - // Undoing on the host is a no-op when the reload was initiated by the guest. - buffer.undo(cx); - assert_eq!(buffer.text(), "let seven = 7;"); - assert!(!buffer.is_dirty()); - assert!(!buffer.has_conflict()); - }); - buffer_b.update(cx_b, |buffer, cx| { - // Undoing on the guest rolls back the buffer to before it was reloaded but the conflict gets cleared. 
- buffer.undo(cx); - assert_eq!(buffer.text(), "let six = 6;"); - assert!(buffer.is_dirty()); - assert!(!buffer.has_conflict()); - }); - } - - #[gpui::test(iterations = 10)] - async fn test_formatting_buffer(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.rs": "let one = two", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. 
- let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) - .await - .unwrap(); - - let fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server.handle_request::(|_, _| async move { - Ok(Some(vec![ - lsp::TextEdit { - range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 4)), - new_text: "h".to_string(), - }, - lsp::TextEdit { - range: lsp::Range::new(lsp::Position::new(0, 7), lsp::Position::new(0, 7)), - new_text: "y".to_string(), - }, - ])) - }); - - project_b - .update(cx_b, |project, cx| { - project.format(HashSet::from_iter([buffer_b.clone()]), true, cx) - }) - .await - .unwrap(); - assert_eq!( - buffer_b.read_with(cx_b, |buffer, _| buffer.text()), - "let honey = two" - ); - } - - #[gpui::test(iterations = 10)] - async fn test_definition(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - fs.insert_tree( - "/root-1", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.rs": "const ONE: usize = b::TWO + b::THREE;", - }), - ) - .await; - fs.insert_tree( - "/root-2", - json!({ - "b.rs": "const TWO: usize = 2;\nconst THREE: usize = 3;", - }), - ) - .await; - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-1", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Open the file on client B. - let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) - .await - .unwrap(); - - // Request the definition of a symbol as the guest. 
- let fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server.handle_request::( - |_, _| async move { - Ok(Some(lsp::GotoDefinitionResponse::Scalar( - lsp::Location::new( - lsp::Url::from_file_path("/root-2/b.rs").unwrap(), - lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), - ), - ))) - }, - ); - - let definitions_1 = project_b - .update(cx_b, |p, cx| p.definition(&buffer_b, 23, cx)) - .await - .unwrap(); - cx_b.read(|cx| { - assert_eq!(definitions_1.len(), 1); - assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); - let target_buffer = definitions_1[0].buffer.read(cx); - assert_eq!( - target_buffer.text(), - "const TWO: usize = 2;\nconst THREE: usize = 3;" - ); - assert_eq!( - definitions_1[0].range.to_point(target_buffer), - Point::new(0, 6)..Point::new(0, 9) - ); - }); - - // Try getting more definitions for the same buffer, ensuring the buffer gets reused from - // the previous call to `definition`. - fake_language_server.handle_request::( - |_, _| async move { - Ok(Some(lsp::GotoDefinitionResponse::Scalar( - lsp::Location::new( - lsp::Url::from_file_path("/root-2/b.rs").unwrap(), - lsp::Range::new(lsp::Position::new(1, 6), lsp::Position::new(1, 11)), - ), - ))) - }, - ); - - let definitions_2 = project_b - .update(cx_b, |p, cx| p.definition(&buffer_b, 33, cx)) - .await - .unwrap(); - cx_b.read(|cx| { - assert_eq!(definitions_2.len(), 1); - assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); - let target_buffer = definitions_2[0].buffer.read(cx); - assert_eq!( - target_buffer.text(), - "const TWO: usize = 2;\nconst THREE: usize = 3;" - ); - assert_eq!( - definitions_2[0].range.to_point(target_buffer), - Point::new(1, 6)..Point::new(1, 11) - ); - }); - assert_eq!(definitions_1[0].buffer, definitions_2[0].buffer); - } - - #[gpui::test(iterations = 10)] - async fn test_references(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = 
Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - fs.insert_tree( - "/root-1", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "one.rs": "const ONE: usize = 1;", - "two.rs": "const TWO: usize = one::ONE + one::ONE;", - }), - ) - .await; - fs.insert_tree( - "/root-2", - json!({ - "three.rs": "const THREE: usize = two::TWO + one::ONE;", - }), - ) - .await; - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-1", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Open the file on client B. 
- let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) - .await - .unwrap(); - - // Request references to a symbol as the guest. - let fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server.handle_request::( - |params, _| async move { - assert_eq!( - params.text_document_position.text_document.uri.as_str(), - "file:///root-1/one.rs" - ); - Ok(Some(vec![ - lsp::Location { - uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), - range: lsp::Range::new( - lsp::Position::new(0, 24), - lsp::Position::new(0, 27), - ), - }, - lsp::Location { - uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), - range: lsp::Range::new( - lsp::Position::new(0, 35), - lsp::Position::new(0, 38), - ), - }, - lsp::Location { - uri: lsp::Url::from_file_path("/root-2/three.rs").unwrap(), - range: lsp::Range::new( - lsp::Position::new(0, 37), - lsp::Position::new(0, 40), - ), - }, - ])) - }, - ); - - let references = project_b - .update(cx_b, |p, cx| p.references(&buffer_b, 7, cx)) - .await - .unwrap(); - cx_b.read(|cx| { - assert_eq!(references.len(), 3); - assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); - - let two_buffer = references[0].buffer.read(cx); - let three_buffer = references[2].buffer.read(cx); - assert_eq!( - two_buffer.file().unwrap().path().as_ref(), - Path::new("two.rs") - ); - assert_eq!(references[1].buffer, references[0].buffer); - assert_eq!( - three_buffer.file().unwrap().full_path(cx), - Path::new("three.rs") - ); - - assert_eq!(references[0].range.to_offset(&two_buffer), 24..27); - assert_eq!(references[1].range.to_offset(&two_buffer), 35..38); - assert_eq!(references[2].range.to_offset(&three_buffer), 37..40); - }); - } - - #[gpui::test(iterations = 10)] - async fn test_project_search(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = 
FakeFs::new(cx_a.background()); - fs.insert_tree( - "/root-1", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a": "hello world", - "b": "goodnight moon", - "c": "a world of goo", - "d": "world champion of clown world", - }), - ) - .await; - fs.insert_tree( - "/root-2", - json!({ - "e": "disney world is fun", - }), - ) - .await; - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - - let (worktree_1, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-1", true, cx) - }) - .await - .unwrap(); - worktree_1 - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let (worktree_2, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-2", true, cx) - }) - .await - .unwrap(); - worktree_2 - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. 
- let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - let results = project_b - .update(cx_b, |project, cx| { - project.search(SearchQuery::text("world", false, false), cx) - }) - .await - .unwrap(); - - let mut ranges_by_path = results - .into_iter() - .map(|(buffer, ranges)| { - buffer.read_with(cx_b, |buffer, cx| { - let path = buffer.file().unwrap().full_path(cx); - let offset_ranges = ranges - .into_iter() - .map(|range| range.to_offset(buffer)) - .collect::>(); - (path, offset_ranges) - }) - }) - .collect::>(); - ranges_by_path.sort_by_key(|(path, _)| path.clone()); - - assert_eq!( - ranges_by_path, - &[ - (PathBuf::from("root-1/a"), vec![6..11]), - (PathBuf::from("root-1/c"), vec![2..7]), - (PathBuf::from("root-1/d"), vec![0..5, 24..29]), - (PathBuf::from("root-2/e"), vec![7..12]), - ] - ); - } - - #[gpui::test(iterations = 10)] - async fn test_document_highlights(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - fs.insert_tree( - "/root-1", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "main.rs": "fn double(number: i32) -> i32 { number + number }", - }), - ) - .await; - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root-1", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Open the file on client B. - let buffer_b = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx))) - .await - .unwrap(); - - // Request document highlights as the guest. 
- let fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server.handle_request::( - |params, _| async move { - assert_eq!( - params - .text_document_position_params - .text_document - .uri - .as_str(), - "file:///root-1/main.rs" - ); - assert_eq!( - params.text_document_position_params.position, - lsp::Position::new(0, 34) - ); - Ok(Some(vec![ - lsp::DocumentHighlight { - kind: Some(lsp::DocumentHighlightKind::WRITE), - range: lsp::Range::new( - lsp::Position::new(0, 10), - lsp::Position::new(0, 16), - ), - }, - lsp::DocumentHighlight { - kind: Some(lsp::DocumentHighlightKind::READ), - range: lsp::Range::new( - lsp::Position::new(0, 32), - lsp::Position::new(0, 38), - ), - }, - lsp::DocumentHighlight { - kind: Some(lsp::DocumentHighlightKind::READ), - range: lsp::Range::new( - lsp::Position::new(0, 41), - lsp::Position::new(0, 47), - ), - }, - ])) - }, - ); - - let highlights = project_b - .update(cx_b, |p, cx| p.document_highlights(&buffer_b, 34, cx)) - .await - .unwrap(); - buffer_b.read_with(cx_b, |buffer, _| { - let snapshot = buffer.snapshot(); - - let highlights = highlights - .into_iter() - .map(|highlight| (highlight.kind, highlight.range.to_offset(&snapshot))) - .collect::>(); - assert_eq!( - highlights, - &[ - (lsp::DocumentHighlightKind::WRITE, 10..16), - (lsp::DocumentHighlightKind::READ, 32..38), - (lsp::DocumentHighlightKind::READ, 41..47) - ] - ) - }); - } - - #[gpui::test(iterations = 10)] - async fn test_project_symbols(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - fs.insert_tree( - "/code", - json!({ - "crate-1": { - ".zed.toml": r#"collaborators = ["user_b"]"#, - "one.rs": "const ONE: usize = 1;", - }, - "crate-2": { - "two.rs": "const TWO: usize = 2; const THREE: usize = 3;", - }, - "private": { - "passwords.txt": "the-password", - } - }), - ) - .await; - - // Set 
up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/code/crate-1", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - // Cause the language server to start. 
- let _buffer = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) - .await - .unwrap(); - - let fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server.handle_request::( - |_, _| async move { - #[allow(deprecated)] - Ok(Some(vec![lsp::SymbolInformation { - name: "TWO".into(), - location: lsp::Location { - uri: lsp::Url::from_file_path("/code/crate-2/two.rs").unwrap(), - range: lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), - }, - kind: lsp::SymbolKind::CONSTANT, - tags: None, - container_name: None, - deprecated: None, - }])) - }, - ); - - // Request the definition of a symbol as the guest. - let symbols = project_b - .update(cx_b, |p, cx| p.symbols("two", cx)) - .await - .unwrap(); - assert_eq!(symbols.len(), 1); - assert_eq!(symbols[0].name, "TWO"); - - // Open one of the returned symbols. - let buffer_b_2 = project_b - .update(cx_b, |project, cx| { - project.open_buffer_for_symbol(&symbols[0], cx) - }) - .await - .unwrap(); - buffer_b_2.read_with(cx_b, |buffer, _| { - assert_eq!( - buffer.file().unwrap().path().as_ref(), - Path::new("../crate-2/two.rs") - ); - }); - - // Attempt to craft a symbol and violate host's privacy by opening an arbitrary file. 
- let mut fake_symbol = symbols[0].clone(); - fake_symbol.path = Path::new("/code/secrets").into(); - let error = project_b - .update(cx_b, |project, cx| { - project.open_buffer_for_symbol(&fake_symbol, cx) - }) - .await - .unwrap_err(); - assert!(error.to_string().contains("invalid symbol signature")); - } - - #[gpui::test(iterations = 10)] - async fn test_open_buffer_while_getting_definition_pointing_to_it( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - mut rng: StdRng, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - fs.insert_tree( - "/root", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "a.rs": "const ONE: usize = b::TWO;", - "b.rs": "const TWO: usize = 2", - }), - ) - .await; - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/root", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. - let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - let buffer_b1 = cx_b - .background() - .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) - .await - .unwrap(); - - let fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server.handle_request::( - |_, _| async move { - Ok(Some(lsp::GotoDefinitionResponse::Scalar( - lsp::Location::new( - lsp::Url::from_file_path("/root/b.rs").unwrap(), - lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), - ), - ))) - }, - ); - - let definitions; - let buffer_b2; - if rng.gen() { - definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); - buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.rs"), cx)); - } else { - buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.rs"), cx)); - definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); - } - - let buffer_b2 = 
buffer_b2.await.unwrap(); - let definitions = definitions.await.unwrap(); - assert_eq!(definitions.len(), 1); - assert_eq!(definitions[0].buffer, buffer_b2); - } - - #[gpui::test(iterations = 10)] - async fn test_collaborating_with_code_actions( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - cx_b.update(|cx| editor::init(cx)); - - // Set up a fake language server. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "main.rs": "mod other;\nfn main() { let foo = other::foo(); }", - "other.rs": "pub fn foo() -> usize { 4 }", - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. 
- let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - let mut params = cx_b.update(WorkspaceParams::test); - params.languages = lang_registry.clone(); - params.client = client_b.client.clone(); - params.user_store = client_b.user_store.clone(); - params.project = project_b; - - let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); - let editor_b = workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "main.rs"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - let mut fake_language_server = fake_language_servers.next().await.unwrap(); - fake_language_server - .handle_request::(|params, _| async move { - assert_eq!( - params.text_document.uri, - lsp::Url::from_file_path("/a/main.rs").unwrap(), - ); - assert_eq!(params.range.start, lsp::Position::new(0, 0)); - assert_eq!(params.range.end, lsp::Position::new(0, 0)); - Ok(None) - }) - .next() - .await; - - // Move cursor to a location that contains code actions. 
- editor_b.update(cx_b, |editor, cx| { - editor.select_ranges([Point::new(1, 31)..Point::new(1, 31)], None, cx); - cx.focus(&editor_b); - }); - - fake_language_server - .handle_request::(|params, _| async move { - assert_eq!( - params.text_document.uri, - lsp::Url::from_file_path("/a/main.rs").unwrap(), - ); - assert_eq!(params.range.start, lsp::Position::new(1, 31)); - assert_eq!(params.range.end, lsp::Position::new(1, 31)); - - Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( - lsp::CodeAction { - title: "Inline into all callers".to_string(), - edit: Some(lsp::WorkspaceEdit { - changes: Some( - [ - ( - lsp::Url::from_file_path("/a/main.rs").unwrap(), - vec![lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(1, 22), - lsp::Position::new(1, 34), - ), - "4".to_string(), - )], - ), - ( - lsp::Url::from_file_path("/a/other.rs").unwrap(), - vec![lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(0, 0), - lsp::Position::new(0, 27), - ), - "".to_string(), - )], - ), - ] - .into_iter() - .collect(), - ), - ..Default::default() - }), - data: Some(json!({ - "codeActionParams": { - "range": { - "start": {"line": 1, "column": 31}, - "end": {"line": 1, "column": 31}, - } - } - })), - ..Default::default() - }, - )])) - }) - .next() - .await; - - // Toggle code actions and wait for them to display. - editor_b.update(cx_b, |editor, cx| { - editor.toggle_code_actions( - &ToggleCodeActions { - deployed_from_indicator: false, - }, - cx, - ); - }); - editor_b - .condition(&cx_b, |editor, _| editor.context_menu_visible()) - .await; - - fake_language_server.remove_request_handler::(); - - // Confirming the code action will trigger a resolve request. 
- let confirm_action = workspace_b - .update(cx_b, |workspace, cx| { - Editor::confirm_code_action(workspace, &ConfirmCodeAction { item_ix: Some(0) }, cx) - }) - .unwrap(); - fake_language_server.handle_request::( - |_, _| async move { - Ok(lsp::CodeAction { - title: "Inline into all callers".to_string(), - edit: Some(lsp::WorkspaceEdit { - changes: Some( - [ - ( - lsp::Url::from_file_path("/a/main.rs").unwrap(), - vec![lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(1, 22), - lsp::Position::new(1, 34), - ), - "4".to_string(), - )], - ), - ( - lsp::Url::from_file_path("/a/other.rs").unwrap(), - vec![lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(0, 0), - lsp::Position::new(0, 27), - ), - "".to_string(), - )], - ), - ] - .into_iter() - .collect(), - ), - ..Default::default() - }), - ..Default::default() - }) - }, - ); - - // After the action is confirmed, an editor containing both modified files is opened. - confirm_action.await.unwrap(); - let code_action_editor = workspace_b.read_with(cx_b, |workspace, cx| { - workspace - .active_item(cx) - .unwrap() - .downcast::() - .unwrap() - }); - code_action_editor.update(cx_b, |editor, cx| { - assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); - editor.undo(&Undo, cx); - assert_eq!( - editor.text(cx), - "pub fn foo() -> usize { 4 }\nmod other;\nfn main() { let foo = other::foo(); }" - ); - editor.redo(&Redo, cx); - assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); - }); - } - - #[gpui::test(iterations = 10)] - async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - cx_b.update(|cx| editor::init(cx)); - - // Set up a fake language server. 
- let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - Some(tree_sitter_rust::language()), - ); - let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { - capabilities: lsp::ServerCapabilities { - rename_provider: Some(lsp::OneOf::Right(lsp::RenameOptions { - prepare_provider: Some(true), - work_done_progress_options: Default::default(), - })), - ..Default::default() - }, - ..Default::default() - }); - lang_registry.add(Arc::new(language)); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Share a project as client A - fs.insert_tree( - "/dir", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "one.rs": "const ONE: usize = 1;", - "two.rs": "const TWO: usize = one::ONE + one::ONE;" - }), - ) - .await; - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/dir", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); - project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - - // Join the worktree as client B. 
- let project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - let mut params = cx_b.update(WorkspaceParams::test); - params.languages = lang_registry.clone(); - params.client = client_b.client.clone(); - params.user_store = client_b.user_store.clone(); - params.project = project_b; - - let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); - let editor_b = workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "one.rs"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - let fake_language_server = fake_language_servers.next().await.unwrap(); - - // Move cursor to a location that can be renamed. - let prepare_rename = editor_b.update(cx_b, |editor, cx| { - editor.select_ranges([7..7], None, cx); - editor.rename(&Rename, cx).unwrap() - }); - - fake_language_server - .handle_request::(|params, _| async move { - assert_eq!(params.text_document.uri.as_str(), "file:///dir/one.rs"); - assert_eq!(params.position, lsp::Position::new(0, 7)); - Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( - lsp::Position::new(0, 6), - lsp::Position::new(0, 9), - )))) - }) - .next() - .await - .unwrap(); - prepare_rename.await.unwrap(); - editor_b.update(cx_b, |editor, cx| { - let rename = editor.pending_rename().unwrap(); - let buffer = editor.buffer().read(cx).snapshot(cx); - assert_eq!( - rename.range.start.to_offset(&buffer)..rename.range.end.to_offset(&buffer), - 6..9 - ); - rename.editor.update(cx, |rename_editor, cx| { - rename_editor.buffer().update(cx, |rename_buffer, cx| { - rename_buffer.edit([0..3], "THREE", cx); - }); - }); - }); - - let confirm_rename = workspace_b.update(cx_b, |workspace, cx| { - Editor::confirm_rename(workspace, &ConfirmRename, cx).unwrap() - }); - fake_language_server - .handle_request::(|params, _| async move { - assert_eq!( - 
params.text_document_position.text_document.uri.as_str(), - "file:///dir/one.rs" - ); - assert_eq!( - params.text_document_position.position, - lsp::Position::new(0, 6) - ); - assert_eq!(params.new_name, "THREE"); - Ok(Some(lsp::WorkspaceEdit { - changes: Some( - [ - ( - lsp::Url::from_file_path("/dir/one.rs").unwrap(), - vec![lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(0, 6), - lsp::Position::new(0, 9), - ), - "THREE".to_string(), - )], - ), - ( - lsp::Url::from_file_path("/dir/two.rs").unwrap(), - vec![ - lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(0, 24), - lsp::Position::new(0, 27), - ), - "THREE".to_string(), - ), - lsp::TextEdit::new( - lsp::Range::new( - lsp::Position::new(0, 35), - lsp::Position::new(0, 38), - ), - "THREE".to_string(), - ), - ], - ), - ] - .into_iter() - .collect(), - ), - ..Default::default() - })) - }) - .next() - .await - .unwrap(); - confirm_rename.await.unwrap(); - - let rename_editor = workspace_b.read_with(cx_b, |workspace, cx| { - workspace - .active_item(cx) - .unwrap() - .downcast::() - .unwrap() - }); - rename_editor.update(cx_b, |editor, cx| { - assert_eq!( - editor.text(cx), - "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" - ); - editor.undo(&Undo, cx); - assert_eq!( - editor.text(cx), - "const TWO: usize = one::ONE + one::ONE;\nconst ONE: usize = 1;" - ); - editor.redo(&Redo, cx); - assert_eq!( - editor.text(cx), - "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" - ); - }); - - // Ensure temporary rename edits cannot be undone/redone. 
- editor_b.update(cx_b, |editor, cx| { - editor.undo(&Undo, cx); - assert_eq!(editor.text(cx), "const ONE: usize = 1;"); - editor.undo(&Undo, cx); - assert_eq!(editor.text(cx), "const ONE: usize = 1;"); - editor.redo(&Redo, cx); - assert_eq!(editor.text(cx), "const THREE: usize = 1;"); - }) - } - - #[gpui::test(iterations = 10)] - async fn test_basic_chat(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - - // Create an org that includes these 2 users. - let db = &server.app_state.db; - let org_id = db.create_org("Test Org", "test-org").await.unwrap(); - db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) - .await - .unwrap(); - db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) - .await - .unwrap(); - - // Create a channel that includes all the users. 
- let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); - db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) - .await - .unwrap(); - db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) - .await - .unwrap(); - db.create_channel_message( - channel_id, - client_b.current_user_id(&cx_b), - "hello A, it's B.", - OffsetDateTime::now_utc(), - 1, - ) - .await - .unwrap(); - - let channels_a = cx_a - .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); - channels_a - .condition(cx_a, |list, _| list.available_channels().is_some()) - .await; - channels_a.read_with(cx_a, |list, _| { - assert_eq!( - list.available_channels().unwrap(), - &[ChannelDetails { - id: channel_id.to_proto(), - name: "test-channel".to_string() - }] - ) - }); - let channel_a = channels_a.update(cx_a, |this, cx| { - this.get_channel(channel_id.to_proto(), cx).unwrap() - }); - channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); - channel_a - .condition(&cx_a, |channel, _| { - channel_messages(channel) - == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] - }) - .await; - - let channels_b = cx_b - .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); - channels_b - .condition(cx_b, |list, _| list.available_channels().is_some()) - .await; - channels_b.read_with(cx_b, |list, _| { - assert_eq!( - list.available_channels().unwrap(), - &[ChannelDetails { - id: channel_id.to_proto(), - name: "test-channel".to_string() - }] - ) - }); - - let channel_b = channels_b.update(cx_b, |this, cx| { - this.get_channel(channel_id.to_proto(), cx).unwrap() - }); - channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); - channel_b - .condition(&cx_b, |channel, _| { - channel_messages(channel) - == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] - }) - .await; - - channel_a - .update(cx_a, |channel, cx| { - 
channel - .send_message("oh, hi B.".to_string(), cx) - .unwrap() - .detach(); - let task = channel.send_message("sup".to_string(), cx).unwrap(); - assert_eq!( - channel_messages(channel), - &[ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_a".to_string(), "oh, hi B.".to_string(), true), - ("user_a".to_string(), "sup".to_string(), true) - ] - ); - task - }) - .await - .unwrap(); - - channel_b - .condition(&cx_b, |channel, _| { - channel_messages(channel) - == [ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_a".to_string(), "oh, hi B.".to_string(), false), - ("user_a".to_string(), "sup".to_string(), false), - ] - }) - .await; - - assert_eq!( - server - .state() - .await - .channel(channel_id) - .unwrap() - .connection_ids - .len(), - 2 - ); - cx_b.update(|_| drop(channel_b)); - server - .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1) - .await; - - cx_a.update(|_| drop(channel_a)); - server - .condition(|state| state.channel(channel_id).is_none()) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_chat_message_validation(cx_a: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - - let db = &server.app_state.db; - let org_id = db.create_org("Test Org", "test-org").await.unwrap(); - let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); - db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) - .await - .unwrap(); - db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) - .await - .unwrap(); - - let channels_a = cx_a - .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); - channels_a - .condition(cx_a, |list, _| list.available_channels().is_some()) - .await; - let channel_a = channels_a.update(cx_a, |this, cx| { - 
this.get_channel(channel_id.to_proto(), cx).unwrap() - }); - - // Messages aren't allowed to be too long. - channel_a - .update(cx_a, |channel, cx| { - let long_body = "this is long.\n".repeat(1024); - channel.send_message(long_body, cx).unwrap() - }) - .await - .unwrap_err(); - - // Messages aren't allowed to be blank. - channel_a.update(cx_a, |channel, cx| { - channel.send_message(String::new(), cx).unwrap_err() - }); - - // Leading and trailing whitespace are trimmed. - channel_a - .update(cx_a, |channel, cx| { - channel - .send_message("\n surrounded by whitespace \n".to_string(), cx) - .unwrap() - }) - .await - .unwrap(); - assert_eq!( - db.get_channel_messages(channel_id, 10, None) - .await - .unwrap() - .iter() - .map(|m| &m.body) - .collect::>(), - &["surrounded by whitespace"] - ); - } - - #[gpui::test(iterations = 10)] - async fn test_chat_reconnection(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - - // Connect to a server as 2 clients. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - let mut status_b = client_b.status(); - - // Create an org that includes these 2 users. - let db = &server.app_state.db; - let org_id = db.create_org("Test Org", "test-org").await.unwrap(); - db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) - .await - .unwrap(); - db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) - .await - .unwrap(); - - // Create a channel that includes all the users. 
- let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); - db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) - .await - .unwrap(); - db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) - .await - .unwrap(); - db.create_channel_message( - channel_id, - client_b.current_user_id(&cx_b), - "hello A, it's B.", - OffsetDateTime::now_utc(), - 2, - ) - .await - .unwrap(); - - let channels_a = cx_a - .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); - channels_a - .condition(cx_a, |list, _| list.available_channels().is_some()) - .await; - - channels_a.read_with(cx_a, |list, _| { - assert_eq!( - list.available_channels().unwrap(), - &[ChannelDetails { - id: channel_id.to_proto(), - name: "test-channel".to_string() - }] - ) - }); - let channel_a = channels_a.update(cx_a, |this, cx| { - this.get_channel(channel_id.to_proto(), cx).unwrap() - }); - channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); - channel_a - .condition(&cx_a, |channel, _| { - channel_messages(channel) - == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] - }) - .await; - - let channels_b = cx_b - .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); - channels_b - .condition(cx_b, |list, _| list.available_channels().is_some()) - .await; - channels_b.read_with(cx_b, |list, _| { - assert_eq!( - list.available_channels().unwrap(), - &[ChannelDetails { - id: channel_id.to_proto(), - name: "test-channel".to_string() - }] - ) - }); - - let channel_b = channels_b.update(cx_b, |this, cx| { - this.get_channel(channel_id.to_proto(), cx).unwrap() - }); - channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); - channel_b - .condition(&cx_b, |channel, _| { - channel_messages(channel) - == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] - }) - .await; - - // Disconnect client B, ensuring we can still 
access its cached channel data. - server.forbid_connections(); - server.disconnect_client(client_b.current_user_id(&cx_b)); - cx_b.foreground().advance_clock(Duration::from_secs(3)); - while !matches!( - status_b.next().await, - Some(client::Status::ReconnectionError { .. }) - ) {} - - channels_b.read_with(cx_b, |channels, _| { - assert_eq!( - channels.available_channels().unwrap(), - [ChannelDetails { - id: channel_id.to_proto(), - name: "test-channel".to_string() - }] - ) - }); - channel_b.read_with(cx_b, |channel, _| { - assert_eq!( - channel_messages(channel), - [("user_b".to_string(), "hello A, it's B.".to_string(), false)] - ) - }); - - // Send a message from client B while it is disconnected. - channel_b - .update(cx_b, |channel, cx| { - let task = channel - .send_message("can you see this?".to_string(), cx) - .unwrap(); - assert_eq!( - channel_messages(channel), - &[ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_b".to_string(), "can you see this?".to_string(), true) - ] - ); - task - }) - .await - .unwrap_err(); - - // Send a message from client A while B is disconnected. - channel_a - .update(cx_a, |channel, cx| { - channel - .send_message("oh, hi B.".to_string(), cx) - .unwrap() - .detach(); - let task = channel.send_message("sup".to_string(), cx).unwrap(); - assert_eq!( - channel_messages(channel), - &[ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_a".to_string(), "oh, hi B.".to_string(), true), - ("user_a".to_string(), "sup".to_string(), true) - ] - ); - task - }) - .await - .unwrap(); - - // Give client B a chance to reconnect. - server.allow_connections(); - cx_b.foreground().advance_clock(Duration::from_secs(10)); - - // Verify that B sees the new messages upon reconnection, as well as the message client B - // sent while offline. 
- channel_b - .condition(&cx_b, |channel, _| { - channel_messages(channel) - == [ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_a".to_string(), "oh, hi B.".to_string(), false), - ("user_a".to_string(), "sup".to_string(), false), - ("user_b".to_string(), "can you see this?".to_string(), false), - ] - }) - .await; - - // Ensure client A and B can communicate normally after reconnection. - channel_a - .update(cx_a, |channel, cx| { - channel.send_message("you online?".to_string(), cx).unwrap() - }) - .await - .unwrap(); - channel_b - .condition(&cx_b, |channel, _| { - channel_messages(channel) - == [ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_a".to_string(), "oh, hi B.".to_string(), false), - ("user_a".to_string(), "sup".to_string(), false), - ("user_b".to_string(), "can you see this?".to_string(), false), - ("user_a".to_string(), "you online?".to_string(), false), - ] - }) - .await; - - channel_b - .update(cx_b, |channel, cx| { - channel.send_message("yep".to_string(), cx).unwrap() - }) - .await - .unwrap(); - channel_a - .condition(&cx_a, |channel, _| { - channel_messages(channel) - == [ - ("user_b".to_string(), "hello A, it's B.".to_string(), false), - ("user_a".to_string(), "oh, hi B.".to_string(), false), - ("user_a".to_string(), "sup".to_string(), false), - ("user_b".to_string(), "can you see this?".to_string(), false), - ("user_a".to_string(), "you online?".to_string(), false), - ("user_b".to_string(), "yep".to_string(), false), - ] - }) - .await; - } - - #[gpui::test(iterations = 10)] - async fn test_contacts( - cx_a: &mut TestAppContext, - cx_b: &mut TestAppContext, - cx_c: &mut TestAppContext, - ) { - cx_a.foreground().forbid_parking(); - let lang_registry = Arc::new(LanguageRegistry::test()); - let fs = FakeFs::new(cx_a.background()); - - // Connect to a server as 3 clients. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let client_a = server.create_client(cx_a, "user_a").await; - let client_b = server.create_client(cx_b, "user_b").await; - let client_c = server.create_client(cx_c, "user_c").await; - - // Share a worktree as client A. - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, - }), - ) - .await; - - let project_a = cx_a.update(|cx| { - Project::local( - client_a.clone(), - client_a.user_store.clone(), - lang_registry.clone(), - fs.clone(), - cx, - ) - }); - let (worktree_a, _) = project_a - .update(cx_a, |p, cx| { - p.find_or_create_local_worktree("/a", true, cx) - }) - .await - .unwrap(); - worktree_a - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - - client_a - .user_store - .condition(&cx_a, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] - }) - .await; - client_b - .user_store - .condition(&cx_b, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] - }) - .await; - client_c - .user_store - .condition(&cx_c, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] - }) - .await; - - let project_id = project_a - .update(cx_a, |project, _| project.next_remote_id()) - .await; - project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - client_a - .user_store - .condition(&cx_a, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] - }) - .await; - client_b - .user_store - .condition(&cx_b, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] - }) - .await; - client_c - .user_store - .condition(&cx_c, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] - }) - .await; - - let _project_b = Project::remote( - project_id, - client_b.clone(), - client_b.user_store.clone(), - 
lang_registry.clone(), - fs.clone(), - &mut cx_b.to_async(), - ) - .await - .unwrap(); - - client_a - .user_store - .condition(&cx_a, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] - }) - .await; - client_b - .user_store - .condition(&cx_b, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] - }) - .await; - client_c - .user_store - .condition(&cx_c, |user_store, _| { - contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] - }) - .await; - - project_a - .condition(&cx_a, |project, _| { - project.collaborators().contains_key(&client_b.peer_id) - }) - .await; - - cx_a.update(move |_| drop(project_a)); - client_a - .user_store - .condition(&cx_a, |user_store, _| contacts(user_store) == vec![]) - .await; - client_b - .user_store - .condition(&cx_b, |user_store, _| contacts(user_store) == vec![]) - .await; - client_c - .user_store - .condition(&cx_c, |user_store, _| contacts(user_store) == vec![]) - .await; - - fn contacts(user_store: &UserStore) -> Vec<(&str, Vec<(&str, bool, Vec<&str>)>)> { - user_store - .contacts() - .iter() - .map(|contact| { - let worktrees = contact - .projects - .iter() - .map(|p| { - ( - p.worktree_root_names[0].as_str(), - p.is_shared, - p.guests.iter().map(|p| p.github_login.as_str()).collect(), - ) - }) - .collect(); - (contact.user.github_login.as_str(), worktrees) - }) - .collect() - } - } - - #[gpui::test(iterations = 10)] - async fn test_following(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let fs = FakeFs::new(cx_a.background()); - - // 2 clients connect to a server. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let mut client_a = server.create_client(cx_a, "user_a").await; - let mut client_b = server.create_client(cx_b, "user_b").await; - cx_a.update(editor::init); - cx_b.update(editor::init); - - // Client A shares a project. 
- fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "1.txt": "one", - "2.txt": "two", - "3.txt": "three", - }), - ) - .await; - let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; - project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - - // Client B joins the project. - let project_b = client_b - .build_remote_project( - project_a - .read_with(cx_a, |project, _| project.remote_id()) - .unwrap(), - cx_b, - ) - .await; - - // Client A opens some editors. - let workspace_a = client_a.build_workspace(&project_a, cx_a); - let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); - let editor_a1 = workspace_a - .update(cx_a, |workspace, cx| { - workspace.open_path((worktree_id, "1.txt"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - let editor_a2 = workspace_a - .update(cx_a, |workspace, cx| { - workspace.open_path((worktree_id, "2.txt"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - // Client B opens an editor. - let workspace_b = client_b.build_workspace(&project_b, cx_b); - let editor_b1 = workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "1.txt"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - let client_a_id = project_b.read_with(cx_b, |project, _| { - project.collaborators().values().next().unwrap().peer_id - }); - let client_b_id = project_a.read_with(cx_a, |project, _| { - project.collaborators().values().next().unwrap().peer_id - }); - - // When client B starts following client A, all visible view states are replicated to client B. 
- editor_a1.update(cx_a, |editor, cx| editor.select_ranges([0..1], None, cx)); - editor_a2.update(cx_a, |editor, cx| editor.select_ranges([2..3], None, cx)); - workspace_b - .update(cx_b, |workspace, cx| { - workspace - .toggle_follow(&ToggleFollow(client_a_id), cx) - .unwrap() - }) - .await - .unwrap(); - let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { - workspace - .active_item(cx) - .unwrap() - .downcast::() - .unwrap() - }); - assert!(cx_b.read(|cx| editor_b2.is_focused(cx))); - assert_eq!( - editor_b2.read_with(cx_b, |editor, cx| editor.project_path(cx)), - Some((worktree_id, "2.txt").into()) - ); - assert_eq!( - editor_b2.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), - vec![2..3] - ); - assert_eq!( - editor_b1.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), - vec![0..1] - ); - - // When client A activates a different editor, client B does so as well. - workspace_a.update(cx_a, |workspace, cx| { - workspace.activate_item(&editor_a1, cx) - }); - workspace_b - .condition(cx_b, |workspace, cx| { - workspace.active_item(cx).unwrap().id() == editor_b1.id() - }) - .await; - - // When client A navigates back and forth, client B does so as well. - workspace_a - .update(cx_a, |workspace, cx| { - workspace::Pane::go_back(workspace, None, cx) - }) - .await; - workspace_b - .condition(cx_b, |workspace, cx| { - workspace.active_item(cx).unwrap().id() == editor_b2.id() - }) - .await; - - workspace_a - .update(cx_a, |workspace, cx| { - workspace::Pane::go_forward(workspace, None, cx) - }) - .await; - workspace_b - .condition(cx_b, |workspace, cx| { - workspace.active_item(cx).unwrap().id() == editor_b1.id() - }) - .await; - - // Changes to client A's editor are reflected on client B. 
- editor_a1.update(cx_a, |editor, cx| { - editor.select_ranges([1..1, 2..2], None, cx); - }); - editor_b1 - .condition(cx_b, |editor, cx| { - editor.selected_ranges(cx) == vec![1..1, 2..2] - }) - .await; - - editor_a1.update(cx_a, |editor, cx| editor.set_text("TWO", cx)); - editor_b1 - .condition(cx_b, |editor, cx| editor.text(cx) == "TWO") - .await; - - editor_a1.update(cx_a, |editor, cx| { - editor.select_ranges([3..3], None, cx); - editor.set_scroll_position(vec2f(0., 100.), cx); - }); - editor_b1 - .condition(cx_b, |editor, cx| editor.selected_ranges(cx) == vec![3..3]) - .await; - - // After unfollowing, client B stops receiving updates from client A. - workspace_b.update(cx_b, |workspace, cx| { - workspace.unfollow(&workspace.active_pane().clone(), cx) - }); - workspace_a.update(cx_a, |workspace, cx| { - workspace.activate_item(&editor_a2, cx) - }); - cx_a.foreground().run_until_parked(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .id()), - editor_b1.id() - ); - - // Client A starts following client B. - workspace_a - .update(cx_a, |workspace, cx| { - workspace - .toggle_follow(&ToggleFollow(client_b_id), cx) - .unwrap() - }) - .await - .unwrap(); - assert_eq!( - workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), - Some(client_b_id) - ); - assert_eq!( - workspace_a.read_with(cx_a, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .id()), - editor_a1.id() - ); - - // Following interrupts when client B disconnects. - client_b.disconnect(&cx_b.to_async()).unwrap(); - cx_a.foreground().run_until_parked(); - assert_eq!( - workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), - None - ); - } - - #[gpui::test(iterations = 10)] - async fn test_peers_following_each_other(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let fs = FakeFs::new(cx_a.background()); - - // 2 clients connect to a server. 
- let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let mut client_a = server.create_client(cx_a, "user_a").await; - let mut client_b = server.create_client(cx_b, "user_b").await; - cx_a.update(editor::init); - cx_b.update(editor::init); - - // Client A shares a project. - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "1.txt": "one", - "2.txt": "two", - "3.txt": "three", - "4.txt": "four", - }), - ) - .await; - let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; - project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - - // Client B joins the project. - let project_b = client_b - .build_remote_project( - project_a - .read_with(cx_a, |project, _| project.remote_id()) - .unwrap(), - cx_b, - ) - .await; - - // Client A opens some editors. - let workspace_a = client_a.build_workspace(&project_a, cx_a); - let pane_a1 = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); - let _editor_a1 = workspace_a - .update(cx_a, |workspace, cx| { - workspace.open_path((worktree_id, "1.txt"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - // Client B opens an editor. 
- let workspace_b = client_b.build_workspace(&project_b, cx_b); - let pane_b1 = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); - let _editor_b1 = workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "2.txt"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - // Clients A and B follow each other in split panes - workspace_a - .update(cx_a, |workspace, cx| { - workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); - assert_ne!(*workspace.active_pane(), pane_a1); - let leader_id = *project_a.read(cx).collaborators().keys().next().unwrap(); - workspace - .toggle_follow(&workspace::ToggleFollow(leader_id), cx) - .unwrap() - }) - .await - .unwrap(); - workspace_b - .update(cx_b, |workspace, cx| { - workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); - assert_ne!(*workspace.active_pane(), pane_b1); - let leader_id = *project_b.read(cx).collaborators().keys().next().unwrap(); - workspace - .toggle_follow(&workspace::ToggleFollow(leader_id), cx) - .unwrap() - }) - .await - .unwrap(); - - workspace_a - .update(cx_a, |workspace, cx| { - workspace.activate_next_pane(cx); - assert_eq!(*workspace.active_pane(), pane_a1); - workspace.open_path((worktree_id, "3.txt"), cx) - }) - .await - .unwrap(); - workspace_b - .update(cx_b, |workspace, cx| { - workspace.activate_next_pane(cx); - assert_eq!(*workspace.active_pane(), pane_b1); - workspace.open_path((worktree_id, "4.txt"), cx) - }) - .await - .unwrap(); - cx_a.foreground().run_until_parked(); - - // Ensure leader updates don't change the active pane of followers - workspace_a.read_with(cx_a, |workspace, _| { - assert_eq!(*workspace.active_pane(), pane_a1); - }); - workspace_b.read_with(cx_b, |workspace, _| { - assert_eq!(*workspace.active_pane(), pane_b1); - }); - - // Ensure peers following each other doesn't cause an infinite loop. 
- assert_eq!( - workspace_a.read_with(cx_a, |workspace, cx| workspace - .active_item(cx) - .unwrap() - .project_path(cx)), - Some((worktree_id, "3.txt").into()) - ); - workspace_a.update(cx_a, |workspace, cx| { - assert_eq!( - workspace.active_item(cx).unwrap().project_path(cx), - Some((worktree_id, "3.txt").into()) - ); - workspace.activate_next_pane(cx); - assert_eq!( - workspace.active_item(cx).unwrap().project_path(cx), - Some((worktree_id, "4.txt").into()) - ); - }); - workspace_b.update(cx_b, |workspace, cx| { - assert_eq!( - workspace.active_item(cx).unwrap().project_path(cx), - Some((worktree_id, "4.txt").into()) - ); - workspace.activate_next_pane(cx); - assert_eq!( - workspace.active_item(cx).unwrap().project_path(cx), - Some((worktree_id, "3.txt").into()) - ); - }); - } - - #[gpui::test(iterations = 10)] - async fn test_auto_unfollowing(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { - cx_a.foreground().forbid_parking(); - let fs = FakeFs::new(cx_a.background()); - - // 2 clients connect to a server. - let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; - let mut client_a = server.create_client(cx_a, "user_a").await; - let mut client_b = server.create_client(cx_b, "user_b").await; - cx_a.update(editor::init); - cx_b.update(editor::init); - - // Client A shares a project. - fs.insert_tree( - "/a", - json!({ - ".zed.toml": r#"collaborators = ["user_b"]"#, - "1.txt": "one", - "2.txt": "two", - "3.txt": "three", - }), - ) - .await; - let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; - project_a - .update(cx_a, |project, cx| project.share(cx)) - .await - .unwrap(); - - // Client B joins the project. - let project_b = client_b - .build_remote_project( - project_a - .read_with(cx_a, |project, _| project.remote_id()) - .unwrap(), - cx_b, - ) - .await; - - // Client A opens some editors. 
- let workspace_a = client_a.build_workspace(&project_a, cx_a); - let _editor_a1 = workspace_a - .update(cx_a, |workspace, cx| { - workspace.open_path((worktree_id, "1.txt"), cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - // Client B starts following client A. - let workspace_b = client_b.build_workspace(&project_b, cx_b); - let pane_b = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); - let leader_id = project_b.read_with(cx_b, |project, _| { - project.collaborators().values().next().unwrap().peer_id - }); - workspace_b - .update(cx_b, |workspace, cx| { - workspace - .toggle_follow(&ToggleFollow(leader_id), cx) - .unwrap() - }) - .await - .unwrap(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - Some(leader_id) - ); - let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { - workspace - .active_item(cx) - .unwrap() - .downcast::() - .unwrap() - }); - - // When client B moves, it automatically stops following client A. - editor_b2.update(cx_b, |editor, cx| editor.move_right(&editor::MoveRight, cx)); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - None - ); - - workspace_b - .update(cx_b, |workspace, cx| { - workspace - .toggle_follow(&ToggleFollow(leader_id), cx) - .unwrap() - }) - .await - .unwrap(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - Some(leader_id) - ); - - // When client B edits, it automatically stops following client A. 
- editor_b2.update(cx_b, |editor, cx| editor.insert("X", cx)); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - None - ); - - workspace_b - .update(cx_b, |workspace, cx| { - workspace - .toggle_follow(&ToggleFollow(leader_id), cx) - .unwrap() - }) - .await - .unwrap(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - Some(leader_id) - ); - - // When client B scrolls, it automatically stops following client A. - editor_b2.update(cx_b, |editor, cx| { - editor.set_scroll_position(vec2f(0., 3.), cx) - }); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - None - ); - - workspace_b - .update(cx_b, |workspace, cx| { - workspace - .toggle_follow(&ToggleFollow(leader_id), cx) - .unwrap() - }) - .await - .unwrap(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - Some(leader_id) - ); - - // When client B activates a different pane, it continues following client A in the original pane. - workspace_b.update(cx_b, |workspace, cx| { - workspace.split_pane(pane_b.clone(), SplitDirection::Right, cx) - }); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - Some(leader_id) - ); - - workspace_b.update(cx_b, |workspace, cx| workspace.activate_next_pane(cx)); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - Some(leader_id) - ); - - // When client B activates a different item in the original pane, it automatically stops following client A. 
- workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "2.txt"), cx) - }) - .await - .unwrap(); - assert_eq!( - workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), - None - ); - } - - #[gpui::test(iterations = 100)] - async fn test_random_collaboration( - cx: &mut TestAppContext, - deterministic: Arc, - rng: StdRng, - ) { - cx.foreground().forbid_parking(); - let max_peers = env::var("MAX_PEERS") - .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) - .unwrap_or(5); - assert!(max_peers <= 5); - - let max_operations = env::var("OPERATIONS") - .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) - .unwrap_or(10); - - let rng = Arc::new(Mutex::new(rng)); - - let guest_lang_registry = Arc::new(LanguageRegistry::test()); - let host_language_registry = Arc::new(LanguageRegistry::test()); - - let fs = FakeFs::new(cx.background()); - fs.insert_tree( - "/_collab", - json!({ - ".zed.toml": r#"collaborators = ["guest-1", "guest-2", "guest-3", "guest-4"]"# - }), - ) - .await; - - let mut server = TestServer::start(cx.foreground(), cx.background()).await; - let mut clients = Vec::new(); - let mut user_ids = Vec::new(); - let mut op_start_signals = Vec::new(); - let files = Arc::new(Mutex::new(Vec::new())); - - let mut next_entity_id = 100000; - let mut host_cx = TestAppContext::new( - cx.foreground_platform(), - cx.platform(), - deterministic.build_foreground(next_entity_id), - deterministic.build_background(), - cx.font_cache(), - cx.leak_detector(), - next_entity_id, - ); - let host = server.create_client(&mut host_cx, "host").await; - let host_project = host_cx.update(|cx| { - Project::local( - host.client.clone(), - host.user_store.clone(), - host_language_registry.clone(), - fs.clone(), - cx, - ) - }); - let host_project_id = host_project - .update(&mut host_cx, |p, _| p.next_remote_id()) - .await; - - let (collab_worktree, _) = host_project - .update(&mut host_cx, |project, cx| { - 
project.find_or_create_local_worktree("/_collab", true, cx) - }) - .await - .unwrap(); - collab_worktree - .read_with(&host_cx, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - host_project - .update(&mut host_cx, |project, cx| project.share(cx)) - .await - .unwrap(); - - // Set up fake language servers. - let mut language = Language::new( - LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - None, - ); - let _fake_servers = language.set_fake_lsp_adapter(FakeLspAdapter { - name: "the-fake-language-server", - capabilities: lsp::LanguageServer::full_capabilities(), - initializer: Some(Box::new({ - let rng = rng.clone(); - let files = files.clone(); - let project = host_project.downgrade(); - move |fake_server: &mut FakeLanguageServer| { - fake_server.handle_request::( - |_, _| async move { - Ok(Some(lsp::CompletionResponse::Array(vec![ - lsp::CompletionItem { - text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { - range: lsp::Range::new( - lsp::Position::new(0, 0), - lsp::Position::new(0, 0), - ), - new_text: "the-new-text".to_string(), - })), - ..Default::default() - }, - ]))) - }, - ); - - fake_server.handle_request::( - |_, _| async move { - Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( - lsp::CodeAction { - title: "the-code-action".to_string(), - ..Default::default() - }, - )])) - }, - ); - - fake_server.handle_request::( - |params, _| async move { - Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( - params.position, - params.position, - )))) - }, - ); - - fake_server.handle_request::({ - let files = files.clone(); - let rng = rng.clone(); - move |_, _| { - let files = files.clone(); - let rng = rng.clone(); - async move { - let files = files.lock(); - let mut rng = rng.lock(); - let count = rng.gen_range::(1..3); - let files = (0..count) - .map(|_| files.choose(&mut *rng).unwrap()) - .collect::>(); - log::info!("LSP: Returning definitions in files {:?}", &files); - 
Ok(Some(lsp::GotoDefinitionResponse::Array( - files - .into_iter() - .map(|file| lsp::Location { - uri: lsp::Url::from_file_path(file).unwrap(), - range: Default::default(), - }) - .collect(), - ))) - } - } - }); - - fake_server.handle_request::({ - let rng = rng.clone(); - let project = project.clone(); - move |params, mut cx| { - let highlights = if let Some(project) = project.upgrade(&cx) { - project.update(&mut cx, |project, cx| { - let path = params - .text_document_position_params - .text_document - .uri - .to_file_path() - .unwrap(); - let (worktree, relative_path) = - project.find_local_worktree(&path, cx)?; - let project_path = - ProjectPath::from((worktree.read(cx).id(), relative_path)); - let buffer = - project.get_open_buffer(&project_path, cx)?.read(cx); - - let mut highlights = Vec::new(); - let highlight_count = rng.lock().gen_range(1..=5); - let mut prev_end = 0; - for _ in 0..highlight_count { - let range = - buffer.random_byte_range(prev_end, &mut *rng.lock()); - - highlights.push(lsp::DocumentHighlight { - range: range_to_lsp(range.to_point_utf16(buffer)), - kind: Some(lsp::DocumentHighlightKind::READ), - }); - prev_end = range.end; - } - Some(highlights) - }) - } else { - None - }; - async move { Ok(highlights) } - } - }); - } - })), - ..Default::default() - }); - host_language_registry.add(Arc::new(language)); - - let op_start_signal = futures::channel::mpsc::unbounded(); - user_ids.push(host.current_user_id(&host_cx)); - op_start_signals.push(op_start_signal.0); - clients.push(host_cx.foreground().spawn(host.simulate_host( - host_project, - files, - op_start_signal.1, - rng.clone(), - host_cx, - ))); - - let disconnect_host_at = if rng.lock().gen_bool(0.2) { - rng.lock().gen_range(0..max_operations) - } else { - max_operations - }; - let mut available_guests = vec![ - "guest-1".to_string(), - "guest-2".to_string(), - "guest-3".to_string(), - "guest-4".to_string(), - ]; - let mut operations = 0; - while operations < max_operations { - if 
operations == disconnect_host_at { - server.disconnect_client(user_ids[0]); - cx.foreground().advance_clock(RECEIVE_TIMEOUT); - drop(op_start_signals); - let mut clients = futures::future::join_all(clients).await; - cx.foreground().run_until_parked(); - - let (host, mut host_cx, host_err) = clients.remove(0); - if let Some(host_err) = host_err { - log::error!("host error - {}", host_err); - } - host.project - .as_ref() - .unwrap() - .read_with(&host_cx, |project, _| assert!(!project.is_shared())); - for (guest, mut guest_cx, guest_err) in clients { - if let Some(guest_err) = guest_err { - log::error!("{} error - {}", guest.username, guest_err); - } - let contacts = server - .store - .read() - .await - .contacts_for_user(guest.current_user_id(&guest_cx)); - assert!(!contacts - .iter() - .flat_map(|contact| &contact.projects) - .any(|project| project.id == host_project_id)); - guest - .project - .as_ref() - .unwrap() - .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); - guest_cx.update(|_| drop(guest)); - } - host_cx.update(|_| drop(host)); - - return; - } - - let distribution = rng.lock().gen_range(0..100); - match distribution { - 0..=19 if !available_guests.is_empty() => { - let guest_ix = rng.lock().gen_range(0..available_guests.len()); - let guest_username = available_guests.remove(guest_ix); - log::info!("Adding new connection for {}", guest_username); - next_entity_id += 100000; - let mut guest_cx = TestAppContext::new( - cx.foreground_platform(), - cx.platform(), - deterministic.build_foreground(next_entity_id), - deterministic.build_background(), - cx.font_cache(), - cx.leak_detector(), - next_entity_id, - ); - let guest = server.create_client(&mut guest_cx, &guest_username).await; - let guest_project = Project::remote( - host_project_id, - guest.client.clone(), - guest.user_store.clone(), - guest_lang_registry.clone(), - FakeFs::new(cx.background()), - &mut guest_cx.to_async(), - ) - .await - .unwrap(); - let op_start_signal = 
futures::channel::mpsc::unbounded(); - user_ids.push(guest.current_user_id(&guest_cx)); - op_start_signals.push(op_start_signal.0); - clients.push(guest_cx.foreground().spawn(guest.simulate_guest( - guest_username.clone(), - guest_project, - op_start_signal.1, - rng.clone(), - guest_cx, - ))); - - log::info!("Added connection for {}", guest_username); - operations += 1; - } - 20..=29 if clients.len() > 1 => { - log::info!("Removing guest"); - let guest_ix = rng.lock().gen_range(1..clients.len()); - let removed_guest_id = user_ids.remove(guest_ix); - let guest = clients.remove(guest_ix); - op_start_signals.remove(guest_ix); - server.disconnect_client(removed_guest_id); - cx.foreground().advance_clock(RECEIVE_TIMEOUT); - let (guest, mut guest_cx, guest_err) = guest.await; - if let Some(guest_err) = guest_err { - log::error!("{} error - {}", guest.username, guest_err); - } - guest - .project - .as_ref() - .unwrap() - .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); - for user_id in &user_ids { - for contact in server.store.read().await.contacts_for_user(*user_id) { - assert_ne!( - contact.user_id, removed_guest_id.0 as u64, - "removed guest is still a contact of another peer" - ); - for project in contact.projects { - for project_guest_id in project.guests { - assert_ne!( - project_guest_id, removed_guest_id.0 as u64, - "removed guest appears as still participating on a project" - ); - } - } - } - } - - log::info!("{} removed", guest.username); - available_guests.push(guest.username.clone()); - guest_cx.update(|_| drop(guest)); - - operations += 1; - } - _ => { - while operations < max_operations && rng.lock().gen_bool(0.7) { - op_start_signals - .choose(&mut *rng.lock()) - .unwrap() - .unbounded_send(()) - .unwrap(); - operations += 1; - } - - if rng.lock().gen_bool(0.8) { - cx.foreground().run_until_parked(); - } - } - } - } - - drop(op_start_signals); - let mut clients = futures::future::join_all(clients).await; - 
cx.foreground().run_until_parked(); - - let (host_client, mut host_cx, host_err) = clients.remove(0); - if let Some(host_err) = host_err { - panic!("host error - {}", host_err); - } - let host_project = host_client.project.as_ref().unwrap(); - let host_worktree_snapshots = host_project.read_with(&host_cx, |project, cx| { - project - .worktrees(cx) - .map(|worktree| { - let snapshot = worktree.read(cx).snapshot(); - (snapshot.id(), snapshot) - }) - .collect::>() - }); - - host_client - .project - .as_ref() - .unwrap() - .read_with(&host_cx, |project, cx| project.check_invariants(cx)); - - for (guest_client, mut guest_cx, guest_err) in clients.into_iter() { - if let Some(guest_err) = guest_err { - panic!("{} error - {}", guest_client.username, guest_err); - } - let worktree_snapshots = - guest_client - .project - .as_ref() - .unwrap() - .read_with(&guest_cx, |project, cx| { - project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>() - }); - - assert_eq!( - worktree_snapshots.keys().collect::>(), - host_worktree_snapshots.keys().collect::>(), - "{} has different worktrees than the host", - guest_client.username - ); - for (id, host_snapshot) in &host_worktree_snapshots { - let guest_snapshot = &worktree_snapshots[id]; - assert_eq!( - guest_snapshot.root_name(), - host_snapshot.root_name(), - "{} has different root name than the host for worktree {}", - guest_client.username, - id - ); - assert_eq!( - guest_snapshot.entries(false).collect::>(), - host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {}", - guest_client.username, - id - ); - } - - guest_client - .project - .as_ref() - .unwrap() - .read_with(&guest_cx, |project, cx| project.check_invariants(cx)); - - for guest_buffer in &guest_client.buffers { - let buffer_id = guest_buffer.read_with(&guest_cx, |buffer, _| buffer.remote_id()); - let host_buffer = host_project.read_with(&host_cx, 
|project, cx| { - project.buffer_for_id(buffer_id, cx).expect(&format!( - "host does not have buffer for guest:{}, peer:{}, id:{}", - guest_client.username, guest_client.peer_id, buffer_id - )) - }); - let path = host_buffer - .read_with(&host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); - - assert_eq!( - guest_buffer.read_with(&guest_cx, |buffer, _| buffer.deferred_ops_len()), - 0, - "{}, buffer {}, path {:?} has deferred operations", - guest_client.username, - buffer_id, - path, - ); - assert_eq!( - guest_buffer.read_with(&guest_cx, |buffer, _| buffer.text()), - host_buffer.read_with(&host_cx, |buffer, _| buffer.text()), - "{}, buffer {}, path {:?}, differs from the host's buffer", - guest_client.username, - buffer_id, - path - ); - } - - guest_cx.update(|_| drop(guest_client)); - } - - host_cx.update(|_| drop(host_client)); - } - - struct TestServer { - peer: Arc, - app_state: Arc, - server: Arc, - foreground: Rc, - notifications: mpsc::UnboundedReceiver<()>, - connection_killers: Arc>>>, - forbid_connections: Arc, - _test_db: TestDb, - } - - impl TestServer { - async fn start( - foreground: Rc, - background: Arc, - ) -> Self { - let test_db = TestDb::fake(background); - let app_state = Self::build_app_state(&test_db).await; - let peer = Peer::new(); - let notifications = mpsc::unbounded(); - let server = Server::new(app_state.clone(), peer.clone(), Some(notifications.0)); - Self { - peer, - app_state, - server, - foreground, - notifications: notifications.1, - connection_killers: Default::default(), - forbid_connections: Default::default(), - _test_db: test_db, - } - } - - async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { - cx.update(|cx| { - let settings = Settings::test(cx); - cx.set_global(settings); - }); - - let http = FakeHttpClient::with_404_response(); - let user_id = self.app_state.db.create_user(name, false).await.unwrap(); - let client_name = name.to_string(); - let mut client = 
Client::new(http.clone()); - let server = self.server.clone(); - let connection_killers = self.connection_killers.clone(); - let forbid_connections = self.forbid_connections.clone(); - let (connection_id_tx, mut connection_id_rx) = mpsc::channel(16); - - Arc::get_mut(&mut client) - .unwrap() - .override_authenticate(move |cx| { - cx.spawn(|_| async move { - let access_token = "the-token".to_string(); - Ok(Credentials { - user_id: user_id.0 as u64, - access_token, - }) - }) - }) - .override_establish_connection(move |credentials, cx| { - assert_eq!(credentials.user_id, user_id.0 as u64); - assert_eq!(credentials.access_token, "the-token"); - - let server = server.clone(); - let connection_killers = connection_killers.clone(); - let forbid_connections = forbid_connections.clone(); - let client_name = client_name.clone(); - let connection_id_tx = connection_id_tx.clone(); - cx.spawn(move |cx| async move { - if forbid_connections.load(SeqCst) { - Err(EstablishConnectionError::other(anyhow!( - "server is forbidding connections" - ))) - } else { - let (client_conn, server_conn, killed) = - Connection::in_memory(cx.background()); - connection_killers.lock().insert(user_id, killed); - cx.background() - .spawn(server.handle_connection( - server_conn, - client_name, - user_id, - Some(connection_id_tx), - cx.background(), - )) - .detach(); - Ok(client_conn) - } - }) - }); - - client - .authenticate_and_connect(false, &cx.to_async()) - .await - .unwrap(); - - Channel::init(&client); - Project::init(&client); - cx.update(|cx| { - workspace::init(&client, cx); - }); - - let peer_id = PeerId(connection_id_rx.next().await.unwrap().0); - let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); - - let client = TestClient { - client, - peer_id, - username: name.to_string(), - user_store, - language_registry: Arc::new(LanguageRegistry::test()), - project: Default::default(), - buffers: Default::default(), - }; - client.wait_for_current_user(cx).await; - client - 
} - - fn disconnect_client(&self, user_id: UserId) { - self.connection_killers - .lock() - .remove(&user_id) - .unwrap() - .store(true, SeqCst); - } - - fn forbid_connections(&self) { - self.forbid_connections.store(true, SeqCst); - } - - fn allow_connections(&self) { - self.forbid_connections.store(false, SeqCst); - } - - async fn build_app_state(test_db: &TestDb) -> Arc { - let mut config = Config::default(); - config.database_url = test_db.url.clone(); - Arc::new(AppState { - db: test_db.db().clone(), - config, - }) - } - - async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> { - self.server.store.read().await - } - - async fn condition(&mut self, mut predicate: F) - where - F: FnMut(&Store) -> bool, - { - async_std::future::timeout(Duration::from_millis(500), async { - while !(predicate)(&*self.server.store.read().await) { - self.foreground.start_waiting(); - self.notifications.next().await; - self.foreground.finish_waiting(); - } - }) - .await - .expect("condition timed out"); - } - } - - impl Deref for TestServer { - type Target = Server; - - fn deref(&self) -> &Self::Target { - &self.server - } - } - - impl Drop for TestServer { - fn drop(&mut self) { - self.peer.reset(); - } - } - - struct TestClient { - client: Arc, - username: String, - pub peer_id: PeerId, - pub user_store: ModelHandle, - language_registry: Arc, - project: Option>, - buffers: HashSet>, - } - - impl Deref for TestClient { - type Target = Arc; - - fn deref(&self) -> &Self::Target { - &self.client - } - } - - impl TestClient { - pub fn current_user_id(&self, cx: &TestAppContext) -> UserId { - UserId::from_proto( - self.user_store - .read_with(cx, |user_store, _| user_store.current_user().unwrap().id), - ) - } - - async fn wait_for_current_user(&self, cx: &TestAppContext) { - let mut authed_user = self - .user_store - .read_with(cx, |user_store, _| user_store.watch_current_user()); - while authed_user.next().await.unwrap().is_none() {} - } - - async fn build_local_project( - &mut self, 
- fs: Arc, - root_path: impl AsRef, - cx: &mut TestAppContext, - ) -> (ModelHandle, WorktreeId) { - let project = cx.update(|cx| { - Project::local( - self.client.clone(), - self.user_store.clone(), - self.language_registry.clone(), - fs, - cx, - ) - }); - self.project = Some(project.clone()); - let (worktree, _) = project - .update(cx, |p, cx| { - p.find_or_create_local_worktree(root_path, true, cx) - }) - .await - .unwrap(); - worktree - .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - project - .update(cx, |project, _| project.next_remote_id()) - .await; - (project, worktree.read_with(cx, |tree, _| tree.id())) - } - - async fn build_remote_project( - &mut self, - project_id: u64, - cx: &mut TestAppContext, - ) -> ModelHandle { - let project = Project::remote( - project_id, - self.client.clone(), - self.user_store.clone(), - self.language_registry.clone(), - FakeFs::new(cx.background()), - &mut cx.to_async(), - ) - .await - .unwrap(); - self.project = Some(project.clone()); - project - } - - fn build_workspace( - &self, - project: &ModelHandle, - cx: &mut TestAppContext, - ) -> ViewHandle { - let (window_id, _) = cx.add_window(|_| EmptyView); - cx.add_view(window_id, |cx| { - let fs = project.read(cx).fs().clone(); - Workspace::new( - &WorkspaceParams { - fs, - project: project.clone(), - user_store: self.user_store.clone(), - languages: self.language_registry.clone(), - themes: ThemeRegistry::new((), cx.font_cache().clone()), - channel_list: cx.add_model(|cx| { - ChannelList::new(self.user_store.clone(), self.client.clone(), cx) - }), - client: self.client.clone(), - }, - cx, - ) - }) - } - - async fn simulate_host( - mut self, - project: ModelHandle, - files: Arc>>, - op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, - rng: Arc>, - mut cx: TestAppContext, - ) -> (Self, TestAppContext, Option) { - async fn simulate_host_internal( - client: &mut TestClient, - project: ModelHandle, - files: Arc>>, - mut op_start_signal: 
futures::channel::mpsc::UnboundedReceiver<()>, - rng: Arc>, - cx: &mut TestAppContext, - ) -> anyhow::Result<()> { - let fs = project.read_with(cx, |project, _| project.fs().clone()); - - while op_start_signal.next().await.is_some() { - let distribution = rng.lock().gen_range::(0..100); - match distribution { - 0..=20 if !files.lock().is_empty() => { - let path = files.lock().choose(&mut *rng.lock()).unwrap().clone(); - let mut path = path.as_path(); - while let Some(parent_path) = path.parent() { - path = parent_path; - if rng.lock().gen() { - break; - } - } - - log::info!("Host: find/create local worktree {:?}", path); - let find_or_create_worktree = project.update(cx, |project, cx| { - project.find_or_create_local_worktree(path, true, cx) - }); - if rng.lock().gen() { - cx.background().spawn(find_or_create_worktree).detach(); - } else { - find_or_create_worktree.await?; - } - } - 10..=80 if !files.lock().is_empty() => { - let buffer = if client.buffers.is_empty() || rng.lock().gen() { - let file = files.lock().choose(&mut *rng.lock()).unwrap().clone(); - let (worktree, path) = project - .update(cx, |project, cx| { - project.find_or_create_local_worktree( - file.clone(), - true, - cx, - ) - }) - .await?; - let project_path = - worktree.read_with(cx, |worktree, _| (worktree.id(), path)); - log::info!( - "Host: opening path {:?}, worktree {}, relative_path {:?}", - file, - project_path.0, - project_path.1 - ); - let buffer = project - .update(cx, |project, cx| project.open_buffer(project_path, cx)) - .await - .unwrap(); - client.buffers.insert(buffer.clone()); - buffer - } else { - client - .buffers - .iter() - .choose(&mut *rng.lock()) - .unwrap() - .clone() - }; - - if rng.lock().gen_bool(0.1) { - cx.update(|cx| { - log::info!( - "Host: dropping buffer {:?}", - buffer.read(cx).file().unwrap().full_path(cx) - ); - client.buffers.remove(&buffer); - drop(buffer); - }); - } else { - buffer.update(cx, |buffer, cx| { - log::info!( - "Host: updating buffer {:?} ({})", - 
buffer.file().unwrap().full_path(cx), - buffer.remote_id() - ); - - if rng.lock().gen_bool(0.7) { - buffer.randomly_edit(&mut *rng.lock(), 5, cx); - } else { - buffer.randomly_undo_redo(&mut *rng.lock(), cx); - } - }); - } - } - _ => loop { - let path_component_count = rng.lock().gen_range::(1..=5); - let mut path = PathBuf::new(); - path.push("/"); - for _ in 0..path_component_count { - let letter = rng.lock().gen_range(b'a'..=b'z'); - path.push(std::str::from_utf8(&[letter]).unwrap()); - } - path.set_extension("rs"); - let parent_path = path.parent().unwrap(); - - log::info!("Host: creating file {:?}", path,); - - if fs.create_dir(&parent_path).await.is_ok() - && fs.create_file(&path, Default::default()).await.is_ok() - { - files.lock().push(path); - break; - } else { - log::info!("Host: cannot create file"); - } - }, - } - - cx.background().simulate_random_delay().await; - } - - Ok(()) - } - - let result = simulate_host_internal( - &mut self, - project.clone(), - files, - op_start_signal, - rng, - &mut cx, - ) - .await; - log::info!("Host done"); - self.project = Some(project); - (self, cx, result.err()) - } - - pub async fn simulate_guest( - mut self, - guest_username: String, - project: ModelHandle, - op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, - rng: Arc>, - mut cx: TestAppContext, - ) -> (Self, TestAppContext, Option) { - async fn simulate_guest_internal( - client: &mut TestClient, - guest_username: &str, - project: ModelHandle, - mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, - rng: Arc>, - cx: &mut TestAppContext, - ) -> anyhow::Result<()> { - while op_start_signal.next().await.is_some() { - let buffer = if client.buffers.is_empty() || rng.lock().gen() { - let worktree = if let Some(worktree) = - project.read_with(cx, |project, cx| { - project - .worktrees(&cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() - && worktree.entries(false).any(|e| e.is_file()) - }) - .choose(&mut 
*rng.lock()) - }) { - worktree - } else { - cx.background().simulate_random_delay().await; - continue; - }; - - let (worktree_root_name, project_path) = - worktree.read_with(cx, |worktree, _| { - let entry = worktree - .entries(false) - .filter(|e| e.is_file()) - .choose(&mut *rng.lock()) - .unwrap(); - ( - worktree.root_name().to_string(), - (worktree.id(), entry.path.clone()), - ) - }); - log::info!( - "{}: opening path {:?} in worktree {} ({})", - guest_username, - project_path.1, - project_path.0, - worktree_root_name, - ); - let buffer = project - .update(cx, |project, cx| { - project.open_buffer(project_path.clone(), cx) - }) - .await?; - log::info!( - "{}: opened path {:?} in worktree {} ({}) with buffer id {}", - guest_username, - project_path.1, - project_path.0, - worktree_root_name, - buffer.read_with(cx, |buffer, _| buffer.remote_id()) - ); - client.buffers.insert(buffer.clone()); - buffer - } else { - client - .buffers - .iter() - .choose(&mut *rng.lock()) - .unwrap() - .clone() - }; - - let choice = rng.lock().gen_range(0..100); - match choice { - 0..=9 => { - cx.update(|cx| { - log::info!( - "{}: dropping buffer {:?}", - guest_username, - buffer.read(cx).file().unwrap().full_path(cx) - ); - client.buffers.remove(&buffer); - drop(buffer); - }); - } - 10..=19 => { - let completions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting completions for buffer {} ({:?})", - guest_username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.completions(&buffer, offset, cx) - }); - let completions = cx.background().spawn(async move { - completions - .await - .map_err(|err| anyhow!("completions request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching completions request", guest_username); - cx.update(|cx| completions.detach_and_log_err(cx)); - } else { - completions.await?; - } - } - 20..=29 => { - 
let code_actions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting code actions for buffer {} ({:?})", - guest_username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); - project.code_actions(&buffer, range, cx) - }); - let code_actions = cx.background().spawn(async move { - code_actions.await.map_err(|err| { - anyhow!("code actions request failed: {:?}", err) - }) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching code actions request", guest_username); - cx.update(|cx| code_actions.detach_and_log_err(cx)); - } else { - code_actions.await?; - } - } - 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { - let (requested_version, save) = buffer.update(cx, |buffer, cx| { - log::info!( - "{}: saving buffer {} ({:?})", - guest_username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - (buffer.version(), buffer.save(cx)) - }); - let save = cx.background().spawn(async move { - let (saved_version, _) = save - .await - .map_err(|err| anyhow!("save request failed: {:?}", err))?; - assert!(saved_version.observed_all(&requested_version)); - Ok::<_, anyhow::Error>(()) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching save request", guest_username); - cx.update(|cx| save.detach_and_log_err(cx)); - } else { - save.await?; - } - } - 40..=44 => { - let prepare_rename = project.update(cx, |project, cx| { - log::info!( - "{}: preparing rename for buffer {} ({:?})", - guest_username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.prepare_rename(buffer, offset, cx) - }); - let prepare_rename = cx.background().spawn(async move { - prepare_rename.await.map_err(|err| { - anyhow!("prepare rename request failed: {:?}", err) - }) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching 
prepare rename request", guest_username); - cx.update(|cx| prepare_rename.detach_and_log_err(cx)); - } else { - prepare_rename.await?; - } - } - 45..=49 => { - let definitions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting definitions for buffer {} ({:?})", - guest_username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.definition(&buffer, offset, cx) - }); - let definitions = cx.background().spawn(async move { - definitions - .await - .map_err(|err| anyhow!("definitions request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching definitions request", guest_username); - cx.update(|cx| definitions.detach_and_log_err(cx)); - } else { - client - .buffers - .extend(definitions.await?.into_iter().map(|loc| loc.buffer)); - } - } - 50..=54 => { - let highlights = project.update(cx, |project, cx| { - log::info!( - "{}: requesting highlights for buffer {} ({:?})", - guest_username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); - project.document_highlights(&buffer, offset, cx) - }); - let highlights = cx.background().spawn(async move { - highlights - .await - .map_err(|err| anyhow!("highlights request failed: {:?}", err)) - }); - if rng.lock().gen_bool(0.3) { - log::info!("{}: detaching highlights request", guest_username); - cx.update(|cx| highlights.detach_and_log_err(cx)); - } else { - highlights.await?; - } - } - 55..=59 => { - let search = project.update(cx, |project, cx| { - let query = rng.lock().gen_range('a'..='z'); - log::info!("{}: project-wide search {:?}", guest_username, query); - project.search(SearchQuery::text(query, false, false), cx) - }); - let search = cx.background().spawn(async move { - search - .await - .map_err(|err| anyhow!("search request failed: {:?}", err)) - }); - if 
rng.lock().gen_bool(0.3) { - log::info!("{}: detaching search request", guest_username); - cx.update(|cx| search.detach_and_log_err(cx)); - } else { - client.buffers.extend(search.await?.into_keys()); - } - } - _ => { - buffer.update(cx, |buffer, cx| { - log::info!( - "{}: updating buffer {} ({:?})", - guest_username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - if rng.lock().gen_bool(0.7) { - buffer.randomly_edit(&mut *rng.lock(), 5, cx); - } else { - buffer.randomly_undo_redo(&mut *rng.lock(), cx); - } - }); - } - } - cx.background().simulate_random_delay().await; - } - Ok(()) - } - - let result = simulate_guest_internal( - &mut self, - &guest_username, - project.clone(), - op_start_signal, - rng, - &mut cx, - ) - .await; - log::info!("{}: done", guest_username); - - self.project = Some(project); - (self, cx, result.err()) - } - } - - impl Drop for TestClient { - fn drop(&mut self) { - self.client.tear_down(); - } - } - - impl Executor for Arc { - type Timer = gpui::executor::Timer; - - fn spawn_detached>(&self, future: F) { - self.spawn(future).detach(); - } - - fn timer(&self, duration: Duration) -> Self::Timer { - self.as_ref().timer(duration) - } - } - - fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> { - channel - .messages() - .cursor::<()>() - .map(|m| { - ( - m.sender.github_login.clone(), - m.body.clone(), - m.is_pending(), - ) - }) - .collect() - } - - struct EmptyView; - - impl gpui::Entity for EmptyView { - type Event = (); - } - - impl gpui::View for EmptyView { - fn ui_name() -> &'static str { - "empty view" - } - - fn render(&mut self, _: &mut gpui::RenderContext) -> gpui::ElementBox { - gpui::Element::boxed(gpui::elements::Empty) - } - } -} +// mod store; + +// use super::{ +// auth::process_auth_header, +// db::{ChannelId, MessageId, UserId}, +// AppState, +// }; +// use anyhow::anyhow; +// use async_std::{ +// sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, +// task, +// }; +// use 
async_tungstenite::{tungstenite::protocol::Role, WebSocketStream}; +// use collections::{HashMap, HashSet}; +// use futures::{channel::mpsc, future::BoxFuture, FutureExt, SinkExt, StreamExt}; +// use log::{as_debug, as_display}; +// use rpc::{ +// proto::{self, AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}, +// Connection, ConnectionId, Peer, TypedEnvelope, +// }; +// use sha1::{Digest as _, Sha1}; +// use std::{ +// any::TypeId, +// future::Future, +// marker::PhantomData, +// ops::{Deref, DerefMut}, +// rc::Rc, +// sync::Arc, +// time::{Duration, Instant}, +// }; +// use store::{Store, Worktree}; +// use time::OffsetDateTime; +// use util::ResultExt; + +// type MessageHandler = Box< +// dyn Send +// + Sync +// + Fn(Arc, Box) -> BoxFuture<'static, tide::Result<()>>, +// >; + +// pub struct Server { +// peer: Arc, +// store: RwLock, +// app_state: Arc, +// handlers: HashMap, +// notifications: Option>, +// } + +// pub trait Executor: Send + Clone { +// type Timer: Send + Future; +// fn spawn_detached>(&self, future: F); +// fn timer(&self, duration: Duration) -> Self::Timer; +// } + +// #[derive(Clone)] +// pub struct RealExecutor; + +// const MESSAGE_COUNT_PER_PAGE: usize = 100; +// const MAX_MESSAGE_LEN: usize = 1024; + +// struct StoreReadGuard<'a> { +// guard: RwLockReadGuard<'a, Store>, +// _not_send: PhantomData>, +// } + +// struct StoreWriteGuard<'a> { +// guard: RwLockWriteGuard<'a, Store>, +// _not_send: PhantomData>, +// } + +// impl Server { +// pub fn new( +// app_state: Arc, +// peer: Arc, +// notifications: Option>, +// ) -> Arc { +// let mut server = Self { +// peer, +// app_state, +// store: Default::default(), +// handlers: Default::default(), +// notifications, +// }; + +// server +// .add_request_handler(Server::ping) +// .add_request_handler(Server::register_project) +// .add_message_handler(Server::unregister_project) +// .add_request_handler(Server::share_project) +// .add_message_handler(Server::unshare_project) +// 
.add_sync_request_handler(Server::join_project) +// .add_message_handler(Server::leave_project) +// .add_request_handler(Server::register_worktree) +// .add_message_handler(Server::unregister_worktree) +// .add_request_handler(Server::update_worktree) +// .add_message_handler(Server::start_language_server) +// .add_message_handler(Server::update_language_server) +// .add_message_handler(Server::update_diagnostic_summary) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler( +// Server::forward_project_request::, +// ) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::forward_project_request::) +// .add_request_handler(Server::update_buffer) +// .add_message_handler(Server::update_buffer_file) +// .add_message_handler(Server::buffer_reloaded) +// .add_message_handler(Server::buffer_saved) +// .add_request_handler(Server::save_buffer) +// .add_request_handler(Server::get_channels) +// .add_request_handler(Server::get_users) +// .add_request_handler(Server::join_channel) +// .add_message_handler(Server::leave_channel) +// .add_request_handler(Server::send_channel_message) +// .add_request_handler(Server::follow) +// .add_message_handler(Server::unfollow) +// 
.add_message_handler(Server::update_followers) +// .add_request_handler(Server::get_channel_messages); + +// Arc::new(server) +// } + +// fn add_message_handler(&mut self, handler: F) -> &mut Self +// where +// F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, +// Fut: 'static + Send + Future>, +// M: EnvelopedMessage, +// { +// let prev_handler = self.handlers.insert( +// TypeId::of::(), +// Box::new(move |server, envelope| { +// let envelope = envelope.into_any().downcast::>().unwrap(); +// (handler)(server, *envelope).boxed() +// }), +// ); +// if prev_handler.is_some() { +// panic!("registered a handler for the same message twice"); +// } +// self +// } + +// fn add_request_handler(&mut self, handler: F) -> &mut Self +// where +// F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, +// Fut: 'static + Send + Future>, +// M: RequestMessage, +// { +// self.add_message_handler(move |server, envelope| { +// let receipt = envelope.receipt(); +// let response = (handler)(server.clone(), envelope); +// async move { +// match response.await { +// Ok(response) => { +// server.peer.respond(receipt, response)?; +// Ok(()) +// } +// Err(error) => { +// server.peer.respond_with_error( +// receipt, +// proto::Error { +// message: error.to_string(), +// }, +// )?; +// Err(error) +// } +// } +// } +// }) +// } + +// /// Handle a request while holding a lock to the store. This is useful when we're registering +// /// a connection but we want to respond on the connection before anybody else can send on it. 
+// fn add_sync_request_handler(&mut self, handler: F) -> &mut Self +// where +// F: 'static +// + Send +// + Sync +// + Fn(Arc, &mut Store, TypedEnvelope) -> tide::Result, +// M: RequestMessage, +// { +// let handler = Arc::new(handler); +// self.add_message_handler(move |server, envelope| { +// let receipt = envelope.receipt(); +// let handler = handler.clone(); +// async move { +// let mut store = server.store.write().await; +// let response = (handler)(server.clone(), &mut *store, envelope); +// match response { +// Ok(response) => { +// server.peer.respond(receipt, response)?; +// Ok(()) +// } +// Err(error) => { +// server.peer.respond_with_error( +// receipt, +// proto::Error { +// message: error.to_string(), +// }, +// )?; +// Err(error) +// } +// } +// } +// }) +// } + +// pub fn handle_connection( +// self: &Arc, +// connection: Connection, +// addr: String, +// user_id: UserId, +// mut send_connection_id: Option>, +// executor: E, +// ) -> impl Future { +// let mut this = self.clone(); +// async move { +// let (connection_id, handle_io, mut incoming_rx) = this +// .peer +// .add_connection(connection, { +// let executor = executor.clone(); +// move |duration| { +// let timer = executor.timer(duration); +// async move { +// timer.await; +// } +// } +// }) +// .await; + +// if let Some(send_connection_id) = send_connection_id.as_mut() { +// let _ = send_connection_id.send(connection_id).await; +// } + +// { +// let mut state = this.state_mut().await; +// state.add_connection(connection_id, user_id); +// this.update_contacts_for_users(&*state, &[user_id]); +// } + +// let handle_io = handle_io.fuse(); +// futures::pin_mut!(handle_io); +// loop { +// let next_message = incoming_rx.next().fuse(); +// futures::pin_mut!(next_message); +// futures::select_biased! 
{ +// result = handle_io => { +// if let Err(err) = result { +// log::error!("error handling rpc connection {:?} - {:?}", addr, err); +// } +// break; +// } +// message = next_message => { +// if let Some(message) = message { +// let start_time = Instant::now(); +// let type_name = message.payload_type_name(); +// log::info!(connection_id = connection_id.0, type_name = type_name; "rpc message received"); +// if let Some(handler) = this.handlers.get(&message.payload_type_id()) { +// let notifications = this.notifications.clone(); +// let is_background = message.is_background(); +// let handle_message = (handler)(this.clone(), message); +// let handle_message = async move { +// if let Err(err) = handle_message.await { +// log::error!(connection_id = connection_id.0, type = type_name, error = as_display!(err); "rpc message error"); +// } else { +// log::info!(connection_id = connection_id.0, type = type_name, duration = as_debug!(start_time.elapsed()); "rpc message handled"); +// } +// if let Some(mut notifications) = notifications { +// let _ = notifications.send(()).await; +// } +// }; +// if is_background { +// executor.spawn_detached(handle_message); +// } else { +// handle_message.await; +// } +// } else { +// log::warn!("unhandled message: {}", type_name); +// } +// } else { +// log::info!(address = as_debug!(addr); "rpc connection closed"); +// break; +// } +// } +// } +// } + +// if let Err(err) = this.sign_out(connection_id).await { +// log::error!("error signing out connection {:?} - {:?}", addr, err); +// } +// } +// } + +// async fn sign_out(self: &mut Arc, connection_id: ConnectionId) -> tide::Result<()> { +// self.peer.disconnect(connection_id); +// let mut state = self.state_mut().await; +// let removed_connection = state.remove_connection(connection_id)?; + +// for (project_id, project) in removed_connection.hosted_projects { +// if let Some(share) = project.share { +// broadcast( +// connection_id, +// share.guests.keys().copied().collect(), +// 
|conn_id| { +// self.peer +// .send(conn_id, proto::UnshareProject { project_id }) +// }, +// ); +// } +// } + +// for (project_id, peer_ids) in removed_connection.guest_project_ids { +// broadcast(connection_id, peer_ids, |conn_id| { +// self.peer.send( +// conn_id, +// proto::RemoveProjectCollaborator { +// project_id, +// peer_id: connection_id.0, +// }, +// ) +// }); +// } + +// self.update_contacts_for_users(&*state, removed_connection.contact_ids.iter()); +// Ok(()) +// } + +// async fn ping(self: Arc, _: TypedEnvelope) -> tide::Result { +// Ok(proto::Ack {}) +// } + +// async fn register_project( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let project_id = { +// let mut state = self.state_mut().await; +// let user_id = state.user_id_for_connection(request.sender_id)?; +// state.register_project(request.sender_id, user_id) +// }; +// Ok(proto::RegisterProjectResponse { project_id }) +// } + +// async fn unregister_project( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let mut state = self.state_mut().await; +// let project = state.unregister_project(request.payload.project_id, request.sender_id)?; +// self.update_contacts_for_users(&*state, &project.authorized_user_ids()); +// Ok(()) +// } + +// async fn share_project( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let mut state = self.state_mut().await; +// let project = state.share_project(request.payload.project_id, request.sender_id)?; +// self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); +// Ok(proto::Ack {}) +// } + +// async fn unshare_project( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let project_id = request.payload.project_id; +// let mut state = self.state_mut().await; +// let project = state.unshare_project(project_id, request.sender_id)?; +// broadcast(request.sender_id, project.connection_ids, |conn_id| { +// self.peer +// .send(conn_id, proto::UnshareProject 
{ project_id }) +// }); +// self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); +// Ok(()) +// } + +// fn join_project( +// self: Arc, +// state: &mut Store, +// request: TypedEnvelope, +// ) -> tide::Result { +// let project_id = request.payload.project_id; + +// let user_id = state.user_id_for_connection(request.sender_id)?; +// let (response, connection_ids, contact_user_ids) = state +// .join_project(request.sender_id, user_id, project_id) +// .and_then(|joined| { +// let share = joined.project.share()?; +// let peer_count = share.guests.len(); +// let mut collaborators = Vec::with_capacity(peer_count); +// collaborators.push(proto::Collaborator { +// peer_id: joined.project.host_connection_id.0, +// replica_id: 0, +// user_id: joined.project.host_user_id.to_proto(), +// }); +// let worktrees = share +// .worktrees +// .iter() +// .filter_map(|(id, shared_worktree)| { +// let worktree = joined.project.worktrees.get(&id)?; +// Some(proto::Worktree { +// id: *id, +// root_name: worktree.root_name.clone(), +// entries: shared_worktree.entries.values().cloned().collect(), +// diagnostic_summaries: shared_worktree +// .diagnostic_summaries +// .values() +// .cloned() +// .collect(), +// visible: worktree.visible, +// }) +// }) +// .collect(); +// for (peer_conn_id, (peer_replica_id, peer_user_id)) in &share.guests { +// if *peer_conn_id != request.sender_id { +// collaborators.push(proto::Collaborator { +// peer_id: peer_conn_id.0, +// replica_id: *peer_replica_id as u32, +// user_id: peer_user_id.to_proto(), +// }); +// } +// } +// let response = proto::JoinProjectResponse { +// worktrees, +// replica_id: joined.replica_id as u32, +// collaborators, +// language_servers: joined.project.language_servers.clone(), +// }; +// let connection_ids = joined.project.connection_ids(); +// let contact_user_ids = joined.project.authorized_user_ids(); +// Ok((response, connection_ids, contact_user_ids)) +// })?; + +// broadcast(request.sender_id, 
connection_ids, |conn_id| { +// self.peer.send( +// conn_id, +// proto::AddProjectCollaborator { +// project_id, +// collaborator: Some(proto::Collaborator { +// peer_id: request.sender_id.0, +// replica_id: response.replica_id, +// user_id: user_id.to_proto(), +// }), +// }, +// ) +// }); +// self.update_contacts_for_users(state, &contact_user_ids); +// Ok(response) +// } + +// async fn leave_project( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let sender_id = request.sender_id; +// let project_id = request.payload.project_id; +// let mut state = self.state_mut().await; +// let worktree = state.leave_project(sender_id, project_id)?; +// broadcast(sender_id, worktree.connection_ids, |conn_id| { +// self.peer.send( +// conn_id, +// proto::RemoveProjectCollaborator { +// project_id, +// peer_id: sender_id.0, +// }, +// ) +// }); +// self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); +// Ok(()) +// } + +// async fn register_worktree( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let mut contact_user_ids = HashSet::default(); +// for github_login in &request.payload.authorized_logins { +// let contact_user_id = self.app_state.db.create_user(github_login, false).await?; +// contact_user_ids.insert(contact_user_id); +// } + +// let mut state = self.state_mut().await; +// let host_user_id = state.user_id_for_connection(request.sender_id)?; +// contact_user_ids.insert(host_user_id); + +// let contact_user_ids = contact_user_ids.into_iter().collect::>(); +// let guest_connection_ids = state +// .read_project(request.payload.project_id, request.sender_id)? 
+// .guest_connection_ids(); +// state.register_worktree( +// request.payload.project_id, +// request.payload.worktree_id, +// request.sender_id, +// Worktree { +// authorized_user_ids: contact_user_ids.clone(), +// root_name: request.payload.root_name.clone(), +// visible: request.payload.visible, +// }, +// )?; + +// broadcast(request.sender_id, guest_connection_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// self.update_contacts_for_users(&*state, &contact_user_ids); +// Ok(proto::Ack {}) +// } + +// async fn unregister_worktree( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let project_id = request.payload.project_id; +// let worktree_id = request.payload.worktree_id; +// let mut state = self.state_mut().await; +// let (worktree, guest_connection_ids) = +// state.unregister_worktree(project_id, worktree_id, request.sender_id)?; +// broadcast(request.sender_id, guest_connection_ids, |conn_id| { +// self.peer.send( +// conn_id, +// proto::UnregisterWorktree { +// project_id, +// worktree_id, +// }, +// ) +// }); +// self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); +// Ok(()) +// } + +// async fn update_worktree( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let connection_ids = self.state_mut().await.update_worktree( +// request.sender_id, +// request.payload.project_id, +// request.payload.worktree_id, +// &request.payload.removed_entries, +// &request.payload.updated_entries, +// )?; + +// broadcast(request.sender_id, connection_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); + +// Ok(proto::Ack {}) +// } + +// async fn update_diagnostic_summary( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let summary = request +// .payload +// .summary +// .clone() +// .ok_or_else(|| anyhow!("invalid summary"))?; +// let 
receiver_ids = self.state_mut().await.update_diagnostic_summary( +// request.payload.project_id, +// request.payload.worktree_id, +// request.sender_id, +// summary, +// )?; + +// broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(()) +// } + +// async fn start_language_server( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let receiver_ids = self.state_mut().await.start_language_server( +// request.payload.project_id, +// request.sender_id, +// request +// .payload +// .server +// .clone() +// .ok_or_else(|| anyhow!("invalid language server"))?, +// )?; +// broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(()) +// } + +// async fn update_language_server( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let receiver_ids = self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)?; +// broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(()) +// } + +// async fn forward_project_request( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result +// where +// T: EntityMessage + RequestMessage, +// { +// let host_connection_id = self +// .state() +// .await +// .read_project(request.payload.remote_entity_id(), request.sender_id)? +// .host_connection_id; +// Ok(self +// .peer +// .forward_request(request.sender_id, host_connection_id, request.payload) +// .await?) +// } + +// async fn save_buffer( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let host = self +// .state() +// .await +// .read_project(request.payload.project_id, request.sender_id)? 
+// .host_connection_id; +// let response = self +// .peer +// .forward_request(request.sender_id, host, request.payload.clone()) +// .await?; + +// let mut guests = self +// .state() +// .await +// .read_project(request.payload.project_id, request.sender_id)? +// .connection_ids(); +// guests.retain(|guest_connection_id| *guest_connection_id != request.sender_id); +// broadcast(host, guests, |conn_id| { +// self.peer.forward_send(host, conn_id, response.clone()) +// }); + +// Ok(response) +// } + +// async fn update_buffer( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let receiver_ids = self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)?; +// broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(proto::Ack {}) +// } + +// async fn update_buffer_file( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let receiver_ids = self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)?; +// broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(()) +// } + +// async fn buffer_reloaded( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let receiver_ids = self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)?; +// broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(()) +// } + +// async fn buffer_saved( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let receiver_ids = self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)?; +// 
broadcast(request.sender_id, receiver_ids, |connection_id| { +// self.peer +// .forward_send(request.sender_id, connection_id, request.payload.clone()) +// }); +// Ok(()) +// } + +// async fn follow( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let leader_id = ConnectionId(request.payload.leader_id); +// let follower_id = request.sender_id; +// if !self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, follower_id)? +// .contains(&leader_id) +// { +// Err(anyhow!("no such peer"))?; +// } +// let mut response = self +// .peer +// .forward_request(request.sender_id, leader_id, request.payload) +// .await?; +// response +// .views +// .retain(|view| view.leader_id != Some(follower_id.0)); +// Ok(response) +// } + +// async fn unfollow( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let leader_id = ConnectionId(request.payload.leader_id); +// if !self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)? 
+// .contains(&leader_id) +// { +// Err(anyhow!("no such peer"))?; +// } +// self.peer +// .forward_send(request.sender_id, leader_id, request.payload)?; +// Ok(()) +// } + +// async fn update_followers( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let connection_ids = self +// .state() +// .await +// .project_connection_ids(request.payload.project_id, request.sender_id)?; +// let leader_id = request +// .payload +// .variant +// .as_ref() +// .and_then(|variant| match variant { +// proto::update_followers::Variant::CreateView(payload) => payload.leader_id, +// proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, +// proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, +// }); +// for follower_id in &request.payload.follower_ids { +// let follower_id = ConnectionId(*follower_id); +// if connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { +// self.peer +// .forward_send(request.sender_id, follower_id, request.payload.clone())?; +// } +// } +// Ok(()) +// } + +// async fn get_channels( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let user_id = self +// .state() +// .await +// .user_id_for_connection(request.sender_id)?; +// let channels = self.app_state.db.get_accessible_channels(user_id).await?; +// Ok(proto::GetChannelsResponse { +// channels: channels +// .into_iter() +// .map(|chan| proto::Channel { +// id: chan.id.to_proto(), +// name: chan.name, +// }) +// .collect(), +// }) +// } + +// async fn get_users( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let user_ids = request +// .payload +// .user_ids +// .into_iter() +// .map(UserId::from_proto) +// .collect(); +// let users = self +// .app_state +// .db +// .get_users_by_ids(user_ids) +// .await? 
+// .into_iter() +// .map(|user| proto::User { +// id: user.id.to_proto(), +// avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), +// github_login: user.github_login, +// }) +// .collect(); +// Ok(proto::GetUsersResponse { users }) +// } + +// fn update_contacts_for_users<'a>( +// self: &Arc, +// state: &Store, +// user_ids: impl IntoIterator, +// ) { +// for user_id in user_ids { +// let contacts = state.contacts_for_user(*user_id); +// for connection_id in state.connection_ids_for_user(*user_id) { +// self.peer +// .send( +// connection_id, +// proto::UpdateContacts { +// contacts: contacts.clone(), +// }, +// ) +// .log_err(); +// } +// } +// } + +// async fn join_channel( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let user_id = self +// .state() +// .await +// .user_id_for_connection(request.sender_id)?; +// let channel_id = ChannelId::from_proto(request.payload.channel_id); +// if !self +// .app_state +// .db +// .can_user_access_channel(user_id, channel_id) +// .await? +// { +// Err(anyhow!("access denied"))?; +// } + +// self.state_mut() +// .await +// .join_channel(request.sender_id, channel_id); +// let messages = self +// .app_state +// .db +// .get_channel_messages(channel_id, MESSAGE_COUNT_PER_PAGE, None) +// .await? 
+// .into_iter() +// .map(|msg| proto::ChannelMessage { +// id: msg.id.to_proto(), +// body: msg.body, +// timestamp: msg.sent_at.unix_timestamp() as u64, +// sender_id: msg.sender_id.to_proto(), +// nonce: Some(msg.nonce.as_u128().into()), +// }) +// .collect::>(); +// Ok(proto::JoinChannelResponse { +// done: messages.len() < MESSAGE_COUNT_PER_PAGE, +// messages, +// }) +// } + +// async fn leave_channel( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result<()> { +// let user_id = self +// .state() +// .await +// .user_id_for_connection(request.sender_id)?; +// let channel_id = ChannelId::from_proto(request.payload.channel_id); +// if !self +// .app_state +// .db +// .can_user_access_channel(user_id, channel_id) +// .await? +// { +// Err(anyhow!("access denied"))?; +// } + +// self.state_mut() +// .await +// .leave_channel(request.sender_id, channel_id); + +// Ok(()) +// } + +// async fn send_channel_message( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let channel_id = ChannelId::from_proto(request.payload.channel_id); +// let user_id; +// let connection_ids; +// { +// let state = self.state().await; +// user_id = state.user_id_for_connection(request.sender_id)?; +// connection_ids = state.channel_connection_ids(channel_id)?; +// } + +// // Validate the message body. +// let body = request.payload.body.trim().to_string(); +// if body.len() > MAX_MESSAGE_LEN { +// return Err(anyhow!("message is too long"))?; +// } +// if body.is_empty() { +// return Err(anyhow!("message can't be blank"))?; +// } + +// let timestamp = OffsetDateTime::now_utc(); +// let nonce = request +// .payload +// .nonce +// .ok_or_else(|| anyhow!("nonce can't be blank"))?; + +// let message_id = self +// .app_state +// .db +// .create_channel_message(channel_id, user_id, &body, timestamp, nonce.clone().into()) +// .await? 
+// .to_proto(); +// let message = proto::ChannelMessage { +// sender_id: user_id.to_proto(), +// id: message_id, +// body, +// timestamp: timestamp.unix_timestamp() as u64, +// nonce: Some(nonce), +// }; +// broadcast(request.sender_id, connection_ids, |conn_id| { +// self.peer.send( +// conn_id, +// proto::ChannelMessageSent { +// channel_id: channel_id.to_proto(), +// message: Some(message.clone()), +// }, +// ) +// }); +// Ok(proto::SendChannelMessageResponse { +// message: Some(message), +// }) +// } + +// async fn get_channel_messages( +// self: Arc, +// request: TypedEnvelope, +// ) -> tide::Result { +// let user_id = self +// .state() +// .await +// .user_id_for_connection(request.sender_id)?; +// let channel_id = ChannelId::from_proto(request.payload.channel_id); +// if !self +// .app_state +// .db +// .can_user_access_channel(user_id, channel_id) +// .await? +// { +// Err(anyhow!("access denied"))?; +// } + +// let messages = self +// .app_state +// .db +// .get_channel_messages( +// channel_id, +// MESSAGE_COUNT_PER_PAGE, +// Some(MessageId::from_proto(request.payload.before_message_id)), +// ) +// .await? 
+// .into_iter() +// .map(|msg| proto::ChannelMessage { +// id: msg.id.to_proto(), +// body: msg.body, +// timestamp: msg.sent_at.unix_timestamp() as u64, +// sender_id: msg.sender_id.to_proto(), +// nonce: Some(msg.nonce.as_u128().into()), +// }) +// .collect::>(); + +// Ok(proto::GetChannelMessagesResponse { +// done: messages.len() < MESSAGE_COUNT_PER_PAGE, +// messages, +// }) +// } + +// async fn state<'a>(self: &'a Arc) -> StoreReadGuard<'a> { +// #[cfg(test)] +// async_std::task::yield_now().await; +// let guard = self.store.read().await; +// #[cfg(test)] +// async_std::task::yield_now().await; +// StoreReadGuard { +// guard, +// _not_send: PhantomData, +// } +// } + +// async fn state_mut<'a>(self: &'a Arc) -> StoreWriteGuard<'a> { +// #[cfg(test)] +// async_std::task::yield_now().await; +// let guard = self.store.write().await; +// #[cfg(test)] +// async_std::task::yield_now().await; +// StoreWriteGuard { +// guard, +// _not_send: PhantomData, +// } +// } +// } + +// impl<'a> Deref for StoreReadGuard<'a> { +// type Target = Store; + +// fn deref(&self) -> &Self::Target { +// &*self.guard +// } +// } + +// impl<'a> Deref for StoreWriteGuard<'a> { +// type Target = Store; + +// fn deref(&self) -> &Self::Target { +// &*self.guard +// } +// } + +// impl<'a> DerefMut for StoreWriteGuard<'a> { +// fn deref_mut(&mut self) -> &mut Self::Target { +// &mut *self.guard +// } +// } + +// impl<'a> Drop for StoreWriteGuard<'a> { +// fn drop(&mut self) { +// #[cfg(test)] +// self.check_invariants(); +// } +// } + +// impl Executor for RealExecutor { +// type Timer = Timer; + +// fn spawn_detached>(&self, future: F) { +// task::spawn(future); +// } + +// fn timer(&self, duration: Duration) -> Self::Timer { +// Timer::after(duration) +// } +// } + +// fn broadcast(sender_id: ConnectionId, receiver_ids: Vec, mut f: F) +// where +// F: FnMut(ConnectionId) -> anyhow::Result<()>, +// { +// for receiver_id in receiver_ids { +// if receiver_id != sender_id { +// 
f(receiver_id).log_err(); +// } +// } +// } + +// pub fn add_routes(app: &mut tide::Server>, rpc: &Arc) { +// let server = Server::new(app.state().clone(), rpc.clone(), None); +// app.at("/rpc").get(move |request: Request>| { +// let server = server.clone(); +// async move { +// const WEBSOCKET_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; + +// let connection_upgrade = header_contains_ignore_case(&request, CONNECTION, "upgrade"); +// let upgrade_to_websocket = header_contains_ignore_case(&request, UPGRADE, "websocket"); +// let upgrade_requested = connection_upgrade && upgrade_to_websocket; +// let client_protocol_version: Option = request +// .header("X-Zed-Protocol-Version") +// .and_then(|v| v.as_str().parse().ok()); + +// if !upgrade_requested || client_protocol_version != Some(rpc::PROTOCOL_VERSION) { +// return Ok(Response::new(StatusCode::UpgradeRequired)); +// } + +// let header = match request.header("Sec-Websocket-Key") { +// Some(h) => h.as_str(), +// None => return Err(anyhow!("expected sec-websocket-key"))?, +// }; + +// let user_id = process_auth_header(&request).await?; + +// let mut response = Response::new(StatusCode::SwitchingProtocols); +// response.insert_header(UPGRADE, "websocket"); +// response.insert_header(CONNECTION, "Upgrade"); +// let hash = Sha1::new().chain(header).chain(WEBSOCKET_GUID).finalize(); +// response.insert_header("Sec-Websocket-Accept", base64::encode(&hash[..])); +// response.insert_header("Sec-Websocket-Version", "13"); + +// let http_res: &mut tide::http::Response = response.as_mut(); +// let upgrade_receiver = http_res.recv_upgrade().await; +// let addr = request.remote().unwrap_or("unknown").to_string(); +// task::spawn(async move { +// if let Some(stream) = upgrade_receiver.await { +// server +// .handle_connection( +// Connection::new( +// WebSocketStream::from_raw_socket(stream, Role::Server, None).await, +// ), +// addr, +// user_id, +// None, +// RealExecutor, +// ) +// .await; +// } +// }); + +// 
Ok(response) +// } +// }); +// } + +// fn header_contains_ignore_case( +// request: &tide::Request, +// header_name: HeaderName, +// value: &str, +// ) -> bool { +// request +// .header(header_name) +// .map(|h| { +// h.as_str() +// .split(',') +// .any(|s| s.trim().eq_ignore_ascii_case(value.trim())) +// }) +// .unwrap_or(false) +// } + +// #[cfg(test)] +// mod tests { +// use super::*; +// use crate::{ +// db::{tests::TestDb, UserId}, +// AppState, Config, +// }; +// use ::rpc::Peer; +// use client::{ +// self, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Credentials, +// EstablishConnectionError, UserStore, RECEIVE_TIMEOUT, +// }; +// use collections::BTreeMap; +// use editor::{ +// self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Input, Redo, Rename, +// ToOffset, ToggleCodeActions, Undo, +// }; +// use gpui::{ +// executor::{self, Deterministic}, +// geometry::vector::vec2f, +// ModelHandle, TestAppContext, ViewHandle, +// }; +// use language::{ +// range_to_lsp, tree_sitter_rust, Diagnostic, DiagnosticEntry, FakeLspAdapter, Language, +// LanguageConfig, LanguageRegistry, OffsetRangeExt, Point, Rope, +// }; +// use lsp::{self, FakeLanguageServer}; +// use parking_lot::Mutex; +// use project::{ +// fs::{FakeFs, Fs as _}, +// search::SearchQuery, +// worktree::WorktreeHandle, +// DiagnosticSummary, Project, ProjectPath, WorktreeId, +// }; +// use rand::prelude::*; +// use rpc::PeerId; +// use serde_json::json; +// use settings::Settings; +// use sqlx::types::time::OffsetDateTime; +// use std::{ +// env, +// ops::Deref, +// path::{Path, PathBuf}, +// rc::Rc, +// sync::{ +// atomic::{AtomicBool, Ordering::SeqCst}, +// Arc, +// }, +// time::Duration, +// }; +// use theme::ThemeRegistry; +// use workspace::{Item, SplitDirection, ToggleFollow, Workspace, WorkspaceParams}; + +// #[cfg(test)] +// #[ctor::ctor] +// fn init_logger() { +// if std::env::var("RUST_LOG").is_ok() { +// env_logger::init(); +// } +// } + +// 
#[gpui::test(iterations = 10)] +// async fn test_share_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// let (window_b, _) = cx_b.add_window(|_| EmptyView); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// cx_a.foreground().forbid_parking(); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.txt": "a-contents", +// "b.txt": "b-contents", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// let replica_id_b = project_b.read_with(cx_b, |project, _| { +// assert_eq!( +// project +// .collaborators() +// .get(&client_a.peer_id) +// .unwrap() +// .user +// .github_login, +// "user_a" +// ); +// project.replica_id() +// }); +// project_a +// .condition(&cx_a, |tree, _| { +// tree.collaborators() +// 
.get(&client_b.peer_id) +// .map_or(false, |collaborator| { +// collaborator.replica_id == replica_id_b +// && collaborator.user.github_login == "user_b" +// }) +// }) +// .await; + +// // Open the same file as client B and client A. +// let buffer_b = project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) +// .await +// .unwrap(); +// buffer_b.read_with(cx_b, |buf, _| assert_eq!(buf.text(), "b-contents")); +// project_a.read_with(cx_a, |project, cx| { +// assert!(project.has_open_buffer((worktree_id, "b.txt"), cx)) +// }); +// let buffer_a = project_a +// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) +// .await +// .unwrap(); + +// let editor_b = cx_b.add_view(window_b, |cx| Editor::for_buffer(buffer_b, None, cx)); + +// // TODO +// // // Create a selection set as client B and see that selection set as client A. +// // buffer_a +// // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1) +// // .await; + +// // Edit the buffer as client B and see that edit as client A. +// editor_b.update(cx_b, |editor, cx| { +// editor.handle_input(&Input("ok, ".into()), cx) +// }); +// buffer_a +// .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents") +// .await; + +// // TODO +// // // Remove the selection set as client B, see those selections disappear as client A. +// cx_b.update(move |_| drop(editor_b)); +// // buffer_a +// // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0) +// // .await; + +// // Dropping the client B's project removes client B from client A's collaborators. 
+// cx_b.update(move |_| drop(project_b)); +// project_a +// .condition(&cx_a, |project, _| project.collaborators().is_empty()) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_unshare_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// cx_a.foreground().forbid_parking(); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.txt": "a-contents", +// "b.txt": "b-contents", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); +// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); + +// // Unshare the project as 
client A +// project_a.update(cx_a, |project, cx| project.unshare(cx)); +// project_b +// .condition(cx_b, |project, _| project.is_read_only()) +// .await; +// assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); +// cx_b.update(|_| { +// drop(project_b); +// }); + +// // Share the project again and ensure guests can still join. +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); +// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + +// let project_b2 = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// project_b2 +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_host_disconnect(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// cx_a.foreground().forbid_parking(); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.txt": "a-contents", +// "b.txt": "b-contents", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); +// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); + +// // Drop client A's connection. Collaborators should disappear and the project should not be shown as shared. 
+// server.disconnect_client(client_a.current_user_id(cx_a)); +// cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT); +// project_a +// .condition(cx_a, |project, _| project.collaborators().is_empty()) +// .await; +// project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); +// project_b +// .condition(cx_b, |project, _| project.is_read_only()) +// .await; +// assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); +// cx_b.update(|_| { +// drop(project_b); +// }); + +// // Await reconnection +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + +// // Share the project again and ensure guests can still join. +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); +// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + +// let project_b2 = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// project_b2 +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_propagate_saves_and_fs_changes( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// cx_c: &mut TestAppContext, +// ) { +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// cx_a.foreground().forbid_parking(); + +// // Connect to a server as 3 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; +// let client_c = server.create_client(cx_c, "user_c").await; + +// // Share a worktree as client A. 
+// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, +// "file1": "", +// "file2": "" +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join that worktree as clients B and C. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// let project_c = Project::remote( +// project_id, +// client_c.clone(), +// client_c.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_c.to_async(), +// ) +// .await +// .unwrap(); +// let worktree_b = project_b.read_with(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); +// let worktree_c = project_c.read_with(cx_c, |p, cx| p.worktrees(cx).next().unwrap()); + +// // Open and edit a buffer as both guests B and C. +// let buffer_b = project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) +// .await +// .unwrap(); +// let buffer_c = project_c +// .update(cx_c, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) +// .await +// .unwrap(); +// buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx)); +// buffer_c.update(cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx)); + +// // Open and edit that buffer as the host. 
+// let buffer_a = project_a +// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) +// .await +// .unwrap(); + +// buffer_a +// .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, ") +// .await; +// buffer_a.update(cx_a, |buf, cx| { +// buf.edit([buf.len()..buf.len()], "i-am-a", cx) +// }); + +// // Wait for edits to propagate +// buffer_a +// .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") +// .await; +// buffer_b +// .condition(cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") +// .await; +// buffer_c +// .condition(cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") +// .await; + +// // Edit the buffer as the host and concurrently save as guest B. +// let save_b = buffer_b.update(cx_b, |buf, cx| buf.save(cx)); +// buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx)); +// save_b.await.unwrap(); +// assert_eq!( +// fs.load("/a/file1".as_ref()).await.unwrap(), +// "hi-a, i-am-c, i-am-b, i-am-a" +// ); +// buffer_a.read_with(cx_a, |buf, _| assert!(!buf.is_dirty())); +// buffer_b.read_with(cx_b, |buf, _| assert!(!buf.is_dirty())); +// buffer_c.condition(cx_c, |buf, _| !buf.is_dirty()).await; + +// worktree_a.flush_fs_events(cx_a).await; + +// // Make changes on host's file system, see those changes on guest worktrees. 
+// fs.rename( +// "/a/file1".as_ref(), +// "/a/file1-renamed".as_ref(), +// Default::default(), +// ) +// .await +// .unwrap(); + +// fs.rename("/a/file2".as_ref(), "/a/file3".as_ref(), Default::default()) +// .await +// .unwrap(); +// fs.insert_file(Path::new("/a/file4"), "4".into()).await; + +// worktree_a +// .condition(&cx_a, |tree, _| { +// tree.paths() +// .map(|p| p.to_string_lossy()) +// .collect::>() +// == [".zed.toml", "file1-renamed", "file3", "file4"] +// }) +// .await; +// worktree_b +// .condition(&cx_b, |tree, _| { +// tree.paths() +// .map(|p| p.to_string_lossy()) +// .collect::>() +// == [".zed.toml", "file1-renamed", "file3", "file4"] +// }) +// .await; +// worktree_c +// .condition(&cx_c, |tree, _| { +// tree.paths() +// .map(|p| p.to_string_lossy()) +// .collect::>() +// == [".zed.toml", "file1-renamed", "file3", "file4"] +// }) +// .await; + +// // Ensure buffer files are updated as well. +// buffer_a +// .condition(&cx_a, |buf, _| { +// buf.file().unwrap().path().to_str() == Some("file1-renamed") +// }) +// .await; +// buffer_b +// .condition(&cx_b, |buf, _| { +// buf.file().unwrap().path().to_str() == Some("file1-renamed") +// }) +// .await; +// buffer_c +// .condition(&cx_c, |buf, _| { +// buf.file().unwrap().path().to_str() == Some("file1-renamed") +// }) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_buffer_conflict_after_save(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/dir", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, +// "a.txt": "a-contents", +// }), +// ) +// .await; + +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/dir", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Open a buffer as client B +// let buffer_b = project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); + +// buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "world ", cx)); +// buffer_b.read_with(cx_b, |buf, _| { +// assert!(buf.is_dirty()); +// assert!(!buf.has_conflict()); +// }); + +// buffer_b.update(cx_b, |buf, cx| buf.save(cx)).await.unwrap(); +// buffer_b +// .condition(&cx_b, |buffer_b, _| !buffer_b.is_dirty()) +// .await; +// buffer_b.read_with(cx_b, |buf, _| { +// assert!(!buf.has_conflict()); +// }); + +// buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "hello ", cx)); +// buffer_b.read_with(cx_b, |buf, _| { +// 
assert!(buf.is_dirty()); +// assert!(!buf.has_conflict()); +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_buffer_reloading(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/dir", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, +// "a.txt": "a-contents", +// }), +// ) +// .await; + +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/dir", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// let _worktree_b = project_b.update(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); + +// // Open a buffer as client B +// let buffer_b = project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); +// buffer_b.read_with(cx_b, |buf, _| { +// 
assert!(!buf.is_dirty()); +// assert!(!buf.has_conflict()); +// }); + +// fs.save(Path::new("/dir/a.txt"), &"new contents".into()) +// .await +// .unwrap(); +// buffer_b +// .condition(&cx_b, |buf, _| { +// buf.text() == "new contents" && !buf.is_dirty() +// }) +// .await; +// buffer_b.read_with(cx_b, |buf, _| { +// assert!(!buf.has_conflict()); +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_editing_while_guest_opens_buffer( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/dir", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.txt": "a-contents", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/dir", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// 
// Open a buffer as client A +// let buffer_a = project_a +// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) +// .await +// .unwrap(); + +// // Start opening the same buffer as client B +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); + +// // Edit the buffer as client A while client B is still opening it. +// cx_b.background().simulate_random_delay().await; +// buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "X", cx)); +// cx_b.background().simulate_random_delay().await; +// buffer_a.update(cx_a, |buf, cx| buf.edit([1..1], "Y", cx)); + +// let text = buffer_a.read_with(cx_a, |buf, _| buf.text()); +// let buffer_b = buffer_b.await.unwrap(); +// buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_leaving_worktree_while_opening_buffer( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/dir", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.txt": "a-contents", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/dir", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join that project as client B +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // See that a guest has joined as client A. +// project_a +// .condition(&cx_a, |p, _| p.collaborators().len() == 1) +// .await; + +// // Begin opening a buffer as client B, but leave the project before the open completes. +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); +// cx_b.update(|_| drop(project_b)); +// drop(buffer_b); + +// // See that the guest has left. 
+// project_a +// .condition(&cx_a, |p, _| p.collaborators().len() == 0) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_leaving_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.txt": "a-contents", +// "b.txt": "b-contents", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a +// .update(cx_a, |project, _| project.next_remote_id()) +// .await; +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); + +// // Join that project as client B +// let _project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Client A sees that a guest has joined. +// project_a +// .condition(cx_a, |p, _| p.collaborators().len() == 1) +// .await; + +// // Drop client B's connection and ensure client A observes client B leaving the project. 
+// client_b.disconnect(&cx_b.to_async()).unwrap(); +// project_a +// .condition(cx_a, |p, _| p.collaborators().len() == 0) +// .await; + +// // Rejoin the project as client B +// let _project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Client A sees that a guest has re-joined. +// project_a +// .condition(cx_a, |p, _| p.collaborators().len() == 1) +// .await; + +// // Simulate connection loss for client B and ensure client A observes client B leaving the project. +// client_b.wait_for_current_user(cx_b).await; +// server.disconnect_client(client_b.current_user_id(cx_b)); +// cx_a.foreground().advance_clock(Duration::from_secs(3)); +// project_a +// .condition(cx_a, |p, _| p.collaborators().len() == 0) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_collaborating_with_diagnostics( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.rs": "let one = two", +// "other.rs": "", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Cause the language server to start. +// let _ = cx_a +// .background() +// .spawn(project_a.update(cx_a, |project, cx| { +// project.open_buffer( +// ProjectPath { +// worktree_id, +// path: Path::new("other.rs").into(), +// }, +// cx, +// ) +// })) +// .await +// .unwrap(); + +// // Simulate a language server reporting errors for a file. 
+// let mut fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server +// .receive_notification::() +// .await; +// fake_language_server.notify::( +// lsp::PublishDiagnosticsParams { +// uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), +// version: None, +// diagnostics: vec![lsp::Diagnostic { +// severity: Some(lsp::DiagnosticSeverity::ERROR), +// range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)), +// message: "message 1".to_string(), +// ..Default::default() +// }], +// }, +// ); + +// // Wait for server to see the diagnostics update. +// server +// .condition(|store| { +// let worktree = store +// .project(project_id) +// .unwrap() +// .share +// .as_ref() +// .unwrap() +// .worktrees +// .get(&worktree_id.to_proto()) +// .unwrap(); + +// !worktree.diagnostic_summaries.is_empty() +// }) +// .await; + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// project_b.read_with(cx_b, |project, cx| { +// assert_eq!( +// project.diagnostic_summaries(cx).collect::>(), +// &[( +// ProjectPath { +// worktree_id, +// path: Arc::from(Path::new("a.rs")), +// }, +// DiagnosticSummary { +// error_count: 1, +// warning_count: 0, +// ..Default::default() +// }, +// )] +// ) +// }); + +// // Simulate a language server reporting more errors for a file. 
+// fake_language_server.notify::( +// lsp::PublishDiagnosticsParams { +// uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), +// version: None, +// diagnostics: vec![ +// lsp::Diagnostic { +// severity: Some(lsp::DiagnosticSeverity::ERROR), +// range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)), +// message: "message 1".to_string(), +// ..Default::default() +// }, +// lsp::Diagnostic { +// severity: Some(lsp::DiagnosticSeverity::WARNING), +// range: lsp::Range::new( +// lsp::Position::new(0, 10), +// lsp::Position::new(0, 13), +// ), +// message: "message 2".to_string(), +// ..Default::default() +// }, +// ], +// }, +// ); + +// // Client b gets the updated summaries +// project_b +// .condition(&cx_b, |project, cx| { +// project.diagnostic_summaries(cx).collect::>() +// == &[( +// ProjectPath { +// worktree_id, +// path: Arc::from(Path::new("a.rs")), +// }, +// DiagnosticSummary { +// error_count: 1, +// warning_count: 1, +// ..Default::default() +// }, +// )] +// }) +// .await; + +// // Open the file with the errors on client B. They should be present. 
+// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) +// .await +// .unwrap(); + +// buffer_b.read_with(cx_b, |buffer, _| { +// assert_eq!( +// buffer +// .snapshot() +// .diagnostics_in_range::<_, Point>(0..buffer.len(), false) +// .map(|entry| entry) +// .collect::>(), +// &[ +// DiagnosticEntry { +// range: Point::new(0, 4)..Point::new(0, 7), +// diagnostic: Diagnostic { +// group_id: 0, +// message: "message 1".to_string(), +// severity: lsp::DiagnosticSeverity::ERROR, +// is_primary: true, +// ..Default::default() +// } +// }, +// DiagnosticEntry { +// range: Point::new(0, 10)..Point::new(0, 13), +// diagnostic: Diagnostic { +// group_id: 1, +// severity: lsp::DiagnosticSeverity::WARNING, +// message: "message 2".to_string(), +// is_primary: true, +// ..Default::default() +// } +// } +// ] +// ); +// }); + +// // Simulate a language server reporting no errors for a file. +// fake_language_server.notify::( +// lsp::PublishDiagnosticsParams { +// uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), +// version: None, +// diagnostics: vec![], +// }, +// ); +// project_a +// .condition(cx_a, |project, cx| { +// project.diagnostic_summaries(cx).collect::>() == &[] +// }) +// .await; +// project_b +// .condition(cx_b, |project, cx| { +// project.diagnostic_summaries(cx).collect::>() == &[] +// }) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_collaborating_with_completion( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Set up a fake language server. 
+// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { +// capabilities: lsp::ServerCapabilities { +// completion_provider: Some(lsp::CompletionOptions { +// trigger_characters: Some(vec![".".to_string()]), +// ..Default::default() +// }), +// ..Default::default() +// }, +// ..Default::default() +// }); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "main.rs": "fn main() { a }", +// "other.rs": "", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Open a file in an editor as the guest. 
+// let buffer_b = project_b +// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx)) +// .await +// .unwrap(); +// let (window_b, _) = cx_b.add_window(|_| EmptyView); +// let editor_b = cx_b.add_view(window_b, |cx| { +// Editor::for_buffer(buffer_b.clone(), Some(project_b.clone()), cx) +// }); + +// let fake_language_server = fake_language_servers.next().await.unwrap(); +// buffer_b +// .condition(&cx_b, |buffer, _| !buffer.completion_triggers().is_empty()) +// .await; + +// // Type a completion trigger character as the guest. +// editor_b.update(cx_b, |editor, cx| { +// editor.select_ranges([13..13], None, cx); +// editor.handle_input(&Input(".".into()), cx); +// cx.focus(&editor_b); +// }); + +// // Receive a completion request as the host's language server. +// // Return some completions from the host's language server. +// cx_a.foreground().start_waiting(); +// fake_language_server +// .handle_request::(|params, _| async move { +// assert_eq!( +// params.text_document_position.text_document.uri, +// lsp::Url::from_file_path("/a/main.rs").unwrap(), +// ); +// assert_eq!( +// params.text_document_position.position, +// lsp::Position::new(0, 14), +// ); + +// Ok(Some(lsp::CompletionResponse::Array(vec![ +// lsp::CompletionItem { +// label: "first_method(…)".into(), +// detail: Some("fn(&mut self, B) -> C".into()), +// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { +// new_text: "first_method($1)".to_string(), +// range: lsp::Range::new( +// lsp::Position::new(0, 14), +// lsp::Position::new(0, 14), +// ), +// })), +// insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), +// ..Default::default() +// }, +// lsp::CompletionItem { +// label: "second_method(…)".into(), +// detail: Some("fn(&mut self, C) -> D".into()), +// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { +// new_text: "second_method()".to_string(), +// range: lsp::Range::new( +// lsp::Position::new(0, 14), +// lsp::Position::new(0, 14), +// ), +// })), +// 
insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), +// ..Default::default() +// }, +// ]))) +// }) +// .next() +// .await +// .unwrap(); +// cx_a.foreground().finish_waiting(); + +// // Open the buffer on the host. +// let buffer_a = project_a +// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx)) +// .await +// .unwrap(); +// buffer_a +// .condition(&cx_a, |buffer, _| buffer.text() == "fn main() { a. }") +// .await; + +// // Confirm a completion on the guest. +// editor_b +// .condition(&cx_b, |editor, _| editor.context_menu_visible()) +// .await; +// editor_b.update(cx_b, |editor, cx| { +// editor.confirm_completion(&ConfirmCompletion { item_ix: Some(0) }, cx); +// assert_eq!(editor.text(cx), "fn main() { a.first_method() }"); +// }); + +// // Return a resolved completion from the host's language server. +// // The resolved completion has an additional text edit. +// fake_language_server.handle_request::( +// |params, _| async move { +// assert_eq!(params.label, "first_method(…)"); +// Ok(lsp::CompletionItem { +// label: "first_method(…)".into(), +// detail: Some("fn(&mut self, B) -> C".into()), +// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { +// new_text: "first_method($1)".to_string(), +// range: lsp::Range::new( +// lsp::Position::new(0, 14), +// lsp::Position::new(0, 14), +// ), +// })), +// additional_text_edits: Some(vec![lsp::TextEdit { +// new_text: "use d::SomeTrait;\n".to_string(), +// range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 0)), +// }]), +// insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), +// ..Default::default() +// }) +// }, +// ); + +// // The additional edit is applied. 
+// buffer_a +// .condition(&cx_a, |buffer, _| { +// buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" +// }) +// .await; +// buffer_b +// .condition(&cx_b, |buffer, _| { +// buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" +// }) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_reloading_buffer_manually(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.rs": "let one = 1;", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); +// let buffer_a = project_a +// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx)) +// .await +// .unwrap(); + +// // Join the worktree as client B. 
+// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) +// .await +// .unwrap(); +// buffer_b.update(cx_b, |buffer, cx| { +// buffer.edit([4..7], "six", cx); +// buffer.edit([10..11], "6", cx); +// assert_eq!(buffer.text(), "let six = 6;"); +// assert!(buffer.is_dirty()); +// assert!(!buffer.has_conflict()); +// }); +// buffer_a +// .condition(cx_a, |buffer, _| buffer.text() == "let six = 6;") +// .await; + +// fs.save(Path::new("/a/a.rs"), &Rope::from("let seven = 7;")) +// .await +// .unwrap(); +// buffer_a +// .condition(cx_a, |buffer, _| buffer.has_conflict()) +// .await; +// buffer_b +// .condition(cx_b, |buffer, _| buffer.has_conflict()) +// .await; + +// project_b +// .update(cx_b, |project, cx| { +// project.reload_buffers(HashSet::from_iter([buffer_b.clone()]), true, cx) +// }) +// .await +// .unwrap(); +// buffer_a.read_with(cx_a, |buffer, _| { +// assert_eq!(buffer.text(), "let seven = 7;"); +// assert!(!buffer.is_dirty()); +// assert!(!buffer.has_conflict()); +// }); +// buffer_b.read_with(cx_b, |buffer, _| { +// assert_eq!(buffer.text(), "let seven = 7;"); +// assert!(!buffer.is_dirty()); +// assert!(!buffer.has_conflict()); +// }); + +// buffer_a.update(cx_a, |buffer, cx| { +// // Undoing on the host is a no-op when the reload was initiated by the guest. +// buffer.undo(cx); +// assert_eq!(buffer.text(), "let seven = 7;"); +// assert!(!buffer.is_dirty()); +// assert!(!buffer.has_conflict()); +// }); +// buffer_b.update(cx_b, |buffer, cx| { +// // Undoing on the guest rolls back the buffer to before it was reloaded but the conflict gets cleared. 
+// buffer.undo(cx); +// assert_eq!(buffer.text(), "let six = 6;"); +// assert!(buffer.is_dirty()); +// assert!(!buffer.has_conflict()); +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_formatting_buffer(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.rs": "let one = two", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. 
+// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) +// .await +// .unwrap(); + +// let fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server.handle_request::(|_, _| async move { +// Ok(Some(vec![ +// lsp::TextEdit { +// range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 4)), +// new_text: "h".to_string(), +// }, +// lsp::TextEdit { +// range: lsp::Range::new(lsp::Position::new(0, 7), lsp::Position::new(0, 7)), +// new_text: "y".to_string(), +// }, +// ])) +// }); + +// project_b +// .update(cx_b, |project, cx| { +// project.format(HashSet::from_iter([buffer_b.clone()]), true, cx) +// }) +// .await +// .unwrap(); +// assert_eq!( +// buffer_b.read_with(cx_b, |buffer, _| buffer.text()), +// "let honey = two" +// ); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_definition(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// fs.insert_tree( +// "/root-1", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.rs": "const ONE: usize = b::TWO + b::THREE;", +// }), +// ) +// .await; +// fs.insert_tree( +// "/root-2", +// json!({ +// "b.rs": "const TWO: usize = 2;\nconst THREE: usize = 3;", +// }), +// ) +// .await; + +// // Set up a fake language server. 
+// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/root-1", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Open the file on client B. +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) +// .await +// .unwrap(); + +// // Request the definition of a symbol as the guest. 
+// let fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server.handle_request::( +// |_, _| async move { +// Ok(Some(lsp::GotoDefinitionResponse::Scalar( +// lsp::Location::new( +// lsp::Url::from_file_path("/root-2/b.rs").unwrap(), +// lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), +// ), +// ))) +// }, +// ); + +// let definitions_1 = project_b +// .update(cx_b, |p, cx| p.definition(&buffer_b, 23, cx)) +// .await +// .unwrap(); +// cx_b.read(|cx| { +// assert_eq!(definitions_1.len(), 1); +// assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); +// let target_buffer = definitions_1[0].buffer.read(cx); +// assert_eq!( +// target_buffer.text(), +// "const TWO: usize = 2;\nconst THREE: usize = 3;" +// ); +// assert_eq!( +// definitions_1[0].range.to_point(target_buffer), +// Point::new(0, 6)..Point::new(0, 9) +// ); +// }); + +// // Try getting more definitions for the same buffer, ensuring the buffer gets reused from +// // the previous call to `definition`. 
+// fake_language_server.handle_request::( +// |_, _| async move { +// Ok(Some(lsp::GotoDefinitionResponse::Scalar( +// lsp::Location::new( +// lsp::Url::from_file_path("/root-2/b.rs").unwrap(), +// lsp::Range::new(lsp::Position::new(1, 6), lsp::Position::new(1, 11)), +// ), +// ))) +// }, +// ); + +// let definitions_2 = project_b +// .update(cx_b, |p, cx| p.definition(&buffer_b, 33, cx)) +// .await +// .unwrap(); +// cx_b.read(|cx| { +// assert_eq!(definitions_2.len(), 1); +// assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); +// let target_buffer = definitions_2[0].buffer.read(cx); +// assert_eq!( +// target_buffer.text(), +// "const TWO: usize = 2;\nconst THREE: usize = 3;" +// ); +// assert_eq!( +// definitions_2[0].range.to_point(target_buffer), +// Point::new(1, 6)..Point::new(1, 11) +// ); +// }); +// assert_eq!(definitions_1[0].buffer, definitions_2[0].buffer); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_references(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// fs.insert_tree( +// "/root-1", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "one.rs": "const ONE: usize = 1;", +// "two.rs": "const TWO: usize = one::ONE + one::ONE;", +// }), +// ) +// .await; +// fs.insert_tree( +// "/root-2", +// json!({ +// "three.rs": "const THREE: usize = two::TWO + one::ONE;", +// }), +// ) +// .await; + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/root-1", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Open the file on client B. +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) +// .await +// .unwrap(); + +// // Request references to a symbol as the guest. 
+// let fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server.handle_request::( +// |params, _| async move { +// assert_eq!( +// params.text_document_position.text_document.uri.as_str(), +// "file:///root-1/one.rs" +// ); +// Ok(Some(vec![ +// lsp::Location { +// uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), +// range: lsp::Range::new( +// lsp::Position::new(0, 24), +// lsp::Position::new(0, 27), +// ), +// }, +// lsp::Location { +// uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), +// range: lsp::Range::new( +// lsp::Position::new(0, 35), +// lsp::Position::new(0, 38), +// ), +// }, +// lsp::Location { +// uri: lsp::Url::from_file_path("/root-2/three.rs").unwrap(), +// range: lsp::Range::new( +// lsp::Position::new(0, 37), +// lsp::Position::new(0, 40), +// ), +// }, +// ])) +// }, +// ); + +// let references = project_b +// .update(cx_b, |p, cx| p.references(&buffer_b, 7, cx)) +// .await +// .unwrap(); +// cx_b.read(|cx| { +// assert_eq!(references.len(), 3); +// assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); + +// let two_buffer = references[0].buffer.read(cx); +// let three_buffer = references[2].buffer.read(cx); +// assert_eq!( +// two_buffer.file().unwrap().path().as_ref(), +// Path::new("two.rs") +// ); +// assert_eq!(references[1].buffer, references[0].buffer); +// assert_eq!( +// three_buffer.file().unwrap().full_path(cx), +// Path::new("three.rs") +// ); + +// assert_eq!(references[0].range.to_offset(&two_buffer), 24..27); +// assert_eq!(references[1].range.to_offset(&two_buffer), 35..38); +// assert_eq!(references[2].range.to_offset(&three_buffer), 37..40); +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_project_search(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// fs.insert_tree( +// "/root-1", +// json!({ 
+// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a": "hello world", +// "b": "goodnight moon", +// "c": "a world of goo", +// "d": "world champion of clown world", +// }), +// ) +// .await; +// fs.insert_tree( +// "/root-2", +// json!({ +// "e": "disney world is fun", +// }), +// ) +// .await; + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + +// let (worktree_1, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/root-1", true, cx) +// }) +// .await +// .unwrap(); +// worktree_1 +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let (worktree_2, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/root-2", true, cx) +// }) +// .await +// .unwrap(); +// worktree_2 +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; + +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. 
+// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// let results = project_b +// .update(cx_b, |project, cx| { +// project.search(SearchQuery::text("world", false, false), cx) +// }) +// .await +// .unwrap(); + +// let mut ranges_by_path = results +// .into_iter() +// .map(|(buffer, ranges)| { +// buffer.read_with(cx_b, |buffer, cx| { +// let path = buffer.file().unwrap().full_path(cx); +// let offset_ranges = ranges +// .into_iter() +// .map(|range| range.to_offset(buffer)) +// .collect::>(); +// (path, offset_ranges) +// }) +// }) +// .collect::>(); +// ranges_by_path.sort_by_key(|(path, _)| path.clone()); + +// assert_eq!( +// ranges_by_path, +// &[ +// (PathBuf::from("root-1/a"), vec![6..11]), +// (PathBuf::from("root-1/c"), vec![2..7]), +// (PathBuf::from("root-1/d"), vec![0..5, 24..29]), +// (PathBuf::from("root-2/e"), vec![7..12]), +// ] +// ); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_document_highlights(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// fs.insert_tree( +// "/root-1", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "main.rs": "fn double(number: i32) -> i32 { number + number }", +// }), +// ) +// .await; + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/root-1", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Open the file on client B. +// let buffer_b = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx))) +// .await +// .unwrap(); + +// // Request document highlights as the guest. 
+// let fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server.handle_request::( +// |params, _| async move { +// assert_eq!( +// params +// .text_document_position_params +// .text_document +// .uri +// .as_str(), +// "file:///root-1/main.rs" +// ); +// assert_eq!( +// params.text_document_position_params.position, +// lsp::Position::new(0, 34) +// ); +// Ok(Some(vec![ +// lsp::DocumentHighlight { +// kind: Some(lsp::DocumentHighlightKind::WRITE), +// range: lsp::Range::new( +// lsp::Position::new(0, 10), +// lsp::Position::new(0, 16), +// ), +// }, +// lsp::DocumentHighlight { +// kind: Some(lsp::DocumentHighlightKind::READ), +// range: lsp::Range::new( +// lsp::Position::new(0, 32), +// lsp::Position::new(0, 38), +// ), +// }, +// lsp::DocumentHighlight { +// kind: Some(lsp::DocumentHighlightKind::READ), +// range: lsp::Range::new( +// lsp::Position::new(0, 41), +// lsp::Position::new(0, 47), +// ), +// }, +// ])) +// }, +// ); + +// let highlights = project_b +// .update(cx_b, |p, cx| p.document_highlights(&buffer_b, 34, cx)) +// .await +// .unwrap(); +// buffer_b.read_with(cx_b, |buffer, _| { +// let snapshot = buffer.snapshot(); + +// let highlights = highlights +// .into_iter() +// .map(|highlight| (highlight.kind, highlight.range.to_offset(&snapshot))) +// .collect::>(); +// assert_eq!( +// highlights, +// &[ +// (lsp::DocumentHighlightKind::WRITE, 10..16), +// (lsp::DocumentHighlightKind::READ, 32..38), +// (lsp::DocumentHighlightKind::READ, 41..47) +// ] +// ) +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_project_symbols(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// fs.insert_tree( +// "/code", +// json!({ +// "crate-1": { +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "one.rs": "const ONE: usize = 1;", +// }, +// "crate-2": { +// 
"two.rs": "const TWO: usize = 2; const THREE: usize = 3;", +// }, +// "private": { +// "passwords.txt": "the-password", +// } +// }), +// ) +// .await; + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/code/crate-1", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// // Cause the language server to start. 
+// let _buffer = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) +// .await +// .unwrap(); + +// let fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server.handle_request::( +// |_, _| async move { +// #[allow(deprecated)] +// Ok(Some(vec![lsp::SymbolInformation { +// name: "TWO".into(), +// location: lsp::Location { +// uri: lsp::Url::from_file_path("/code/crate-2/two.rs").unwrap(), +// range: lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), +// }, +// kind: lsp::SymbolKind::CONSTANT, +// tags: None, +// container_name: None, +// deprecated: None, +// }])) +// }, +// ); + +// // Request the definition of a symbol as the guest. +// let symbols = project_b +// .update(cx_b, |p, cx| p.symbols("two", cx)) +// .await +// .unwrap(); +// assert_eq!(symbols.len(), 1); +// assert_eq!(symbols[0].name, "TWO"); + +// // Open one of the returned symbols. +// let buffer_b_2 = project_b +// .update(cx_b, |project, cx| { +// project.open_buffer_for_symbol(&symbols[0], cx) +// }) +// .await +// .unwrap(); +// buffer_b_2.read_with(cx_b, |buffer, _| { +// assert_eq!( +// buffer.file().unwrap().path().as_ref(), +// Path::new("../crate-2/two.rs") +// ); +// }); + +// // Attempt to craft a symbol and violate host's privacy by opening an arbitrary file. 
+// let mut fake_symbol = symbols[0].clone(); +// fake_symbol.path = Path::new("/code/secrets").into(); +// let error = project_b +// .update(cx_b, |project, cx| { +// project.open_buffer_for_symbol(&fake_symbol, cx) +// }) +// .await +// .unwrap_err(); +// assert!(error.to_string().contains("invalid symbol signature")); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_open_buffer_while_getting_definition_pointing_to_it( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// mut rng: StdRng, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// fs.insert_tree( +// "/root", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "a.rs": "const ONE: usize = b::TWO;", +// "b.rs": "const TWO: usize = 2", +// }), +// ) +// .await; + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); + +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/root", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. +// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// let buffer_b1 = cx_b +// .background() +// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) +// .await +// .unwrap(); + +// let fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server.handle_request::( +// |_, _| async move { +// Ok(Some(lsp::GotoDefinitionResponse::Scalar( +// lsp::Location::new( +// lsp::Url::from_file_path("/root/b.rs").unwrap(), +// lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), +// ), +// ))) +// }, +// ); + +// let definitions; +// let buffer_b2; +// if rng.gen() { +// definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); +// buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.rs"), cx)); +// } else { +// buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, 
"b.rs"), cx)); +// definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); +// } + +// let buffer_b2 = buffer_b2.await.unwrap(); +// let definitions = definitions.await.unwrap(); +// assert_eq!(definitions.len(), 1); +// assert_eq!(definitions[0].buffer, buffer_b2); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_collaborating_with_code_actions( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// cx_b.update(|cx| editor::init(cx)); + +// // Set up a fake language server. +// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. 
+// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "main.rs": "mod other;\nfn main() { let foo = other::foo(); }", +// "other.rs": "pub fn foo() -> usize { 4 }", +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. 
+// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// let mut params = cx_b.update(WorkspaceParams::test); +// params.languages = lang_registry.clone(); +// params.client = client_b.client.clone(); +// params.user_store = client_b.user_store.clone(); +// params.project = project_b; + +// let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); +// let editor_b = workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.open_path((worktree_id, "main.rs"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); + +// let mut fake_language_server = fake_language_servers.next().await.unwrap(); +// fake_language_server +// .handle_request::(|params, _| async move { +// assert_eq!( +// params.text_document.uri, +// lsp::Url::from_file_path("/a/main.rs").unwrap(), +// ); +// assert_eq!(params.range.start, lsp::Position::new(0, 0)); +// assert_eq!(params.range.end, lsp::Position::new(0, 0)); +// Ok(None) +// }) +// .next() +// .await; + +// // Move cursor to a location that contains code actions. 
+// editor_b.update(cx_b, |editor, cx| { +// editor.select_ranges([Point::new(1, 31)..Point::new(1, 31)], None, cx); +// cx.focus(&editor_b); +// }); + +// fake_language_server +// .handle_request::(|params, _| async move { +// assert_eq!( +// params.text_document.uri, +// lsp::Url::from_file_path("/a/main.rs").unwrap(), +// ); +// assert_eq!(params.range.start, lsp::Position::new(1, 31)); +// assert_eq!(params.range.end, lsp::Position::new(1, 31)); + +// Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( +// lsp::CodeAction { +// title: "Inline into all callers".to_string(), +// edit: Some(lsp::WorkspaceEdit { +// changes: Some( +// [ +// ( +// lsp::Url::from_file_path("/a/main.rs").unwrap(), +// vec![lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(1, 22), +// lsp::Position::new(1, 34), +// ), +// "4".to_string(), +// )], +// ), +// ( +// lsp::Url::from_file_path("/a/other.rs").unwrap(), +// vec![lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(0, 0), +// lsp::Position::new(0, 27), +// ), +// "".to_string(), +// )], +// ), +// ] +// .into_iter() +// .collect(), +// ), +// ..Default::default() +// }), +// data: Some(json!({ +// "codeActionParams": { +// "range": { +// "start": {"line": 1, "column": 31}, +// "end": {"line": 1, "column": 31}, +// } +// } +// })), +// ..Default::default() +// }, +// )])) +// }) +// .next() +// .await; + +// // Toggle code actions and wait for them to display. +// editor_b.update(cx_b, |editor, cx| { +// editor.toggle_code_actions( +// &ToggleCodeActions { +// deployed_from_indicator: false, +// }, +// cx, +// ); +// }); +// editor_b +// .condition(&cx_b, |editor, _| editor.context_menu_visible()) +// .await; + +// fake_language_server.remove_request_handler::(); + +// // Confirming the code action will trigger a resolve request. 
+// let confirm_action = workspace_b +// .update(cx_b, |workspace, cx| { +// Editor::confirm_code_action(workspace, &ConfirmCodeAction { item_ix: Some(0) }, cx) +// }) +// .unwrap(); +// fake_language_server.handle_request::( +// |_, _| async move { +// Ok(lsp::CodeAction { +// title: "Inline into all callers".to_string(), +// edit: Some(lsp::WorkspaceEdit { +// changes: Some( +// [ +// ( +// lsp::Url::from_file_path("/a/main.rs").unwrap(), +// vec![lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(1, 22), +// lsp::Position::new(1, 34), +// ), +// "4".to_string(), +// )], +// ), +// ( +// lsp::Url::from_file_path("/a/other.rs").unwrap(), +// vec![lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(0, 0), +// lsp::Position::new(0, 27), +// ), +// "".to_string(), +// )], +// ), +// ] +// .into_iter() +// .collect(), +// ), +// ..Default::default() +// }), +// ..Default::default() +// }) +// }, +// ); + +// // After the action is confirmed, an editor containing both modified files is opened. +// confirm_action.await.unwrap(); +// let code_action_editor = workspace_b.read_with(cx_b, |workspace, cx| { +// workspace +// .active_item(cx) +// .unwrap() +// .downcast::() +// .unwrap() +// }); +// code_action_editor.update(cx_b, |editor, cx| { +// assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); +// editor.undo(&Undo, cx); +// assert_eq!( +// editor.text(cx), +// "pub fn foo() -> usize { 4 }\nmod other;\nfn main() { let foo = other::foo(); }" +// ); +// editor.redo(&Redo, cx); +// assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); +// cx_b.update(|cx| editor::init(cx)); + +// // Set up a fake language server. 
+// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// Some(tree_sitter_rust::language()), +// ); +// let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { +// capabilities: lsp::ServerCapabilities { +// rename_provider: Some(lsp::OneOf::Right(lsp::RenameOptions { +// prepare_provider: Some(true), +// work_done_progress_options: Default::default(), +// })), +// ..Default::default() +// }, +// ..Default::default() +// }); +// lang_registry.add(Arc::new(language)); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Share a project as client A +// fs.insert_tree( +// "/dir", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "one.rs": "const ONE: usize = 1;", +// "two.rs": "const TWO: usize = one::ONE + one::ONE;" +// }), +// ) +// .await; +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/dir", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; +// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); +// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + +// // Join the worktree as client B. 
+// let project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); +// let mut params = cx_b.update(WorkspaceParams::test); +// params.languages = lang_registry.clone(); +// params.client = client_b.client.clone(); +// params.user_store = client_b.user_store.clone(); +// params.project = project_b; + +// let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); +// let editor_b = workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.open_path((worktree_id, "one.rs"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); +// let fake_language_server = fake_language_servers.next().await.unwrap(); + +// // Move cursor to a location that can be renamed. +// let prepare_rename = editor_b.update(cx_b, |editor, cx| { +// editor.select_ranges([7..7], None, cx); +// editor.rename(&Rename, cx).unwrap() +// }); + +// fake_language_server +// .handle_request::(|params, _| async move { +// assert_eq!(params.text_document.uri.as_str(), "file:///dir/one.rs"); +// assert_eq!(params.position, lsp::Position::new(0, 7)); +// Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( +// lsp::Position::new(0, 6), +// lsp::Position::new(0, 9), +// )))) +// }) +// .next() +// .await +// .unwrap(); +// prepare_rename.await.unwrap(); +// editor_b.update(cx_b, |editor, cx| { +// let rename = editor.pending_rename().unwrap(); +// let buffer = editor.buffer().read(cx).snapshot(cx); +// assert_eq!( +// rename.range.start.to_offset(&buffer)..rename.range.end.to_offset(&buffer), +// 6..9 +// ); +// rename.editor.update(cx, |rename_editor, cx| { +// rename_editor.buffer().update(cx, |rename_buffer, cx| { +// rename_buffer.edit([0..3], "THREE", cx); +// }); +// }); +// }); + +// let confirm_rename = workspace_b.update(cx_b, |workspace, cx| { +// Editor::confirm_rename(workspace, &ConfirmRename, cx).unwrap() +// }); 
+// fake_language_server +// .handle_request::(|params, _| async move { +// assert_eq!( +// params.text_document_position.text_document.uri.as_str(), +// "file:///dir/one.rs" +// ); +// assert_eq!( +// params.text_document_position.position, +// lsp::Position::new(0, 6) +// ); +// assert_eq!(params.new_name, "THREE"); +// Ok(Some(lsp::WorkspaceEdit { +// changes: Some( +// [ +// ( +// lsp::Url::from_file_path("/dir/one.rs").unwrap(), +// vec![lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(0, 6), +// lsp::Position::new(0, 9), +// ), +// "THREE".to_string(), +// )], +// ), +// ( +// lsp::Url::from_file_path("/dir/two.rs").unwrap(), +// vec![ +// lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(0, 24), +// lsp::Position::new(0, 27), +// ), +// "THREE".to_string(), +// ), +// lsp::TextEdit::new( +// lsp::Range::new( +// lsp::Position::new(0, 35), +// lsp::Position::new(0, 38), +// ), +// "THREE".to_string(), +// ), +// ], +// ), +// ] +// .into_iter() +// .collect(), +// ), +// ..Default::default() +// })) +// }) +// .next() +// .await +// .unwrap(); +// confirm_rename.await.unwrap(); + +// let rename_editor = workspace_b.read_with(cx_b, |workspace, cx| { +// workspace +// .active_item(cx) +// .unwrap() +// .downcast::() +// .unwrap() +// }); +// rename_editor.update(cx_b, |editor, cx| { +// assert_eq!( +// editor.text(cx), +// "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" +// ); +// editor.undo(&Undo, cx); +// assert_eq!( +// editor.text(cx), +// "const TWO: usize = one::ONE + one::ONE;\nconst ONE: usize = 1;" +// ); +// editor.redo(&Redo, cx); +// assert_eq!( +// editor.text(cx), +// "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" +// ); +// }); + +// // Ensure temporary rename edits cannot be undone/redone. 
+// editor_b.update(cx_b, |editor, cx| { +// editor.undo(&Undo, cx); +// assert_eq!(editor.text(cx), "const ONE: usize = 1;"); +// editor.undo(&Undo, cx); +// assert_eq!(editor.text(cx), "const ONE: usize = 1;"); +// editor.redo(&Redo, cx); +// assert_eq!(editor.text(cx), "const THREE: usize = 1;"); +// }) +// } + +// #[gpui::test(iterations = 10)] +// async fn test_basic_chat(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; + +// // Create an org that includes these 2 users. +// let db = &server.app_state.db; +// let org_id = db.create_org("Test Org", "test-org").await.unwrap(); +// db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) +// .await +// .unwrap(); +// db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) +// .await +// .unwrap(); + +// // Create a channel that includes all the users. 
+// let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); +// db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) +// .await +// .unwrap(); +// db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) +// .await +// .unwrap(); +// db.create_channel_message( +// channel_id, +// client_b.current_user_id(&cx_b), +// "hello A, it's B.", +// OffsetDateTime::now_utc(), +// 1, +// ) +// .await +// .unwrap(); + +// let channels_a = cx_a +// .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); +// channels_a +// .condition(cx_a, |list, _| list.available_channels().is_some()) +// .await; +// channels_a.read_with(cx_a, |list, _| { +// assert_eq!( +// list.available_channels().unwrap(), +// &[ChannelDetails { +// id: channel_id.to_proto(), +// name: "test-channel".to_string() +// }] +// ) +// }); +// let channel_a = channels_a.update(cx_a, |this, cx| { +// this.get_channel(channel_id.to_proto(), cx).unwrap() +// }); +// channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); +// channel_a +// .condition(&cx_a, |channel, _| { +// channel_messages(channel) +// == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] +// }) +// .await; + +// let channels_b = cx_b +// .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); +// channels_b +// .condition(cx_b, |list, _| list.available_channels().is_some()) +// .await; +// channels_b.read_with(cx_b, |list, _| { +// assert_eq!( +// list.available_channels().unwrap(), +// &[ChannelDetails { +// id: channel_id.to_proto(), +// name: "test-channel".to_string() +// }] +// ) +// }); + +// let channel_b = channels_b.update(cx_b, |this, cx| { +// this.get_channel(channel_id.to_proto(), cx).unwrap() +// }); +// channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); +// channel_b +// .condition(&cx_b, |channel, _| { +// channel_messages(channel) +// == 
[("user_b".to_string(), "hello A, it's B.".to_string(), false)] +// }) +// .await; + +// channel_a +// .update(cx_a, |channel, cx| { +// channel +// .send_message("oh, hi B.".to_string(), cx) +// .unwrap() +// .detach(); +// let task = channel.send_message("sup".to_string(), cx).unwrap(); +// assert_eq!( +// channel_messages(channel), +// &[ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_a".to_string(), "oh, hi B.".to_string(), true), +// ("user_a".to_string(), "sup".to_string(), true) +// ] +// ); +// task +// }) +// .await +// .unwrap(); + +// channel_b +// .condition(&cx_b, |channel, _| { +// channel_messages(channel) +// == [ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_a".to_string(), "oh, hi B.".to_string(), false), +// ("user_a".to_string(), "sup".to_string(), false), +// ] +// }) +// .await; + +// assert_eq!( +// server +// .state() +// .await +// .channel(channel_id) +// .unwrap() +// .connection_ids +// .len(), +// 2 +// ); +// cx_b.update(|_| drop(channel_b)); +// server +// .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1) +// .await; + +// cx_a.update(|_| drop(channel_a)); +// server +// .condition(|state| state.channel(channel_id).is_none()) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_chat_message_validation(cx_a: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); + +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; + +// let db = &server.app_state.db; +// let org_id = db.create_org("Test Org", "test-org").await.unwrap(); +// let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); +// db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) +// .await +// .unwrap(); +// db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) +// .await +// .unwrap(); + +// let channels_a = 
cx_a +// .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); +// channels_a +// .condition(cx_a, |list, _| list.available_channels().is_some()) +// .await; +// let channel_a = channels_a.update(cx_a, |this, cx| { +// this.get_channel(channel_id.to_proto(), cx).unwrap() +// }); + +// // Messages aren't allowed to be too long. +// channel_a +// .update(cx_a, |channel, cx| { +// let long_body = "this is long.\n".repeat(1024); +// channel.send_message(long_body, cx).unwrap() +// }) +// .await +// .unwrap_err(); + +// // Messages aren't allowed to be blank. +// channel_a.update(cx_a, |channel, cx| { +// channel.send_message(String::new(), cx).unwrap_err() +// }); + +// // Leading and trailing whitespace are trimmed. +// channel_a +// .update(cx_a, |channel, cx| { +// channel +// .send_message("\n surrounded by whitespace \n".to_string(), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// assert_eq!( +// db.get_channel_messages(channel_id, 10, None) +// .await +// .unwrap() +// .iter() +// .map(|m| &m.body) +// .collect::>(), +// &["surrounded by whitespace"] +// ); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_chat_reconnection(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); + +// // Connect to a server as 2 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; +// let mut status_b = client_b.status(); + +// // Create an org that includes these 2 users. +// let db = &server.app_state.db; +// let org_id = db.create_org("Test Org", "test-org").await.unwrap(); +// db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) +// .await +// .unwrap(); +// db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) +// .await +// .unwrap(); + +// // Create a channel that includes all the users. 
+// let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); +// db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) +// .await +// .unwrap(); +// db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) +// .await +// .unwrap(); +// db.create_channel_message( +// channel_id, +// client_b.current_user_id(&cx_b), +// "hello A, it's B.", +// OffsetDateTime::now_utc(), +// 2, +// ) +// .await +// .unwrap(); + +// let channels_a = cx_a +// .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); +// channels_a +// .condition(cx_a, |list, _| list.available_channels().is_some()) +// .await; + +// channels_a.read_with(cx_a, |list, _| { +// assert_eq!( +// list.available_channels().unwrap(), +// &[ChannelDetails { +// id: channel_id.to_proto(), +// name: "test-channel".to_string() +// }] +// ) +// }); +// let channel_a = channels_a.update(cx_a, |this, cx| { +// this.get_channel(channel_id.to_proto(), cx).unwrap() +// }); +// channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); +// channel_a +// .condition(&cx_a, |channel, _| { +// channel_messages(channel) +// == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] +// }) +// .await; + +// let channels_b = cx_b +// .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); +// channels_b +// .condition(cx_b, |list, _| list.available_channels().is_some()) +// .await; +// channels_b.read_with(cx_b, |list, _| { +// assert_eq!( +// list.available_channels().unwrap(), +// &[ChannelDetails { +// id: channel_id.to_proto(), +// name: "test-channel".to_string() +// }] +// ) +// }); + +// let channel_b = channels_b.update(cx_b, |this, cx| { +// this.get_channel(channel_id.to_proto(), cx).unwrap() +// }); +// channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); +// channel_b +// .condition(&cx_b, |channel, _| { +// channel_messages(channel) +// == 
[("user_b".to_string(), "hello A, it's B.".to_string(), false)] +// }) +// .await; + +// // Disconnect client B, ensuring we can still access its cached channel data. +// server.forbid_connections(); +// server.disconnect_client(client_b.current_user_id(&cx_b)); +// cx_b.foreground().advance_clock(Duration::from_secs(3)); +// while !matches!( +// status_b.next().await, +// Some(client::Status::ReconnectionError { .. }) +// ) {} + +// channels_b.read_with(cx_b, |channels, _| { +// assert_eq!( +// channels.available_channels().unwrap(), +// [ChannelDetails { +// id: channel_id.to_proto(), +// name: "test-channel".to_string() +// }] +// ) +// }); +// channel_b.read_with(cx_b, |channel, _| { +// assert_eq!( +// channel_messages(channel), +// [("user_b".to_string(), "hello A, it's B.".to_string(), false)] +// ) +// }); + +// // Send a message from client B while it is disconnected. +// channel_b +// .update(cx_b, |channel, cx| { +// let task = channel +// .send_message("can you see this?".to_string(), cx) +// .unwrap(); +// assert_eq!( +// channel_messages(channel), +// &[ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_b".to_string(), "can you see this?".to_string(), true) +// ] +// ); +// task +// }) +// .await +// .unwrap_err(); + +// // Send a message from client A while B is disconnected. +// channel_a +// .update(cx_a, |channel, cx| { +// channel +// .send_message("oh, hi B.".to_string(), cx) +// .unwrap() +// .detach(); +// let task = channel.send_message("sup".to_string(), cx).unwrap(); +// assert_eq!( +// channel_messages(channel), +// &[ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_a".to_string(), "oh, hi B.".to_string(), true), +// ("user_a".to_string(), "sup".to_string(), true) +// ] +// ); +// task +// }) +// .await +// .unwrap(); + +// // Give client B a chance to reconnect. 
+// server.allow_connections(); +// cx_b.foreground().advance_clock(Duration::from_secs(10)); + +// // Verify that B sees the new messages upon reconnection, as well as the message client B +// // sent while offline. +// channel_b +// .condition(&cx_b, |channel, _| { +// channel_messages(channel) +// == [ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_a".to_string(), "oh, hi B.".to_string(), false), +// ("user_a".to_string(), "sup".to_string(), false), +// ("user_b".to_string(), "can you see this?".to_string(), false), +// ] +// }) +// .await; + +// // Ensure client A and B can communicate normally after reconnection. +// channel_a +// .update(cx_a, |channel, cx| { +// channel.send_message("you online?".to_string(), cx).unwrap() +// }) +// .await +// .unwrap(); +// channel_b +// .condition(&cx_b, |channel, _| { +// channel_messages(channel) +// == [ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_a".to_string(), "oh, hi B.".to_string(), false), +// ("user_a".to_string(), "sup".to_string(), false), +// ("user_b".to_string(), "can you see this?".to_string(), false), +// ("user_a".to_string(), "you online?".to_string(), false), +// ] +// }) +// .await; + +// channel_b +// .update(cx_b, |channel, cx| { +// channel.send_message("yep".to_string(), cx).unwrap() +// }) +// .await +// .unwrap(); +// channel_a +// .condition(&cx_a, |channel, _| { +// channel_messages(channel) +// == [ +// ("user_b".to_string(), "hello A, it's B.".to_string(), false), +// ("user_a".to_string(), "oh, hi B.".to_string(), false), +// ("user_a".to_string(), "sup".to_string(), false), +// ("user_b".to_string(), "can you see this?".to_string(), false), +// ("user_a".to_string(), "you online?".to_string(), false), +// ("user_b".to_string(), "yep".to_string(), false), +// ] +// }) +// .await; +// } + +// #[gpui::test(iterations = 10)] +// async fn test_contacts( +// cx_a: &mut TestAppContext, +// cx_b: &mut TestAppContext, +// cx_c: &mut 
TestAppContext, +// ) { +// cx_a.foreground().forbid_parking(); +// let lang_registry = Arc::new(LanguageRegistry::test()); +// let fs = FakeFs::new(cx_a.background()); + +// // Connect to a server as 3 clients. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let client_a = server.create_client(cx_a, "user_a").await; +// let client_b = server.create_client(cx_b, "user_b").await; +// let client_c = server.create_client(cx_c, "user_c").await; + +// // Share a worktree as client A. +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, +// }), +// ) +// .await; + +// let project_a = cx_a.update(|cx| { +// Project::local( +// client_a.clone(), +// client_a.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let (worktree_a, _) = project_a +// .update(cx_a, |p, cx| { +// p.find_or_create_local_worktree("/a", true, cx) +// }) +// .await +// .unwrap(); +// worktree_a +// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; + +// client_a +// .user_store +// .condition(&cx_a, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] +// }) +// .await; +// client_b +// .user_store +// .condition(&cx_b, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] +// }) +// .await; +// client_c +// .user_store +// .condition(&cx_c, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] +// }) +// .await; + +// let project_id = project_a +// .update(cx_a, |project, _| project.next_remote_id()) +// .await; +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); +// client_a +// .user_store +// .condition(&cx_a, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] +// }) +// .await; +// client_b +// .user_store +// .condition(&cx_b, |user_store, _| { +// 
contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] +// }) +// .await; +// client_c +// .user_store +// .condition(&cx_c, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] +// }) +// .await; + +// let _project_b = Project::remote( +// project_id, +// client_b.clone(), +// client_b.user_store.clone(), +// lang_registry.clone(), +// fs.clone(), +// &mut cx_b.to_async(), +// ) +// .await +// .unwrap(); + +// client_a +// .user_store +// .condition(&cx_a, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] +// }) +// .await; +// client_b +// .user_store +// .condition(&cx_b, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] +// }) +// .await; +// client_c +// .user_store +// .condition(&cx_c, |user_store, _| { +// contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] +// }) +// .await; + +// project_a +// .condition(&cx_a, |project, _| { +// project.collaborators().contains_key(&client_b.peer_id) +// }) +// .await; + +// cx_a.update(move |_| drop(project_a)); +// client_a +// .user_store +// .condition(&cx_a, |user_store, _| contacts(user_store) == vec![]) +// .await; +// client_b +// .user_store +// .condition(&cx_b, |user_store, _| contacts(user_store) == vec![]) +// .await; +// client_c +// .user_store +// .condition(&cx_c, |user_store, _| contacts(user_store) == vec![]) +// .await; + +// fn contacts(user_store: &UserStore) -> Vec<(&str, Vec<(&str, bool, Vec<&str>)>)> { +// user_store +// .contacts() +// .iter() +// .map(|contact| { +// let worktrees = contact +// .projects +// .iter() +// .map(|p| { +// ( +// p.worktree_root_names[0].as_str(), +// p.is_shared, +// p.guests.iter().map(|p| p.github_login.as_str()).collect(), +// ) +// }) +// .collect(); +// (contact.user.github_login.as_str(), worktrees) +// }) +// .collect() +// } +// } + +// #[gpui::test(iterations = 10)] +// async fn 
test_following(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let fs = FakeFs::new(cx_a.background()); + +// // 2 clients connect to a server. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let mut client_a = server.create_client(cx_a, "user_a").await; +// let mut client_b = server.create_client(cx_b, "user_b").await; +// cx_a.update(editor::init); +// cx_b.update(editor::init); + +// // Client A shares a project. +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "1.txt": "one", +// "2.txt": "two", +// "3.txt": "three", +// }), +// ) +// .await; +// let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); + +// // Client B joins the project. +// let project_b = client_b +// .build_remote_project( +// project_a +// .read_with(cx_a, |project, _| project.remote_id()) +// .unwrap(), +// cx_b, +// ) +// .await; + +// // Client A opens some editors. +// let workspace_a = client_a.build_workspace(&project_a, cx_a); +// let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); +// let editor_a1 = workspace_a +// .update(cx_a, |workspace, cx| { +// workspace.open_path((worktree_id, "1.txt"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); +// let editor_a2 = workspace_a +// .update(cx_a, |workspace, cx| { +// workspace.open_path((worktree_id, "2.txt"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); + +// // Client B opens an editor. 
+// let workspace_b = client_b.build_workspace(&project_b, cx_b); +// let editor_b1 = workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.open_path((worktree_id, "1.txt"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); + +// let client_a_id = project_b.read_with(cx_b, |project, _| { +// project.collaborators().values().next().unwrap().peer_id +// }); +// let client_b_id = project_a.read_with(cx_a, |project, _| { +// project.collaborators().values().next().unwrap().peer_id +// }); + +// // When client B starts following client A, all visible view states are replicated to client B. +// editor_a1.update(cx_a, |editor, cx| editor.select_ranges([0..1], None, cx)); +// editor_a2.update(cx_a, |editor, cx| editor.select_ranges([2..3], None, cx)); +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace +// .toggle_follow(&ToggleFollow(client_a_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { +// workspace +// .active_item(cx) +// .unwrap() +// .downcast::() +// .unwrap() +// }); +// assert!(cx_b.read(|cx| editor_b2.is_focused(cx))); +// assert_eq!( +// editor_b2.read_with(cx_b, |editor, cx| editor.project_path(cx)), +// Some((worktree_id, "2.txt").into()) +// ); +// assert_eq!( +// editor_b2.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), +// vec![2..3] +// ); +// assert_eq!( +// editor_b1.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), +// vec![0..1] +// ); + +// // When client A activates a different editor, client B does so as well. +// workspace_a.update(cx_a, |workspace, cx| { +// workspace.activate_item(&editor_a1, cx) +// }); +// workspace_b +// .condition(cx_b, |workspace, cx| { +// workspace.active_item(cx).unwrap().id() == editor_b1.id() +// }) +// .await; + +// // When client A navigates back and forth, client B does so as well. 
+// workspace_a +// .update(cx_a, |workspace, cx| { +// workspace::Pane::go_back(workspace, None, cx) +// }) +// .await; +// workspace_b +// .condition(cx_b, |workspace, cx| { +// workspace.active_item(cx).unwrap().id() == editor_b2.id() +// }) +// .await; + +// workspace_a +// .update(cx_a, |workspace, cx| { +// workspace::Pane::go_forward(workspace, None, cx) +// }) +// .await; +// workspace_b +// .condition(cx_b, |workspace, cx| { +// workspace.active_item(cx).unwrap().id() == editor_b1.id() +// }) +// .await; + +// // Changes to client A's editor are reflected on client B. +// editor_a1.update(cx_a, |editor, cx| { +// editor.select_ranges([1..1, 2..2], None, cx); +// }); +// editor_b1 +// .condition(cx_b, |editor, cx| { +// editor.selected_ranges(cx) == vec![1..1, 2..2] +// }) +// .await; + +// editor_a1.update(cx_a, |editor, cx| editor.set_text("TWO", cx)); +// editor_b1 +// .condition(cx_b, |editor, cx| editor.text(cx) == "TWO") +// .await; + +// editor_a1.update(cx_a, |editor, cx| { +// editor.select_ranges([3..3], None, cx); +// editor.set_scroll_position(vec2f(0., 100.), cx); +// }); +// editor_b1 +// .condition(cx_b, |editor, cx| editor.selected_ranges(cx) == vec![3..3]) +// .await; + +// // After unfollowing, client B stops receiving updates from client A. +// workspace_b.update(cx_b, |workspace, cx| { +// workspace.unfollow(&workspace.active_pane().clone(), cx) +// }); +// workspace_a.update(cx_a, |workspace, cx| { +// workspace.activate_item(&editor_a2, cx) +// }); +// cx_a.foreground().run_until_parked(); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, cx| workspace +// .active_item(cx) +// .unwrap() +// .id()), +// editor_b1.id() +// ); + +// // Client A starts following client B. 
+// workspace_a +// .update(cx_a, |workspace, cx| { +// workspace +// .toggle_follow(&ToggleFollow(client_b_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// assert_eq!( +// workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), +// Some(client_b_id) +// ); +// assert_eq!( +// workspace_a.read_with(cx_a, |workspace, cx| workspace +// .active_item(cx) +// .unwrap() +// .id()), +// editor_a1.id() +// ); + +// // Following interrupts when client B disconnects. +// client_b.disconnect(&cx_b.to_async()).unwrap(); +// cx_a.foreground().run_until_parked(); +// assert_eq!( +// workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), +// None +// ); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_peers_following_each_other(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let fs = FakeFs::new(cx_a.background()); + +// // 2 clients connect to a server. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let mut client_a = server.create_client(cx_a, "user_a").await; +// let mut client_b = server.create_client(cx_b, "user_b").await; +// cx_a.update(editor::init); +// cx_b.update(editor::init); + +// // Client A shares a project. +// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "1.txt": "one", +// "2.txt": "two", +// "3.txt": "three", +// "4.txt": "four", +// }), +// ) +// .await; +// let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); + +// // Client B joins the project. +// let project_b = client_b +// .build_remote_project( +// project_a +// .read_with(cx_a, |project, _| project.remote_id()) +// .unwrap(), +// cx_b, +// ) +// .await; + +// // Client A opens some editors. 
+// let workspace_a = client_a.build_workspace(&project_a, cx_a); +// let pane_a1 = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); +// let _editor_a1 = workspace_a +// .update(cx_a, |workspace, cx| { +// workspace.open_path((worktree_id, "1.txt"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); + +// // Client B opens an editor. +// let workspace_b = client_b.build_workspace(&project_b, cx_b); +// let pane_b1 = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); +// let _editor_b1 = workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.open_path((worktree_id, "2.txt"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); + +// // Clients A and B follow each other in split panes +// workspace_a +// .update(cx_a, |workspace, cx| { +// workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); +// assert_ne!(*workspace.active_pane(), pane_a1); +// let leader_id = *project_a.read(cx).collaborators().keys().next().unwrap(); +// workspace +// .toggle_follow(&workspace::ToggleFollow(leader_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); +// assert_ne!(*workspace.active_pane(), pane_b1); +// let leader_id = *project_b.read(cx).collaborators().keys().next().unwrap(); +// workspace +// .toggle_follow(&workspace::ToggleFollow(leader_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); + +// workspace_a +// .update(cx_a, |workspace, cx| { +// workspace.activate_next_pane(cx); +// assert_eq!(*workspace.active_pane(), pane_a1); +// workspace.open_path((worktree_id, "3.txt"), cx) +// }) +// .await +// .unwrap(); +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.activate_next_pane(cx); +// assert_eq!(*workspace.active_pane(), pane_b1); +// workspace.open_path((worktree_id, "4.txt"), cx) +// }) +// 
.await +// .unwrap(); +// cx_a.foreground().run_until_parked(); + +// // Ensure leader updates don't change the active pane of followers +// workspace_a.read_with(cx_a, |workspace, _| { +// assert_eq!(*workspace.active_pane(), pane_a1); +// }); +// workspace_b.read_with(cx_b, |workspace, _| { +// assert_eq!(*workspace.active_pane(), pane_b1); +// }); + +// // Ensure peers following each other doesn't cause an infinite loop. +// assert_eq!( +// workspace_a.read_with(cx_a, |workspace, cx| workspace +// .active_item(cx) +// .unwrap() +// .project_path(cx)), +// Some((worktree_id, "3.txt").into()) +// ); +// workspace_a.update(cx_a, |workspace, cx| { +// assert_eq!( +// workspace.active_item(cx).unwrap().project_path(cx), +// Some((worktree_id, "3.txt").into()) +// ); +// workspace.activate_next_pane(cx); +// assert_eq!( +// workspace.active_item(cx).unwrap().project_path(cx), +// Some((worktree_id, "4.txt").into()) +// ); +// }); +// workspace_b.update(cx_b, |workspace, cx| { +// assert_eq!( +// workspace.active_item(cx).unwrap().project_path(cx), +// Some((worktree_id, "4.txt").into()) +// ); +// workspace.activate_next_pane(cx); +// assert_eq!( +// workspace.active_item(cx).unwrap().project_path(cx), +// Some((worktree_id, "3.txt").into()) +// ); +// }); +// } + +// #[gpui::test(iterations = 10)] +// async fn test_auto_unfollowing(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { +// cx_a.foreground().forbid_parking(); +// let fs = FakeFs::new(cx_a.background()); + +// // 2 clients connect to a server. +// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; +// let mut client_a = server.create_client(cx_a, "user_a").await; +// let mut client_b = server.create_client(cx_b, "user_b").await; +// cx_a.update(editor::init); +// cx_b.update(editor::init); + +// // Client A shares a project. 
+// fs.insert_tree( +// "/a", +// json!({ +// ".zed.toml": r#"collaborators = ["user_b"]"#, +// "1.txt": "one", +// "2.txt": "two", +// "3.txt": "three", +// }), +// ) +// .await; +// let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; +// project_a +// .update(cx_a, |project, cx| project.share(cx)) +// .await +// .unwrap(); + +// // Client B joins the project. +// let project_b = client_b +// .build_remote_project( +// project_a +// .read_with(cx_a, |project, _| project.remote_id()) +// .unwrap(), +// cx_b, +// ) +// .await; + +// // Client A opens some editors. +// let workspace_a = client_a.build_workspace(&project_a, cx_a); +// let _editor_a1 = workspace_a +// .update(cx_a, |workspace, cx| { +// workspace.open_path((worktree_id, "1.txt"), cx) +// }) +// .await +// .unwrap() +// .downcast::() +// .unwrap(); + +// // Client B starts following client A. +// let workspace_b = client_b.build_workspace(&project_b, cx_b); +// let pane_b = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); +// let leader_id = project_b.read_with(cx_b, |project, _| { +// project.collaborators().values().next().unwrap().peer_id +// }); +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace +// .toggle_follow(&ToggleFollow(leader_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// Some(leader_id) +// ); +// let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { +// workspace +// .active_item(cx) +// .unwrap() +// .downcast::() +// .unwrap() +// }); + +// // When client B moves, it automatically stops following client A. 
+// editor_b2.update(cx_b, |editor, cx| editor.move_right(&editor::MoveRight, cx)); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// None +// ); + +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace +// .toggle_follow(&ToggleFollow(leader_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// Some(leader_id) +// ); + +// // When client B edits, it automatically stops following client A. +// editor_b2.update(cx_b, |editor, cx| editor.insert("X", cx)); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// None +// ); + +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace +// .toggle_follow(&ToggleFollow(leader_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// Some(leader_id) +// ); + +// // When client B scrolls, it automatically stops following client A. +// editor_b2.update(cx_b, |editor, cx| { +// editor.set_scroll_position(vec2f(0., 3.), cx) +// }); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// None +// ); + +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace +// .toggle_follow(&ToggleFollow(leader_id), cx) +// .unwrap() +// }) +// .await +// .unwrap(); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// Some(leader_id) +// ); + +// // When client B activates a different pane, it continues following client A in the original pane. 
+// workspace_b.update(cx_b, |workspace, cx| { +// workspace.split_pane(pane_b.clone(), SplitDirection::Right, cx) +// }); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// Some(leader_id) +// ); + +// workspace_b.update(cx_b, |workspace, cx| workspace.activate_next_pane(cx)); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// Some(leader_id) +// ); + +// // When client B activates a different item in the original pane, it automatically stops following client A. +// workspace_b +// .update(cx_b, |workspace, cx| { +// workspace.open_path((worktree_id, "2.txt"), cx) +// }) +// .await +// .unwrap(); +// assert_eq!( +// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), +// None +// ); +// } + +// #[gpui::test(iterations = 100)] +// async fn test_random_collaboration( +// cx: &mut TestAppContext, +// deterministic: Arc, +// rng: StdRng, +// ) { +// cx.foreground().forbid_parking(); +// let max_peers = env::var("MAX_PEERS") +// .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) +// .unwrap_or(5); +// assert!(max_peers <= 5); + +// let max_operations = env::var("OPERATIONS") +// .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) +// .unwrap_or(10); + +// let rng = Arc::new(Mutex::new(rng)); + +// let guest_lang_registry = Arc::new(LanguageRegistry::test()); +// let host_language_registry = Arc::new(LanguageRegistry::test()); + +// let fs = FakeFs::new(cx.background()); +// fs.insert_tree( +// "/_collab", +// json!({ +// ".zed.toml": r#"collaborators = ["guest-1", "guest-2", "guest-3", "guest-4"]"# +// }), +// ) +// .await; + +// let mut server = TestServer::start(cx.foreground(), cx.background()).await; +// let mut clients = Vec::new(); +// let mut user_ids = Vec::new(); +// let mut op_start_signals = Vec::new(); +// let files = Arc::new(Mutex::new(Vec::new())); + +// let mut next_entity_id = 100000; +// let mut host_cx 
= TestAppContext::new( +// cx.foreground_platform(), +// cx.platform(), +// deterministic.build_foreground(next_entity_id), +// deterministic.build_background(), +// cx.font_cache(), +// cx.leak_detector(), +// next_entity_id, +// ); +// let host = server.create_client(&mut host_cx, "host").await; +// let host_project = host_cx.update(|cx| { +// Project::local( +// host.client.clone(), +// host.user_store.clone(), +// host_language_registry.clone(), +// fs.clone(), +// cx, +// ) +// }); +// let host_project_id = host_project +// .update(&mut host_cx, |p, _| p.next_remote_id()) +// .await; + +// let (collab_worktree, _) = host_project +// .update(&mut host_cx, |project, cx| { +// project.find_or_create_local_worktree("/_collab", true, cx) +// }) +// .await +// .unwrap(); +// collab_worktree +// .read_with(&host_cx, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// host_project +// .update(&mut host_cx, |project, cx| project.share(cx)) +// .await +// .unwrap(); + +// // Set up fake language servers. 
+// let mut language = Language::new( +// LanguageConfig { +// name: "Rust".into(), +// path_suffixes: vec!["rs".to_string()], +// ..Default::default() +// }, +// None, +// ); +// let _fake_servers = language.set_fake_lsp_adapter(FakeLspAdapter { +// name: "the-fake-language-server", +// capabilities: lsp::LanguageServer::full_capabilities(), +// initializer: Some(Box::new({ +// let rng = rng.clone(); +// let files = files.clone(); +// let project = host_project.downgrade(); +// move |fake_server: &mut FakeLanguageServer| { +// fake_server.handle_request::( +// |_, _| async move { +// Ok(Some(lsp::CompletionResponse::Array(vec![ +// lsp::CompletionItem { +// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { +// range: lsp::Range::new( +// lsp::Position::new(0, 0), +// lsp::Position::new(0, 0), +// ), +// new_text: "the-new-text".to_string(), +// })), +// ..Default::default() +// }, +// ]))) +// }, +// ); + +// fake_server.handle_request::( +// |_, _| async move { +// Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( +// lsp::CodeAction { +// title: "the-code-action".to_string(), +// ..Default::default() +// }, +// )])) +// }, +// ); + +// fake_server.handle_request::( +// |params, _| async move { +// Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( +// params.position, +// params.position, +// )))) +// }, +// ); + +// fake_server.handle_request::({ +// let files = files.clone(); +// let rng = rng.clone(); +// move |_, _| { +// let files = files.clone(); +// let rng = rng.clone(); +// async move { +// let files = files.lock(); +// let mut rng = rng.lock(); +// let count = rng.gen_range::(1..3); +// let files = (0..count) +// .map(|_| files.choose(&mut *rng).unwrap()) +// .collect::>(); +// log::info!("LSP: Returning definitions in files {:?}", &files); +// Ok(Some(lsp::GotoDefinitionResponse::Array( +// files +// .into_iter() +// .map(|file| lsp::Location { +// uri: lsp::Url::from_file_path(file).unwrap(), +// range: Default::default(), +// }) 
+// .collect(), +// ))) +// } +// } +// }); + +// fake_server.handle_request::({ +// let rng = rng.clone(); +// let project = project.clone(); +// move |params, mut cx| { +// let highlights = if let Some(project) = project.upgrade(&cx) { +// project.update(&mut cx, |project, cx| { +// let path = params +// .text_document_position_params +// .text_document +// .uri +// .to_file_path() +// .unwrap(); +// let (worktree, relative_path) = +// project.find_local_worktree(&path, cx)?; +// let project_path = +// ProjectPath::from((worktree.read(cx).id(), relative_path)); +// let buffer = +// project.get_open_buffer(&project_path, cx)?.read(cx); + +// let mut highlights = Vec::new(); +// let highlight_count = rng.lock().gen_range(1..=5); +// let mut prev_end = 0; +// for _ in 0..highlight_count { +// let range = +// buffer.random_byte_range(prev_end, &mut *rng.lock()); + +// highlights.push(lsp::DocumentHighlight { +// range: range_to_lsp(range.to_point_utf16(buffer)), +// kind: Some(lsp::DocumentHighlightKind::READ), +// }); +// prev_end = range.end; +// } +// Some(highlights) +// }) +// } else { +// None +// }; +// async move { Ok(highlights) } +// } +// }); +// } +// })), +// ..Default::default() +// }); +// host_language_registry.add(Arc::new(language)); + +// let op_start_signal = futures::channel::mpsc::unbounded(); +// user_ids.push(host.current_user_id(&host_cx)); +// op_start_signals.push(op_start_signal.0); +// clients.push(host_cx.foreground().spawn(host.simulate_host( +// host_project, +// files, +// op_start_signal.1, +// rng.clone(), +// host_cx, +// ))); + +// let disconnect_host_at = if rng.lock().gen_bool(0.2) { +// rng.lock().gen_range(0..max_operations) +// } else { +// max_operations +// }; +// let mut available_guests = vec![ +// "guest-1".to_string(), +// "guest-2".to_string(), +// "guest-3".to_string(), +// "guest-4".to_string(), +// ]; +// let mut operations = 0; +// while operations < max_operations { +// if operations == disconnect_host_at { +// 
server.disconnect_client(user_ids[0]); +// cx.foreground().advance_clock(RECEIVE_TIMEOUT); +// drop(op_start_signals); +// let mut clients = futures::future::join_all(clients).await; +// cx.foreground().run_until_parked(); + +// let (host, mut host_cx, host_err) = clients.remove(0); +// if let Some(host_err) = host_err { +// log::error!("host error - {}", host_err); +// } +// host.project +// .as_ref() +// .unwrap() +// .read_with(&host_cx, |project, _| assert!(!project.is_shared())); +// for (guest, mut guest_cx, guest_err) in clients { +// if let Some(guest_err) = guest_err { +// log::error!("{} error - {}", guest.username, guest_err); +// } +// let contacts = server +// .store +// .read() +// .await +// .contacts_for_user(guest.current_user_id(&guest_cx)); +// assert!(!contacts +// .iter() +// .flat_map(|contact| &contact.projects) +// .any(|project| project.id == host_project_id)); +// guest +// .project +// .as_ref() +// .unwrap() +// .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); +// guest_cx.update(|_| drop(guest)); +// } +// host_cx.update(|_| drop(host)); + +// return; +// } + +// let distribution = rng.lock().gen_range(0..100); +// match distribution { +// 0..=19 if !available_guests.is_empty() => { +// let guest_ix = rng.lock().gen_range(0..available_guests.len()); +// let guest_username = available_guests.remove(guest_ix); +// log::info!("Adding new connection for {}", guest_username); +// next_entity_id += 100000; +// let mut guest_cx = TestAppContext::new( +// cx.foreground_platform(), +// cx.platform(), +// deterministic.build_foreground(next_entity_id), +// deterministic.build_background(), +// cx.font_cache(), +// cx.leak_detector(), +// next_entity_id, +// ); +// let guest = server.create_client(&mut guest_cx, &guest_username).await; +// let guest_project = Project::remote( +// host_project_id, +// guest.client.clone(), +// guest.user_store.clone(), +// guest_lang_registry.clone(), +// FakeFs::new(cx.background()), +// &mut 
guest_cx.to_async(), +// ) +// .await +// .unwrap(); +// let op_start_signal = futures::channel::mpsc::unbounded(); +// user_ids.push(guest.current_user_id(&guest_cx)); +// op_start_signals.push(op_start_signal.0); +// clients.push(guest_cx.foreground().spawn(guest.simulate_guest( +// guest_username.clone(), +// guest_project, +// op_start_signal.1, +// rng.clone(), +// guest_cx, +// ))); + +// log::info!("Added connection for {}", guest_username); +// operations += 1; +// } +// 20..=29 if clients.len() > 1 => { +// log::info!("Removing guest"); +// let guest_ix = rng.lock().gen_range(1..clients.len()); +// let removed_guest_id = user_ids.remove(guest_ix); +// let guest = clients.remove(guest_ix); +// op_start_signals.remove(guest_ix); +// server.disconnect_client(removed_guest_id); +// cx.foreground().advance_clock(RECEIVE_TIMEOUT); +// let (guest, mut guest_cx, guest_err) = guest.await; +// if let Some(guest_err) = guest_err { +// log::error!("{} error - {}", guest.username, guest_err); +// } +// guest +// .project +// .as_ref() +// .unwrap() +// .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); +// for user_id in &user_ids { +// for contact in server.store.read().await.contacts_for_user(*user_id) { +// assert_ne!( +// contact.user_id, removed_guest_id.0 as u64, +// "removed guest is still a contact of another peer" +// ); +// for project in contact.projects { +// for project_guest_id in project.guests { +// assert_ne!( +// project_guest_id, removed_guest_id.0 as u64, +// "removed guest appears as still participating on a project" +// ); +// } +// } +// } +// } + +// log::info!("{} removed", guest.username); +// available_guests.push(guest.username.clone()); +// guest_cx.update(|_| drop(guest)); + +// operations += 1; +// } +// _ => { +// while operations < max_operations && rng.lock().gen_bool(0.7) { +// op_start_signals +// .choose(&mut *rng.lock()) +// .unwrap() +// .unbounded_send(()) +// .unwrap(); +// operations += 1; +// } + +// if 
rng.lock().gen_bool(0.8) { +// cx.foreground().run_until_parked(); +// } +// } +// } +// } + +// drop(op_start_signals); +// let mut clients = futures::future::join_all(clients).await; +// cx.foreground().run_until_parked(); + +// let (host_client, mut host_cx, host_err) = clients.remove(0); +// if let Some(host_err) = host_err { +// panic!("host error - {}", host_err); +// } +// let host_project = host_client.project.as_ref().unwrap(); +// let host_worktree_snapshots = host_project.read_with(&host_cx, |project, cx| { +// project +// .worktrees(cx) +// .map(|worktree| { +// let snapshot = worktree.read(cx).snapshot(); +// (snapshot.id(), snapshot) +// }) +// .collect::>() +// }); + +// host_client +// .project +// .as_ref() +// .unwrap() +// .read_with(&host_cx, |project, cx| project.check_invariants(cx)); + +// for (guest_client, mut guest_cx, guest_err) in clients.into_iter() { +// if let Some(guest_err) = guest_err { +// panic!("{} error - {}", guest_client.username, guest_err); +// } +// let worktree_snapshots = +// guest_client +// .project +// .as_ref() +// .unwrap() +// .read_with(&guest_cx, |project, cx| { +// project +// .worktrees(cx) +// .map(|worktree| { +// let worktree = worktree.read(cx); +// (worktree.id(), worktree.snapshot()) +// }) +// .collect::>() +// }); + +// assert_eq!( +// worktree_snapshots.keys().collect::>(), +// host_worktree_snapshots.keys().collect::>(), +// "{} has different worktrees than the host", +// guest_client.username +// ); +// for (id, host_snapshot) in &host_worktree_snapshots { +// let guest_snapshot = &worktree_snapshots[id]; +// assert_eq!( +// guest_snapshot.root_name(), +// host_snapshot.root_name(), +// "{} has different root name than the host for worktree {}", +// guest_client.username, +// id +// ); +// assert_eq!( +// guest_snapshot.entries(false).collect::>(), +// host_snapshot.entries(false).collect::>(), +// "{} has different snapshot than the host for worktree {}", +// guest_client.username, +// id +// ); +// 
} + +// guest_client +// .project +// .as_ref() +// .unwrap() +// .read_with(&guest_cx, |project, cx| project.check_invariants(cx)); + +// for guest_buffer in &guest_client.buffers { +// let buffer_id = guest_buffer.read_with(&guest_cx, |buffer, _| buffer.remote_id()); +// let host_buffer = host_project.read_with(&host_cx, |project, cx| { +// project.buffer_for_id(buffer_id, cx).expect(&format!( +// "host does not have buffer for guest:{}, peer:{}, id:{}", +// guest_client.username, guest_client.peer_id, buffer_id +// )) +// }); +// let path = host_buffer +// .read_with(&host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + +// assert_eq!( +// guest_buffer.read_with(&guest_cx, |buffer, _| buffer.deferred_ops_len()), +// 0, +// "{}, buffer {}, path {:?} has deferred operations", +// guest_client.username, +// buffer_id, +// path, +// ); +// assert_eq!( +// guest_buffer.read_with(&guest_cx, |buffer, _| buffer.text()), +// host_buffer.read_with(&host_cx, |buffer, _| buffer.text()), +// "{}, buffer {}, path {:?}, differs from the host's buffer", +// guest_client.username, +// buffer_id, +// path +// ); +// } + +// guest_cx.update(|_| drop(guest_client)); +// } + +// host_cx.update(|_| drop(host_client)); +// } + +// struct TestServer { +// peer: Arc, +// app_state: Arc, +// server: Arc, +// foreground: Rc, +// notifications: mpsc::UnboundedReceiver<()>, +// connection_killers: Arc>>>, +// forbid_connections: Arc, +// _test_db: TestDb, +// } + +// impl TestServer { +// async fn start( +// foreground: Rc, +// background: Arc, +// ) -> Self { +// let test_db = TestDb::fake(background); +// let app_state = Self::build_app_state(&test_db).await; +// let peer = Peer::new(); +// let notifications = mpsc::unbounded(); +// let server = Server::new(app_state.clone(), peer.clone(), Some(notifications.0)); +// Self { +// peer, +// app_state, +// server, +// foreground, +// notifications: notifications.1, +// connection_killers: Default::default(), +// forbid_connections: 
Default::default(), +// _test_db: test_db, +// } +// } + +// async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { +// cx.update(|cx| { +// let settings = Settings::test(cx); +// cx.set_global(settings); +// }); + +// let http = FakeHttpClient::with_404_response(); +// let user_id = self.app_state.db.create_user(name, false).await.unwrap(); +// let client_name = name.to_string(); +// let mut client = Client::new(http.clone()); +// let server = self.server.clone(); +// let connection_killers = self.connection_killers.clone(); +// let forbid_connections = self.forbid_connections.clone(); +// let (connection_id_tx, mut connection_id_rx) = mpsc::channel(16); + +// Arc::get_mut(&mut client) +// .unwrap() +// .override_authenticate(move |cx| { +// cx.spawn(|_| async move { +// let access_token = "the-token".to_string(); +// Ok(Credentials { +// user_id: user_id.0 as u64, +// access_token, +// }) +// }) +// }) +// .override_establish_connection(move |credentials, cx| { +// assert_eq!(credentials.user_id, user_id.0 as u64); +// assert_eq!(credentials.access_token, "the-token"); + +// let server = server.clone(); +// let connection_killers = connection_killers.clone(); +// let forbid_connections = forbid_connections.clone(); +// let client_name = client_name.clone(); +// let connection_id_tx = connection_id_tx.clone(); +// cx.spawn(move |cx| async move { +// if forbid_connections.load(SeqCst) { +// Err(EstablishConnectionError::other(anyhow!( +// "server is forbidding connections" +// ))) +// } else { +// let (client_conn, server_conn, killed) = +// Connection::in_memory(cx.background()); +// connection_killers.lock().insert(user_id, killed); +// cx.background() +// .spawn(server.handle_connection( +// server_conn, +// client_name, +// user_id, +// Some(connection_id_tx), +// cx.background(), +// )) +// .detach(); +// Ok(client_conn) +// } +// }) +// }); + +// client +// .authenticate_and_connect(false, &cx.to_async()) +// .await +// 
.unwrap(); + +// Channel::init(&client); +// Project::init(&client); +// cx.update(|cx| { +// workspace::init(&client, cx); +// }); + +// let peer_id = PeerId(connection_id_rx.next().await.unwrap().0); +// let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); + +// let client = TestClient { +// client, +// peer_id, +// username: name.to_string(), +// user_store, +// language_registry: Arc::new(LanguageRegistry::test()), +// project: Default::default(), +// buffers: Default::default(), +// }; +// client.wait_for_current_user(cx).await; +// client +// } + +// fn disconnect_client(&self, user_id: UserId) { +// self.connection_killers +// .lock() +// .remove(&user_id) +// .unwrap() +// .store(true, SeqCst); +// } + +// fn forbid_connections(&self) { +// self.forbid_connections.store(true, SeqCst); +// } + +// fn allow_connections(&self) { +// self.forbid_connections.store(false, SeqCst); +// } + +// async fn build_app_state(test_db: &TestDb) -> Arc { +// let mut config = Config::default(); +// config.database_url = test_db.url.clone(); +// Arc::new(AppState { +// db: test_db.db().clone(), +// config, +// }) +// } + +// async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> { +// self.server.store.read().await +// } + +// async fn condition(&mut self, mut predicate: F) +// where +// F: FnMut(&Store) -> bool, +// { +// async_std::future::timeout(Duration::from_millis(500), async { +// while !(predicate)(&*self.server.store.read().await) { +// self.foreground.start_waiting(); +// self.notifications.next().await; +// self.foreground.finish_waiting(); +// } +// }) +// .await +// .expect("condition timed out"); +// } +// } + +// impl Deref for TestServer { +// type Target = Server; + +// fn deref(&self) -> &Self::Target { +// &self.server +// } +// } + +// impl Drop for TestServer { +// fn drop(&mut self) { +// self.peer.reset(); +// } +// } + +// struct TestClient { +// client: Arc, +// username: String, +// pub peer_id: PeerId, +// pub 
user_store: ModelHandle, +// language_registry: Arc, +// project: Option>, +// buffers: HashSet>, +// } + +// impl Deref for TestClient { +// type Target = Arc; + +// fn deref(&self) -> &Self::Target { +// &self.client +// } +// } + +// impl TestClient { +// pub fn current_user_id(&self, cx: &TestAppContext) -> UserId { +// UserId::from_proto( +// self.user_store +// .read_with(cx, |user_store, _| user_store.current_user().unwrap().id), +// ) +// } + +// async fn wait_for_current_user(&self, cx: &TestAppContext) { +// let mut authed_user = self +// .user_store +// .read_with(cx, |user_store, _| user_store.watch_current_user()); +// while authed_user.next().await.unwrap().is_none() {} +// } + +// async fn build_local_project( +// &mut self, +// fs: Arc, +// root_path: impl AsRef, +// cx: &mut TestAppContext, +// ) -> (ModelHandle, WorktreeId) { +// let project = cx.update(|cx| { +// Project::local( +// self.client.clone(), +// self.user_store.clone(), +// self.language_registry.clone(), +// fs, +// cx, +// ) +// }); +// self.project = Some(project.clone()); +// let (worktree, _) = project +// .update(cx, |p, cx| { +// p.find_or_create_local_worktree(root_path, true, cx) +// }) +// .await +// .unwrap(); +// worktree +// .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) +// .await; +// project +// .update(cx, |project, _| project.next_remote_id()) +// .await; +// (project, worktree.read_with(cx, |tree, _| tree.id())) +// } + +// async fn build_remote_project( +// &mut self, +// project_id: u64, +// cx: &mut TestAppContext, +// ) -> ModelHandle { +// let project = Project::remote( +// project_id, +// self.client.clone(), +// self.user_store.clone(), +// self.language_registry.clone(), +// FakeFs::new(cx.background()), +// &mut cx.to_async(), +// ) +// .await +// .unwrap(); +// self.project = Some(project.clone()); +// project +// } + +// fn build_workspace( +// &self, +// project: &ModelHandle, +// cx: &mut TestAppContext, +// ) -> ViewHandle { +// let 
(window_id, _) = cx.add_window(|_| EmptyView); +// cx.add_view(window_id, |cx| { +// let fs = project.read(cx).fs().clone(); +// Workspace::new( +// &WorkspaceParams { +// fs, +// project: project.clone(), +// user_store: self.user_store.clone(), +// languages: self.language_registry.clone(), +// themes: ThemeRegistry::new((), cx.font_cache().clone()), +// channel_list: cx.add_model(|cx| { +// ChannelList::new(self.user_store.clone(), self.client.clone(), cx) +// }), +// client: self.client.clone(), +// }, +// cx, +// ) +// }) +// } + +// async fn simulate_host( +// mut self, +// project: ModelHandle, +// files: Arc>>, +// op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, +// rng: Arc>, +// mut cx: TestAppContext, +// ) -> (Self, TestAppContext, Option) { +// async fn simulate_host_internal( +// client: &mut TestClient, +// project: ModelHandle, +// files: Arc>>, +// mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, +// rng: Arc>, +// cx: &mut TestAppContext, +// ) -> anyhow::Result<()> { +// let fs = project.read_with(cx, |project, _| project.fs().clone()); + +// while op_start_signal.next().await.is_some() { +// let distribution = rng.lock().gen_range::(0..100); +// match distribution { +// 0..=20 if !files.lock().is_empty() => { +// let path = files.lock().choose(&mut *rng.lock()).unwrap().clone(); +// let mut path = path.as_path(); +// while let Some(parent_path) = path.parent() { +// path = parent_path; +// if rng.lock().gen() { +// break; +// } +// } + +// log::info!("Host: find/create local worktree {:?}", path); +// let find_or_create_worktree = project.update(cx, |project, cx| { +// project.find_or_create_local_worktree(path, true, cx) +// }); +// if rng.lock().gen() { +// cx.background().spawn(find_or_create_worktree).detach(); +// } else { +// find_or_create_worktree.await?; +// } +// } +// 10..=80 if !files.lock().is_empty() => { +// let buffer = if client.buffers.is_empty() || rng.lock().gen() { +// let file = 
files.lock().choose(&mut *rng.lock()).unwrap().clone(); +// let (worktree, path) = project +// .update(cx, |project, cx| { +// project.find_or_create_local_worktree( +// file.clone(), +// true, +// cx, +// ) +// }) +// .await?; +// let project_path = +// worktree.read_with(cx, |worktree, _| (worktree.id(), path)); +// log::info!( +// "Host: opening path {:?}, worktree {}, relative_path {:?}", +// file, +// project_path.0, +// project_path.1 +// ); +// let buffer = project +// .update(cx, |project, cx| project.open_buffer(project_path, cx)) +// .await +// .unwrap(); +// client.buffers.insert(buffer.clone()); +// buffer +// } else { +// client +// .buffers +// .iter() +// .choose(&mut *rng.lock()) +// .unwrap() +// .clone() +// }; + +// if rng.lock().gen_bool(0.1) { +// cx.update(|cx| { +// log::info!( +// "Host: dropping buffer {:?}", +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// client.buffers.remove(&buffer); +// drop(buffer); +// }); +// } else { +// buffer.update(cx, |buffer, cx| { +// log::info!( +// "Host: updating buffer {:?} ({})", +// buffer.file().unwrap().full_path(cx), +// buffer.remote_id() +// ); + +// if rng.lock().gen_bool(0.7) { +// buffer.randomly_edit(&mut *rng.lock(), 5, cx); +// } else { +// buffer.randomly_undo_redo(&mut *rng.lock(), cx); +// } +// }); +// } +// } +// _ => loop { +// let path_component_count = rng.lock().gen_range::(1..=5); +// let mut path = PathBuf::new(); +// path.push("/"); +// for _ in 0..path_component_count { +// let letter = rng.lock().gen_range(b'a'..=b'z'); +// path.push(std::str::from_utf8(&[letter]).unwrap()); +// } +// path.set_extension("rs"); +// let parent_path = path.parent().unwrap(); + +// log::info!("Host: creating file {:?}", path,); + +// if fs.create_dir(&parent_path).await.is_ok() +// && fs.create_file(&path, Default::default()).await.is_ok() +// { +// files.lock().push(path); +// break; +// } else { +// log::info!("Host: cannot create file"); +// } +// }, +// } + +// 
cx.background().simulate_random_delay().await; +// } + +// Ok(()) +// } + +// let result = simulate_host_internal( +// &mut self, +// project.clone(), +// files, +// op_start_signal, +// rng, +// &mut cx, +// ) +// .await; +// log::info!("Host done"); +// self.project = Some(project); +// (self, cx, result.err()) +// } + +// pub async fn simulate_guest( +// mut self, +// guest_username: String, +// project: ModelHandle, +// op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, +// rng: Arc>, +// mut cx: TestAppContext, +// ) -> (Self, TestAppContext, Option) { +// async fn simulate_guest_internal( +// client: &mut TestClient, +// guest_username: &str, +// project: ModelHandle, +// mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, +// rng: Arc>, +// cx: &mut TestAppContext, +// ) -> anyhow::Result<()> { +// while op_start_signal.next().await.is_some() { +// let buffer = if client.buffers.is_empty() || rng.lock().gen() { +// let worktree = if let Some(worktree) = +// project.read_with(cx, |project, cx| { +// project +// .worktrees(&cx) +// .filter(|worktree| { +// let worktree = worktree.read(cx); +// worktree.is_visible() +// && worktree.entries(false).any(|e| e.is_file()) +// }) +// .choose(&mut *rng.lock()) +// }) { +// worktree +// } else { +// cx.background().simulate_random_delay().await; +// continue; +// }; + +// let (worktree_root_name, project_path) = +// worktree.read_with(cx, |worktree, _| { +// let entry = worktree +// .entries(false) +// .filter(|e| e.is_file()) +// .choose(&mut *rng.lock()) +// .unwrap(); +// ( +// worktree.root_name().to_string(), +// (worktree.id(), entry.path.clone()), +// ) +// }); +// log::info!( +// "{}: opening path {:?} in worktree {} ({})", +// guest_username, +// project_path.1, +// project_path.0, +// worktree_root_name, +// ); +// let buffer = project +// .update(cx, |project, cx| { +// project.open_buffer(project_path.clone(), cx) +// }) +// .await?; +// log::info!( +// "{}: opened path {:?} in 
worktree {} ({}) with buffer id {}", +// guest_username, +// project_path.1, +// project_path.0, +// worktree_root_name, +// buffer.read_with(cx, |buffer, _| buffer.remote_id()) +// ); +// client.buffers.insert(buffer.clone()); +// buffer +// } else { +// client +// .buffers +// .iter() +// .choose(&mut *rng.lock()) +// .unwrap() +// .clone() +// }; + +// let choice = rng.lock().gen_range(0..100); +// match choice { +// 0..=9 => { +// cx.update(|cx| { +// log::info!( +// "{}: dropping buffer {:?}", +// guest_username, +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// client.buffers.remove(&buffer); +// drop(buffer); +// }); +// } +// 10..=19 => { +// let completions = project.update(cx, |project, cx| { +// log::info!( +// "{}: requesting completions for buffer {} ({:?})", +// guest_username, +// buffer.read(cx).remote_id(), +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); +// project.completions(&buffer, offset, cx) +// }); +// let completions = cx.background().spawn(async move { +// completions +// .await +// .map_err(|err| anyhow!("completions request failed: {:?}", err)) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: detaching completions request", guest_username); +// cx.update(|cx| completions.detach_and_log_err(cx)); +// } else { +// completions.await?; +// } +// } +// 20..=29 => { +// let code_actions = project.update(cx, |project, cx| { +// log::info!( +// "{}: requesting code actions for buffer {} ({:?})", +// guest_username, +// buffer.read(cx).remote_id(), +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); +// project.code_actions(&buffer, range, cx) +// }); +// let code_actions = cx.background().spawn(async move { +// code_actions.await.map_err(|err| { +// anyhow!("code actions request failed: {:?}", err) +// }) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: 
detaching code actions request", guest_username); +// cx.update(|cx| code_actions.detach_and_log_err(cx)); +// } else { +// code_actions.await?; +// } +// } +// 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { +// let (requested_version, save) = buffer.update(cx, |buffer, cx| { +// log::info!( +// "{}: saving buffer {} ({:?})", +// guest_username, +// buffer.remote_id(), +// buffer.file().unwrap().full_path(cx) +// ); +// (buffer.version(), buffer.save(cx)) +// }); +// let save = cx.background().spawn(async move { +// let (saved_version, _) = save +// .await +// .map_err(|err| anyhow!("save request failed: {:?}", err))?; +// assert!(saved_version.observed_all(&requested_version)); +// Ok::<_, anyhow::Error>(()) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: detaching save request", guest_username); +// cx.update(|cx| save.detach_and_log_err(cx)); +// } else { +// save.await?; +// } +// } +// 40..=44 => { +// let prepare_rename = project.update(cx, |project, cx| { +// log::info!( +// "{}: preparing rename for buffer {} ({:?})", +// guest_username, +// buffer.read(cx).remote_id(), +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); +// project.prepare_rename(buffer, offset, cx) +// }); +// let prepare_rename = cx.background().spawn(async move { +// prepare_rename.await.map_err(|err| { +// anyhow!("prepare rename request failed: {:?}", err) +// }) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: detaching prepare rename request", guest_username); +// cx.update(|cx| prepare_rename.detach_and_log_err(cx)); +// } else { +// prepare_rename.await?; +// } +// } +// 45..=49 => { +// let definitions = project.update(cx, |project, cx| { +// log::info!( +// "{}: requesting definitions for buffer {} ({:?})", +// guest_username, +// buffer.read(cx).remote_id(), +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// let offset = 
rng.lock().gen_range(0..=buffer.read(cx).len()); +// project.definition(&buffer, offset, cx) +// }); +// let definitions = cx.background().spawn(async move { +// definitions +// .await +// .map_err(|err| anyhow!("definitions request failed: {:?}", err)) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: detaching definitions request", guest_username); +// cx.update(|cx| definitions.detach_and_log_err(cx)); +// } else { +// client +// .buffers +// .extend(definitions.await?.into_iter().map(|loc| loc.buffer)); +// } +// } +// 50..=54 => { +// let highlights = project.update(cx, |project, cx| { +// log::info!( +// "{}: requesting highlights for buffer {} ({:?})", +// guest_username, +// buffer.read(cx).remote_id(), +// buffer.read(cx).file().unwrap().full_path(cx) +// ); +// let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); +// project.document_highlights(&buffer, offset, cx) +// }); +// let highlights = cx.background().spawn(async move { +// highlights +// .await +// .map_err(|err| anyhow!("highlights request failed: {:?}", err)) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: detaching highlights request", guest_username); +// cx.update(|cx| highlights.detach_and_log_err(cx)); +// } else { +// highlights.await?; +// } +// } +// 55..=59 => { +// let search = project.update(cx, |project, cx| { +// let query = rng.lock().gen_range('a'..='z'); +// log::info!("{}: project-wide search {:?}", guest_username, query); +// project.search(SearchQuery::text(query, false, false), cx) +// }); +// let search = cx.background().spawn(async move { +// search +// .await +// .map_err(|err| anyhow!("search request failed: {:?}", err)) +// }); +// if rng.lock().gen_bool(0.3) { +// log::info!("{}: detaching search request", guest_username); +// cx.update(|cx| search.detach_and_log_err(cx)); +// } else { +// client.buffers.extend(search.await?.into_keys()); +// } +// } +// _ => { +// buffer.update(cx, |buffer, cx| { +// log::info!( +// "{}: updating 
buffer {} ({:?})", +// guest_username, +// buffer.remote_id(), +// buffer.file().unwrap().full_path(cx) +// ); +// if rng.lock().gen_bool(0.7) { +// buffer.randomly_edit(&mut *rng.lock(), 5, cx); +// } else { +// buffer.randomly_undo_redo(&mut *rng.lock(), cx); +// } +// }); +// } +// } +// cx.background().simulate_random_delay().await; +// } +// Ok(()) +// } + +// let result = simulate_guest_internal( +// &mut self, +// &guest_username, +// project.clone(), +// op_start_signal, +// rng, +// &mut cx, +// ) +// .await; +// log::info!("{}: done", guest_username); + +// self.project = Some(project); +// (self, cx, result.err()) +// } +// } + +// impl Drop for TestClient { +// fn drop(&mut self) { +// self.client.tear_down(); +// } +// } + +// impl Executor for Arc { +// type Timer = gpui::executor::Timer; + +// fn spawn_detached>(&self, future: F) { +// self.spawn(future).detach(); +// } + +// fn timer(&self, duration: Duration) -> Self::Timer { +// self.as_ref().timer(duration) +// } +// } + +// fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> { +// channel +// .messages() +// .cursor::<()>() +// .map(|m| { +// ( +// m.sender.github_login.clone(), +// m.body.clone(), +// m.is_pending(), +// ) +// }) +// .collect() +// } + +// struct EmptyView; + +// impl gpui::Entity for EmptyView { +// type Event = (); +// } + +// impl gpui::View for EmptyView { +// fn ui_name() -> &'static str { +// "empty view" +// } + +// fn render(&mut self, _: &mut gpui::RenderContext) -> gpui::ElementBox { +// gpui::Element::boxed(gpui::elements::Empty) +// } +// } +// } diff --git a/crates/collab/src/rpc/store.rs b/crates/collab/src/rpc/store.rs index 946e9f84209ad3cef8d6e58cd8f54ab6479acee9..7a123ee484664b7d8177c0f81a1c96d99d00f9cd 100644 --- a/crates/collab/src/rpc/store.rs +++ b/crates/collab/src/rpc/store.rs @@ -1,5 +1,5 @@ use crate::db::{ChannelId, UserId}; -use anyhow::anyhow; +use anyhow::{anyhow, Result}; use collections::{BTreeMap, HashMap, HashSet}; use 
rpc::{proto, ConnectionId}; use std::{collections::hash_map, path::PathBuf}; @@ -99,7 +99,7 @@ impl Store { pub fn remove_connection( &mut self, connection_id: ConnectionId, - ) -> tide::Result { + ) -> Result { let connection = if let Some(connection) = self.connections.remove(&connection_id) { connection } else { @@ -165,7 +165,7 @@ impl Store { } } - pub fn user_id_for_connection(&self, connection_id: ConnectionId) -> tide::Result { + pub fn user_id_for_connection(&self, connection_id: ConnectionId) -> Result { Ok(self .connections .get(&connection_id) @@ -258,7 +258,7 @@ impl Store { worktree_id: u64, connection_id: ConnectionId, worktree: Worktree, - ) -> tide::Result<()> { + ) -> Result<()> { let project = self .projects .get_mut(&project_id) @@ -286,7 +286,7 @@ impl Store { &mut self, project_id: u64, connection_id: ConnectionId, - ) -> tide::Result { + ) -> Result { match self.projects.entry(project_id) { hash_map::Entry::Occupied(e) => { if e.get().host_connection_id == connection_id { @@ -326,7 +326,7 @@ impl Store { project_id: u64, worktree_id: u64, acting_connection_id: ConnectionId, - ) -> tide::Result<(Worktree, Vec)> { + ) -> Result<(Worktree, Vec)> { let project = self .projects .get_mut(&project_id) @@ -363,7 +363,7 @@ impl Store { &mut self, project_id: u64, connection_id: ConnectionId, - ) -> tide::Result { + ) -> Result { if let Some(project) = self.projects.get_mut(&project_id) { if project.host_connection_id == connection_id { let mut share = ProjectShare::default(); @@ -383,7 +383,7 @@ impl Store { &mut self, project_id: u64, acting_connection_id: ConnectionId, - ) -> tide::Result { + ) -> Result { let project = if let Some(project) = self.projects.get_mut(&project_id) { project } else { @@ -418,7 +418,7 @@ impl Store { worktree_id: u64, connection_id: ConnectionId, summary: proto::DiagnosticSummary, - ) -> tide::Result> { + ) -> Result> { let project = self .projects .get_mut(&project_id) @@ -443,7 +443,7 @@ impl Store { project_id: u64, 
connection_id: ConnectionId, language_server: proto::LanguageServer, - ) -> tide::Result> { + ) -> Result> { let project = self .projects .get_mut(&project_id) @@ -461,7 +461,7 @@ impl Store { connection_id: ConnectionId, user_id: UserId, project_id: u64, - ) -> tide::Result { + ) -> Result { let connection = self .connections .get_mut(&connection_id) @@ -498,7 +498,7 @@ impl Store { &mut self, connection_id: ConnectionId, project_id: u64, - ) -> tide::Result { + ) -> Result { let project = self .projects .get_mut(&project_id) @@ -533,7 +533,7 @@ impl Store { worktree_id: u64, removed_entries: &[u64], updated_entries: &[proto::Entry], - ) -> tide::Result> { + ) -> Result> { let project = self.write_project(project_id, connection_id)?; let worktree = project .share_mut()? @@ -554,13 +554,13 @@ impl Store { &self, project_id: u64, acting_connection_id: ConnectionId, - ) -> tide::Result> { + ) -> Result> { Ok(self .read_project(project_id, acting_connection_id)? .connection_ids()) } - pub fn channel_connection_ids(&self, channel_id: ChannelId) -> tide::Result> { + pub fn channel_connection_ids(&self, channel_id: ChannelId) -> Result> { Ok(self .channels .get(&channel_id) @@ -573,11 +573,7 @@ impl Store { self.projects.get(&project_id) } - pub fn read_project( - &self, - project_id: u64, - connection_id: ConnectionId, - ) -> tide::Result<&Project> { + pub fn read_project(&self, project_id: u64, connection_id: ConnectionId) -> Result<&Project> { let project = self .projects .get(&project_id) @@ -600,7 +596,7 @@ impl Store { &mut self, project_id: u64, connection_id: ConnectionId, - ) -> tide::Result<&mut Project> { + ) -> Result<&mut Project> { let project = self .projects .get_mut(&project_id) @@ -755,14 +751,14 @@ impl Project { } } - pub fn share(&self) -> tide::Result<&ProjectShare> { + pub fn share(&self) -> Result<&ProjectShare> { Ok(self .share .as_ref() .ok_or_else(|| anyhow!("worktree is not shared"))?) 
} - fn share_mut(&mut self) -> tide::Result<&mut ProjectShare> { + fn share_mut(&mut self) -> Result<&mut ProjectShare> { Ok(self .share .as_mut() From f7f4aad00f9e38f0804e8fca1b2df31480f813ba Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Fri, 22 Apr 2022 18:46:31 -0600 Subject: [PATCH 02/11] WIP --- crates/collab/src/main.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 210eeb422044b1c7bdece7eb24affc183772f0ee..5bcee291b3feedc5eb0ee593677d503e6ab41f02 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -8,9 +8,13 @@ use ::rpc::Peer; use anyhow::Result; use async_trait::async_trait; use db::{Db, PostgresDb}; +use hyper::{ + server::conn::AddrStream, + service::{make_service_fn, service_fn}, + Body, Request, Response, Server, +}; use serde::Deserialize; -use std::sync::Arc; -use tokio::net::TcpListener; +use std::{convert::Infallible, net::TcpListener, sync::Arc}; // type Request = tide::Request>; @@ -71,13 +75,24 @@ async fn main() -> Result<()> { run_server( state.clone(), rpc, - TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)).await?, + TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)) + .expect("failed to bind TCP listener"), ) .await?; Ok(()) } pub async fn run_server(state: Arc, rpc: Arc, listener: TcpListener) -> Result<()> { + let make_service = make_service_fn(|_: &AddrStream| async move { + Ok::<_, Infallible>(service_fn(|_: Request| async move { + Response::new(Body::from(format!("hello")) + })) + }); + + Server::from_tcp(listener) + .expect("could not create server") + .serve(make_service); + // let mut app = tide::with_state(state.clone()); // rpc::add_routes(&mut app, &rpc); From 62f7c858e3fed3997f733644449a3330de3f0d21 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Sun, 24 Apr 2022 10:45:20 -0600 Subject: [PATCH 03/11] WIP --- Cargo.lock | 14 +++++++++++ crates/collab/Cargo.toml | 1 + 
crates/collab/src/api.rs | 36 +++++++++++++++++++++------ crates/collab/src/main.rs | 52 +++++++++++++++++++-------------------- 4 files changed, 69 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0feeb2fa9848594cf740dc411cf494d2aea7e75a..de01c5728e9c6be4117f65e1d3d0b81830bafc13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -896,6 +896,7 @@ dependencies = [ "parking_lot", "project", "rand 0.8.3", + "routerify", "rpc", "scrypt", "serde", @@ -3792,6 +3793,19 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "routerify" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "496c1d3718081c45ba9c31fbfc07417900aa96f4070ff90dc29961836b7a9945" +dependencies = [ + "http", + "hyper", + "lazy_static", + "percent-encoding", + "regex", +] + [[package]] name = "roxmltree" version = "0.14.1" diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index e675e07e3f048ba42bc9c35c5f2c11d4af539a30..c3a5f0119c3ee63a82f3e187c9ff8b0865306a5c 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -30,6 +30,7 @@ lipsum = { version = "0.8", optional = true } log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" rand = "0.8" +routerify = "3.0" scrypt = "0.7" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index 10a2e0a5fa85be6d228a13f5e216d8b77c63f6e8..f54d74ce9357fc3ed98cc68a362a20ee25e120fe 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -1,10 +1,24 @@ // use crate::{auth, db::UserId, AppState, Request, RequestExt as _}; use async_trait::async_trait; +use hyper::{ + header::{CONTENT_LENGTH, CONTENT_TYPE}, + Body, Request, Response, +}; +use routerify::prelude::*; + +use anyhow::Result; +use routerify::RouterBuilder; use serde::Deserialize; use serde_json::json; use std::sync::Arc; + +use crate::{AppState, RequestExt}; // use surf::StatusCode; +pub 
fn add_routes(router: &mut RouterBuilder) { + router.get("/users", get_users); +} + // pub fn add_routes(app: &mut tide::Server>) { // app.at("/users").get(get_users); // app.at("/users").post(create_user); @@ -29,15 +43,23 @@ use std::sync::Arc; // .build()) // } -// async fn get_users(request: Request) -> tide::Result { -// request.require_token().await?; +async fn get_users(request: Request) -> Result> { + // request.require_token().await?; -// let users = request.db().get_all_users().await?; + let users = request.db().get_all_users().await?; -// Ok(tide::Response::builder(StatusCode::Ok) -// .body(tide::Body::from_json(&users)?) -// .build()) -// } + // Body::from + + let body = "Hello World"; + Ok(Response::builder() + .header(CONTENT_LENGTH, body.len() as u64) + .header(CONTENT_TYPE, "text/plain") + .body(Body::from(body))?) + + // Ok(tide::Response::builder(StatusCode::Ok) + // .body(tide::Body::from_json(&users)?) + // .build()) +} // async fn create_user(mut request: Request) -> tide::Result { // request.require_token().await?; diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 5bcee291b3feedc5eb0ee593677d503e6ab41f02..7407d5f55d52ac60c2b11bd3ff1acc0cf389c8af 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -5,16 +5,13 @@ mod env; mod rpc; use ::rpc::Peer; -use anyhow::Result; -use async_trait::async_trait; +use anyhow::{anyhow, Result}; use db::{Db, PostgresDb}; -use hyper::{ - server::conn::AddrStream, - service::{make_service_fn, service_fn}, - Body, Request, Response, Server, -}; +use hyper::{Body, Request, Server}; +use routerify::ext::RequestExt as _; +use routerify::{Router, RouterService}; use serde::Deserialize; -use std::{convert::Infallible, net::TcpListener, sync::Arc}; +use std::{net::TcpListener, sync::Arc}; // type Request = tide::Request>; @@ -42,17 +39,15 @@ impl AppState { } } -// #[async_trait] -// trait RequestExt { -// fn db(&self) -> &Arc; -// } +trait RequestExt { + fn db(&self) -> &Arc; 
+} -// #[async_trait] -// impl RequestExt for Request { -// fn db(&self) -> &Arc { -// &self.state().db -// } -// } +impl RequestExt for Request { + fn db(&self) -> &Arc { + &self.data::>().unwrap().db + } +} #[tokio::main] async fn main() -> Result<()> { @@ -82,16 +77,19 @@ async fn main() -> Result<()> { Ok(()) } -pub async fn run_server(state: Arc, rpc: Arc, listener: TcpListener) -> Result<()> { - let make_service = make_service_fn(|_: &AddrStream| async move { - Ok::<_, Infallible>(service_fn(|_: Request| async move { - Response::new(Body::from(format!("hello")) - })) - }); +fn router(state: Arc, peer: Arc) -> Result> { + let mut router = Router::builder().data(state); + api::add_routes(&mut router); + router.build().map_err(|error| anyhow!(error)) +} - Server::from_tcp(listener) - .expect("could not create server") - .serve(make_service); +pub async fn run_server( + state: Arc, + peer: Arc, + listener: TcpListener, +) -> Result<()> { + let service = RouterService::new(router(state, peer)?).map_err(|error| anyhow!(error))?; + Server::from_tcp(listener)?.serve(service); // let mut app = tide::with_state(state.clone()); // rpc::add_routes(&mut app, &rpc); From e30a3956d65c117f1e419741022107c0f65030be Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Sun, 24 Apr 2022 11:08:25 -0600 Subject: [PATCH 04/11] WIP: Switch to axum --- Cargo.lock | 223 ++++++++++++++++++++++++-------------- crates/collab/Cargo.toml | 3 +- crates/collab/src/api.rs | 57 +++++----- crates/collab/src/main.rs | 52 +++++---- 4 files changed, 196 insertions(+), 139 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de01c5728e9c6be4117f65e1d3d0b81830bafc13..89c04bb990cea45b3fb8419a6c1ece865b776806 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -177,7 +177,7 @@ dependencies = [ "futures-core", "futures-io", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", ] [[package]] @@ -340,7 +340,7 @@ dependencies = [ "memchr", "num_cpus", "once_cell", - "pin-project-lite 0.2.4", + 
"pin-project-lite 0.2.8", "pin-utils", "slab", "wasm-bindgen-futures", @@ -385,7 +385,7 @@ dependencies = [ "futures-io", "futures-util", "log", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "tungstenite 0.16.0", ] @@ -455,6 +455,51 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "axum" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f523b4e98ba6897ae90994bc18423d9877c54f9047b06a00ddc8122a957b1c70" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes 1.0.1", + "futures-util", + "http", + "http-body", + "hyper", + "itoa 1.0.1", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite 0.2.8", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3ddbd16eabff8b45f21b98671fddcc93daaa7ac4c84f8473693437226040de5" +dependencies = [ + "async-trait", + "bytes 1.0.1", + "futures-util", + "http", + "http-body", + "mime", +] + [[package]] name = "backtrace" version = "0.3.64" @@ -528,9 +573,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" @@ -877,6 +922,7 @@ dependencies = [ "anyhow", "async-trait", "async-tungstenite", + "axum", "base64 0.13.0", "client", "collections", @@ -886,7 +932,6 @@ dependencies = [ "envy", "futures", "gpui", - "hyper", "json_env_logger", "language", "lazy_static", @@ -896,7 +941,6 @@ dependencies = [ 
"parking_lot", "project", "rand 0.8.3", - "routerify", "rpc", "scrypt", "serde", @@ -1770,9 +1814,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -1797,9 +1841,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-lite" @@ -1812,17 +1856,16 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -1830,21 +1873,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.14" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.14" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] 
name = "futures-util" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ "futures-channel", "futures-core", @@ -1853,10 +1896,8 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -2045,25 +2086,6 @@ dependencies = [ "syn", ] -[[package]] -name = "h2" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" -dependencies = [ - "bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "hashbrown" version = "0.9.1" @@ -2150,13 +2172,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes 1.0.1", "fnv", - "itoa 0.4.7", + "itoa 1.0.1", ] [[package]] @@ -2176,7 +2198,7 @@ checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes 1.0.1", "http", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", ] [[package]] @@ -2194,6 +2216,12 @@ dependencies = [ "log", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "http-types" version = "2.11.1" @@ -2207,7 +2235,7 @@ dependencies = [ "cookie", 
"futures-lite", "infer", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "rand 0.7.3", "serde", "serde_json", @@ -2218,9 +2246,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.4.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "6330e8a36bd8c859f3fa6d9382911fbb7147ec39807f63b923933a247240b9ba" [[package]] name = "httpdate" @@ -2236,21 +2264,20 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.11" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ "bytes 1.0.1", "futures-channel", "futures-core", "futures-util", - "h2", "http", "http-body", "httparse", "httpdate", - "itoa 0.4.7", - "pin-project-lite 0.2.4", + "itoa 1.0.1", + "pin-project-lite 0.2.8", "socket2 0.4.0", "tokio", "tower-service", @@ -2723,6 +2750,12 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +[[package]] +name = "matchit" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" + [[package]] name = "maybe-uninit" version = "2.0.0" @@ -3279,9 +3312,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = 
"pin-utils" @@ -3403,12 +3436,6 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" version = "1.0.36" @@ -3793,19 +3820,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "routerify" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496c1d3718081c45ba9c31fbfc07417900aa96f4070ff90dc29961836b7a9945" -dependencies = [ - "http", - "hyper", - "lazy_static", - "percent-encoding", - "regex", -] - [[package]] name = "roxmltree" version = "0.14.1" @@ -4679,7 +4693,7 @@ dependencies = [ "log", "mime_guess", "once_cell", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "serde", "serde_json", "web-sys", @@ -4731,6 +4745,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synstructure" version = "0.12.4" @@ -5010,7 +5030,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "signal-hook-registry", "tokio-macros", "winapi 0.3.9", @@ -5048,9 +5068,8 @@ dependencies = [ "bytes 1.0.1", "futures-core", "futures-sink", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "tokio", - "tracing", ] [[package]] @@ -5062,6 +5081,48 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +dependencies = [ + "futures-core", + "futures-util", + 
"pin-project", + "pin-project-lite 0.2.8", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8" +dependencies = [ + "bitflags", + "bytes 1.0.1", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.8", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.1" @@ -5076,7 +5137,7 @@ checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.4", + "pin-project-lite 0.2.8", "tracing-attributes", "tracing-core", ] diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index c3a5f0119c3ee63a82f3e187c9ff8b0865306a5c..1b44d1228b1a99f115a6e2bfea02472003308583 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -20,17 +20,16 @@ util = { path = "../util" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" +axum = "0.5" base64 = "0.13" envy = "0.4.2" env_logger = "0.8" futures = "0.3" -hyper = { version = "0.14", features = ["full"] } json_env_logger = "0.1" lipsum = { version = "0.8", optional = true } log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" rand = "0.8" -routerify = "3.0" scrypt = "0.7" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index f54d74ce9357fc3ed98cc68a362a20ee25e120fe..3bb231e0f36220c7263abd37ddd499c42da8de64 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -1,22 
+1,20 @@ // use crate::{auth, db::UserId, AppState, Request, RequestExt as _}; -use async_trait::async_trait; -use hyper::{ - header::{CONTENT_LENGTH, CONTENT_TYPE}, - Body, Request, Response, -}; -use routerify::prelude::*; - use anyhow::Result; -use routerify::RouterBuilder; +use axum::{ + body::Body, + http::{Request, Response, StatusCode}, + routing::get, + Router, +}; use serde::Deserialize; use serde_json::json; use std::sync::Arc; -use crate::{AppState, RequestExt}; +use crate::AppState; // use surf::StatusCode; -pub fn add_routes(router: &mut RouterBuilder) { - router.get("/users", get_users); +pub fn add_routes(router: Router) -> Router { + router.route("/users", get(get_users)) } // pub fn add_routes(app: &mut tide::Server>) { @@ -29,6 +27,25 @@ pub fn add_routes(router: &mut RouterBuilder) { // .post(create_access_token); // } +async fn get_users(request: Request) -> Result, (StatusCode, String)> { + // request.require_token().await?; + + // let users = request.db().get_all_users().await?; + + // Body::from + + // let body = "Hello World"; + // Ok(Response::builder() + // .header(CONTENT_LENGTH, body.len() as u64) + // .header(CONTENT_TYPE, "text/plain") + // .body(Body::from(body))?) + + // Ok(tide::Response::builder(StatusCode::Ok) + // .body(tide::Body::from_json(&users)?) + // .build()) + todo!() +} + // async fn get_user(request: Request) -> tide::Result { // request.require_token().await?; @@ -43,24 +60,6 @@ pub fn add_routes(router: &mut RouterBuilder) { // .build()) // } -async fn get_users(request: Request) -> Result> { - // request.require_token().await?; - - let users = request.db().get_all_users().await?; - - // Body::from - - let body = "Hello World"; - Ok(Response::builder() - .header(CONTENT_LENGTH, body.len() as u64) - .header(CONTENT_TYPE, "text/plain") - .body(Body::from(body))?) - - // Ok(tide::Response::builder(StatusCode::Ok) - // .body(tide::Body::from_json(&users)?) 
- // .build()) -} - // async fn create_user(mut request: Request) -> tide::Result { // request.require_token().await?; diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 7407d5f55d52ac60c2b11bd3ff1acc0cf389c8af..b7737fd17c0953d2b634b1fe1b17b4ebd50dc61e 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -5,11 +5,10 @@ mod env; mod rpc; use ::rpc::Peer; -use anyhow::{anyhow, Result}; +use anyhow::Result; +use axum::{body::Body, http::StatusCode, Router}; use db::{Db, PostgresDb}; -use hyper::{Body, Request, Server}; -use routerify::ext::RequestExt as _; -use routerify::{Router, RouterService}; + use serde::Deserialize; use std::{net::TcpListener, sync::Arc}; @@ -39,15 +38,15 @@ impl AppState { } } -trait RequestExt { - fn db(&self) -> &Arc; -} +// trait RequestExt { +// fn db(&self) -> &Arc; +// } -impl RequestExt for Request { - fn db(&self) -> &Arc { - &self.data::>().unwrap().db - } -} +// impl RequestExt for Request { +// fn db(&self) -> &Arc { +// &self.data::>().unwrap().db +// } +// } #[tokio::main] async fn main() -> Result<()> { @@ -77,10 +76,11 @@ async fn main() -> Result<()> { Ok(()) } -fn router(state: Arc, peer: Arc) -> Result> { - let mut router = Router::builder().data(state); - api::add_routes(&mut router); - router.build().map_err(|error| anyhow!(error)) +async fn handle_anyhow_error(err: anyhow::Error) -> (StatusCode, String) { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Something went wrong: {}", err), + ) } pub async fn run_server( @@ -88,19 +88,17 @@ pub async fn run_server( peer: Arc, listener: TcpListener, ) -> Result<()> { - let service = RouterService::new(router(state, peer)?).map_err(|error| anyhow!(error))?; - Server::from_tcp(listener)?.serve(service); - - // let mut app = tide::with_state(state.clone()); - // rpc::add_routes(&mut app, &rpc); - - // let mut web = tide::with_state(state.clone()); - // web.with(CompressMiddleware::new()); - // api::add_routes(&mut web); + let app = 
Router::::new(); + // TODO: Assign app state to request somehow + // TODO: Compression on API routes? + // TODO: Authenticate API routes. - // app.at("/").nest(web); + let app = api::add_routes(app); + // TODO: Add rpc routes - // app.listen(listener).await?; + axum::Server::from_tcp(listener)? + .serve(app.into_make_service()) + .await?; Ok(()) } From cb9d608e536e4af9d1ef74b87bf171515c6576dc Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Sun, 24 Apr 2022 18:02:14 -0600 Subject: [PATCH 05/11] WIP Continue adding in more API routes --- Cargo.lock | 1 + crates/collab/Cargo.toml | 3 +- crates/collab/src/api.rs | 125 +++++++++++++++++--------------------- crates/collab/src/main.rs | 44 ++++++++++---- 4 files changed, 93 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89c04bb990cea45b3fb8419a6c1ece865b776806..0ef11676ebcbb58fc36860bd4fa317cfb3919ec4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -953,6 +953,7 @@ dependencies = [ "tokio", "tokio-tungstenite", "toml", + "tower", "util", "workspace", ] diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 1b44d1228b1a99f115a6e2bfea02472003308583..8489cc2be64b194b22b6ec1311abf430deace0cf 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -20,7 +20,7 @@ util = { path = "../util" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" -axum = "0.5" +axum = { version = "0.5", features = ["json"] } base64 = "0.13" envy = "0.4.2" env_logger = "0.8" @@ -36,6 +36,7 @@ serde_json = "1.0" sha-1 = "0.9" tokio = { version = "1", features = ["full"] } tokio-tungstenite = "0.17" +tower = "0.4" time = "0.2" toml = "0.5.8" diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index 3bb231e0f36220c7263abd37ddd499c42da8de64..ffb25c39daefd8833b826470500a568d97e8498f 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -1,20 +1,33 @@ -// use crate::{auth, db::UserId, AppState, Request, RequestExt as _}; -use anyhow::Result; +use crate::{ 
+ db::{Db, User, UserId}, + AppState, Result, +}; +use anyhow::anyhow; use axum::{ body::Body, - http::{Request, Response, StatusCode}, - routing::get, - Router, + extract::Path, + http::{Request, StatusCode}, + response::{IntoResponse, Response}, + routing::{get, put}, + Json, Router, }; use serde::Deserialize; -use serde_json::json; use std::sync::Arc; -use crate::AppState; -// use surf::StatusCode; - -pub fn add_routes(router: Router) -> Router { - router.route("/users", get(get_users)) +pub fn add_routes(router: Router, app: Arc) -> Router { + router + .route("/users", { + let app = app.clone(); + get(move |req| get_users(req, app)) + }) + .route("/users", { + let app = app.clone(); + get(move |params| create_user(params, app)) + }) + .route("/users/:id", { + let app = app.clone(); + put(move |user_id, params| update_user(user_id, params, app)) + }) } // pub fn add_routes(app: &mut tide::Server>) { @@ -27,65 +40,48 @@ pub fn add_routes(router: Router) -> Router { // .post(create_access_token); // } -async fn get_users(request: Request) -> Result, (StatusCode, String)> { +async fn get_users(request: Request, app: Arc) -> Result>> { // request.require_token().await?; - // let users = request.db().get_all_users().await?; - - // Body::from - - // let body = "Hello World"; - // Ok(Response::builder() - // .header(CONTENT_LENGTH, body.len() as u64) - // .header(CONTENT_TYPE, "text/plain") - // .body(Body::from(body))?) - - // Ok(tide::Response::builder(StatusCode::Ok) - // .body(tide::Body::from_json(&users)?) - // .build()) - todo!() + let users = app.db.get_all_users().await?; + Ok(Json(users)) } -// async fn get_user(request: Request) -> tide::Result { -// request.require_token().await?; - -// let user = request -// .db() -// .get_user_by_github_login(request.param("github_login")?) -// .await? -// .ok_or_else(|| surf::Error::from_str(404, "user not found"))?; - -// Ok(tide::Response::builder(StatusCode::Ok) -// .body(tide::Body::from_json(&user)?) 
-// .build()) -// } +#[derive(Deserialize)] +struct CreateUser { + github_login: String, + admin: bool, +} -// async fn create_user(mut request: Request) -> tide::Result { -// request.require_token().await?; +async fn create_user(Json(params): Json, app: Arc) -> Result> { + let user_id = app + .db + .create_user(¶ms.github_login, params.admin) + .await?; -// #[derive(Deserialize)] -// struct Params { -// github_login: String, -// admin: bool, -// } -// let params = request.body_json::().await?; + let user = app + .db + .get_user_by_id(user_id) + .await? + .ok_or_else(|| anyhow!("couldn't find the user we just created"))?; -// let user_id = request -// .db() -// .create_user(¶ms.github_login, params.admin) -// .await?; + Ok(Json(user)) +} -// let user = request.db().get_user_by_id(user_id).await?.ok_or_else(|| { -// surf::Error::from_str( -// StatusCode::InternalServerError, -// "couldn't find the user we just created", -// ) -// })?; +#[derive(Deserialize)] +struct UpdateUser { + admin: bool, +} -// Ok(tide::Response::builder(StatusCode::Ok) -// .body(tide::Body::from_json(&user)?) -// .build()) -// } +async fn update_user( + Path(user_id): Path, + Json(params): Json, + app: Arc, +) -> Result { + let user_id = UserId(user_id); + app.db.set_user_is_admin(user_id, params.admin).await?; + Ok(()) +} // async fn update_user(mut request: Request) -> tide::Result { // request.require_token().await?; @@ -94,13 +90,6 @@ async fn get_users(request: Request) -> Result, (StatusCode // struct Params { // admin: bool, // } -// let user_id = UserId( -// request -// .param("id")? 
-// .parse::() -// .map_err(|error| surf::Error::from_str(StatusCode::BadRequest, error.to_string()))?, -// ); -// let params = request.body_json::().await?; // request // .db() diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index b7737fd17c0953d2b634b1fe1b17b4ebd50dc61e..6cd264074be590859dfa39c05c082827ff6e7a3c 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -5,8 +5,7 @@ mod env; mod rpc; use ::rpc::Peer; -use anyhow::Result; -use axum::{body::Body, http::StatusCode, Router}; +use axum::{body::Body, http::StatusCode, response::IntoResponse, Router}; use db::{Db, PostgresDb}; use serde::Deserialize; @@ -76,24 +75,16 @@ async fn main() -> Result<()> { Ok(()) } -async fn handle_anyhow_error(err: anyhow::Error) -> (StatusCode, String) { - ( - StatusCode::INTERNAL_SERVER_ERROR, - format!("Something went wrong: {}", err), - ) -} - pub async fn run_server( state: Arc, peer: Arc, listener: TcpListener, ) -> Result<()> { let app = Router::::new(); - // TODO: Assign app state to request somehow // TODO: Compression on API routes? // TODO: Authenticate API routes. - let app = api::add_routes(app); + let app = api::add_routes(app, state); // TODO: Add rpc routes axum::Server::from_tcp(listener)? 
@@ -102,3 +93,34 @@ pub async fn run_server( Ok(()) } + +type Result = std::result::Result; + +struct Error(anyhow::Error); + +impl From for Error +where + E: Into, +{ + fn from(error: E) -> Self { + Self(error.into()) + } +} + +impl IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &self.0)).into_response() + } +} + +impl std::fmt::Debug for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} From 35bec69fa473ceea1e38c5549eb0b1f523380ab4 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 25 Apr 2022 17:51:13 -0600 Subject: [PATCH 06/11] Finish adding API routes We haven't tested them yet. Co-Authored-By: Max Brunsfeld --- crates/collab/src/api.rs | 201 ++++++++++++++++++-------------------- crates/collab/src/auth.rs | 96 +++++++++--------- crates/collab/src/main.rs | 26 +++-- 3 files changed, 159 insertions(+), 164 deletions(-) diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index ffb25c39daefd8833b826470500a568d97e8498f..80f2682f4a9ba458d853819cfa0e2fdadfb0d99a 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -1,59 +1,62 @@ use crate::{ - db::{Db, User, UserId}, - AppState, Result, + auth, + db::{User, UserId}, + AppState, Error, Result, }; use anyhow::anyhow; use axum::{ body::Body, - extract::Path, - http::{Request, StatusCode}, - response::{IntoResponse, Response}, - routing::{get, put}, + extract::{Path, Query}, + http::StatusCode, + routing::{delete, get, post, put}, Json, Router, }; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use std::sync::Arc; pub fn add_routes(router: Router, app: Arc) -> Router { router .route("/users", { let app = app.clone(); - get(move |req| get_users(req, app)) + get(move || get_users(app)) }) .route("/users", { 
let app = app.clone(); - get(move |params| create_user(params, app)) + post(move |params| create_user(params, app)) }) .route("/users/:id", { let app = app.clone(); put(move |user_id, params| update_user(user_id, params, app)) }) + .route("/users/:id", { + let app = app.clone(); + delete(move |user_id| destroy_user(user_id, app)) + }) + .route("/users/:github_login", { + let app = app.clone(); + get(move |github_login| get_user(github_login, app)) + }) + .route("/users/:github_login/access_tokens", { + let app = app.clone(); + post(move |github_login, params| create_access_token(github_login, params, app)) + }) } -// pub fn add_routes(app: &mut tide::Server>) { -// app.at("/users").get(get_users); -// app.at("/users").post(create_user); -// app.at("/users/:id").put(update_user); -// app.at("/users/:id").delete(destroy_user); -// app.at("/users/:github_login").get(get_user); -// app.at("/users/:github_login/access_tokens") -// .post(create_access_token); -// } - -async fn get_users(request: Request, app: Arc) -> Result>> { - // request.require_token().await?; - +async fn get_users(app: Arc) -> Result>> { let users = app.db.get_all_users().await?; Ok(Json(users)) } #[derive(Deserialize)] -struct CreateUser { +struct CreateUserParams { github_login: String, admin: bool, } -async fn create_user(Json(params): Json, app: Arc) -> Result> { +async fn create_user( + Json(params): Json, + app: Arc, +) -> Result> { let user_id = app .db .create_user(¶ms.github_login, params.admin) @@ -69,102 +72,88 @@ async fn create_user(Json(params): Json, app: Arc) -> Resu } #[derive(Deserialize)] -struct UpdateUser { +struct UpdateUserParams { admin: bool, } async fn update_user( Path(user_id): Path, - Json(params): Json, + Json(params): Json, app: Arc, -) -> Result { - let user_id = UserId(user_id); - app.db.set_user_is_admin(user_id, params.admin).await?; +) -> Result<()> { + app.db + .set_user_is_admin(UserId(user_id), params.admin) + .await?; Ok(()) } -// async fn update_user(mut 
request: Request) -> tide::Result { -// request.require_token().await?; - -// #[derive(Deserialize)] -// struct Params { -// admin: bool, -// } - -// request -// .db() -// .set_user_is_admin(user_id, params.admin) -// .await?; - -// Ok(tide::Response::builder(StatusCode::Ok).build()) -// } - -// async fn destroy_user(request: Request) -> tide::Result { -// request.require_token().await?; -// let user_id = UserId( -// request -// .param("id")? -// .parse::() -// .map_err(|error| surf::Error::from_str(StatusCode::BadRequest, error.to_string()))?, -// ); - -// request.db().destroy_user(user_id).await?; - -// Ok(tide::Response::builder(StatusCode::Ok).build()) -// } - -// async fn create_access_token(request: Request) -> tide::Result { -// request.require_token().await?; +async fn destroy_user(Path(user_id): Path, app: Arc) -> Result<()> { + app.db.destroy_user(UserId(user_id)).await?; + Ok(()) +} -// let user = request -// .db() -// .get_user_by_github_login(request.param("github_login")?) -// .await? -// .ok_or_else(|| surf::Error::from_str(StatusCode::NotFound, "user not found"))?; +async fn get_user(Path(login): Path, app: Arc) -> Result> { + let user = app + .db + .get_user_by_github_login(&login) + .await? + .ok_or_else(|| anyhow!("user not found"))?; + Ok(Json(user)) +} -// #[derive(Deserialize)] -// struct QueryParams { -// public_key: String, -// impersonate: Option, -// } +#[derive(Deserialize)] +struct CreateAccessTokenQueryParams { + public_key: String, + impersonate: Option, +} -// let query_params: QueryParams = request.query().map_err(|_| { -// surf::Error::from_str(StatusCode::UnprocessableEntity, "invalid query params") -// })?; - -// let mut user_id = user.id; -// if let Some(impersonate) = query_params.impersonate { -// if user.admin { -// if let Some(impersonated_user) = -// request.db().get_user_by_github_login(&impersonate).await? 
-// { -// user_id = impersonated_user.id; -// } else { -// return Ok(tide::Response::builder(StatusCode::UnprocessableEntity) -// .body(format!( -// "Can't impersonate non-existent user {}", -// impersonate -// )) -// .build()); -// } -// } else { -// return Ok(tide::Response::builder(StatusCode::Unauthorized) -// .body(format!( -// "Can't impersonate user {} because the real user isn't an admin", -// impersonate -// )) -// .build()); -// } -// } +#[derive(Serialize)] +struct CreateAccessTokenResponse { + user_id: UserId, + encrypted_access_token: String, +} -// let access_token = auth::create_access_token(request.db().as_ref(), user_id).await?; -// let encrypted_access_token = -// auth::encrypt_access_token(&access_token, query_params.public_key.clone())?; +async fn create_access_token( + Path(login): Path, + Query(params): Query, + app: Arc, +) -> Result> { + // request.require_token().await?; -// Ok(tide::Response::builder(StatusCode::Ok) -// .body(json!({"user_id": user_id, "encrypted_access_token": encrypted_access_token})) -// .build()) -// } + let user = app + .db + .get_user_by_github_login(&login) + .await? + .ok_or_else(|| anyhow!("user not found"))?; + + let mut user_id = user.id; + if let Some(impersonate) = params.impersonate { + if user.admin { + if let Some(impersonated_user) = app.db.get_user_by_github_login(&impersonate).await? 
{ + user_id = impersonated_user.id; + } else { + return Err(Error::Http( + StatusCode::UNPROCESSABLE_ENTITY, + format!("user {impersonate} does not exist"), + )); + } + } else { + return Err(Error::Http( + StatusCode::UNAUTHORIZED, + format!("you do not have permission to impersonate other users"), + )); + } + } + + let access_token = auth::create_access_token(app.db.as_ref(), user_id).await?; + let encrypted_access_token = + auth::encrypt_access_token(&access_token, params.public_key.clone())?; + + Ok(Json(CreateAccessTokenResponse { + user_id, + encrypted_access_token, + })) +} // #[async_trait] // pub trait RequestExt { diff --git a/crates/collab/src/auth.rs b/crates/collab/src/auth.rs index 9bbd9496416c3917ac39488aa42be812a488258a..4fb31749e887c83a7bd5d3d67f1c0f642fdce723 100644 --- a/crates/collab/src/auth.rs +++ b/crates/collab/src/auth.rs @@ -1,18 +1,10 @@ -// use super::{ -// db::{self, UserId}, -// errors::TideResultExt, -// }; -// use crate::Request; -// use anyhow::{anyhow, Context}; -// use rand::thread_rng; -// use rpc::auth as zed_auth; -// use scrypt::{ -// password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, -// Scrypt, -// }; -// use std::convert::TryFrom; -// use surf::StatusCode; -// use tide::Error; +use super::db::{self, UserId}; +use anyhow::{Context, Result}; +use rand::thread_rng; +use scrypt::{ + password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, + Scrypt, +}; // pub async fn process_auth_header(request: &Request) -> tide::Result { // let mut auth_header = request @@ -58,45 +50,45 @@ // Ok(user_id) // } -// const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; +const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; -// pub async fn create_access_token(db: &dyn db::Db, user_id: UserId) -> tide::Result { -// let access_token = zed_auth::random_token(); -// let access_token_hash = -// hash_access_token(&access_token).context("failed to hash access token")?; -// db.create_access_token_hash(user_id, 
&access_token_hash, MAX_ACCESS_TOKENS_TO_STORE) -// .await?; -// Ok(access_token) -// } +pub async fn create_access_token(db: &dyn db::Db, user_id: UserId) -> Result { + let access_token = rpc::auth::random_token(); + let access_token_hash = + hash_access_token(&access_token).context("failed to hash access token")?; + db.create_access_token_hash(user_id, &access_token_hash, MAX_ACCESS_TOKENS_TO_STORE) + .await?; + Ok(access_token) +} -// fn hash_access_token(token: &str) -> tide::Result { -// // Avoid slow hashing in debug mode. -// let params = if cfg!(debug_assertions) { -// scrypt::Params::new(1, 1, 1).unwrap() -// } else { -// scrypt::Params::recommended() -// }; +fn hash_access_token(token: &str) -> Result { + // Avoid slow hashing in debug mode. + let params = if cfg!(debug_assertions) { + scrypt::Params::new(1, 1, 1).unwrap() + } else { + scrypt::Params::recommended() + }; -// Ok(Scrypt -// .hash_password( -// token.as_bytes(), -// None, -// params, -// &SaltString::generate(thread_rng()), -// )? -// .to_string()) -// } + Ok(Scrypt + .hash_password( + token.as_bytes(), + None, + params, + &SaltString::generate(thread_rng()), + )? 
+ .to_string()) +} -// pub fn encrypt_access_token(access_token: &str, public_key: String) -> tide::Result { -// let native_app_public_key = -// zed_auth::PublicKey::try_from(public_key).context("failed to parse app public key")?; -// let encrypted_access_token = native_app_public_key -// .encrypt_string(&access_token) -// .context("failed to encrypt access token with public key")?; -// Ok(encrypted_access_token) -// } +pub fn encrypt_access_token(access_token: &str, public_key: String) -> Result { + let native_app_public_key = + rpc::auth::PublicKey::try_from(public_key).context("failed to parse app public key")?; + let encrypted_access_token = native_app_public_key + .encrypt_string(&access_token) + .context("failed to encrypt access token with public key")?; + Ok(encrypted_access_token) +} -// pub fn verify_access_token(token: &str, hash: &str) -> tide::Result { -// let hash = PasswordHash::new(hash)?; -// Ok(Scrypt.verify_password(token.as_bytes(), &hash).is_ok()) -// } +pub fn verify_access_token(token: &str, hash: &str) -> Result { + let hash = PasswordHash::new(hash)?; + Ok(Scrypt.verify_password(token.as_bytes(), &hash).is_ok()) +} diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 6cd264074be590859dfa39c05c082827ff6e7a3c..c0ea6ba77c316cae8b8d1e1ea4d7bca7404f8e5c 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -94,33 +94,47 @@ pub async fn run_server( Ok(()) } -type Result = std::result::Result; +pub type Result = std::result::Result; -struct Error(anyhow::Error); +pub enum Error { + Http(StatusCode, String), + Internal(anyhow::Error), +} impl From for Error where E: Into, { fn from(error: E) -> Self { - Self(error.into()) + Self::Internal(error.into()) } } impl IntoResponse for Error { fn into_response(self) -> axum::response::Response { - (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &self.0)).into_response() + match self { + Error::Http(code, message) => (code, message).into_response(), + 
Error::Internal(error) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("{}", &error)).into_response() + } + } } } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0.fmt(f) + match self { + Error::Http(code, message) => (code, message).fmt(f), + Error::Internal(error) => error.fmt(f), + } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0.fmt(f) + match self { + Error::Http(code, message) => write!(f, "{code}: {message}"), + Error::Internal(error) => error.fmt(f), + } } } From 538fc23a77cc306657ee7221c9a516707f474f31 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 25 Apr 2022 20:05:09 -0600 Subject: [PATCH 07/11] WIP --- crates/collab/src/api.rs | 79 +++++++++++++++++++--------------- crates/collab/src/auth.rs | 90 ++++++++++++++++++++++----------------- crates/collab/src/main.rs | 23 +++------- 3 files changed, 99 insertions(+), 93 deletions(-) diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index 80f2682f4a9ba458d853819cfa0e2fdadfb0d99a..180066907e8c3a53bf1f198ab4c7673e7454971b 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -7,42 +7,45 @@ use anyhow::anyhow; use axum::{ body::Body, extract::{Path, Query}, - http::StatusCode, - routing::{delete, get, post, put}, - Json, Router, + http::{self, Request, StatusCode}, + middleware::{self, Next}, + response::IntoResponse, + routing::{get, post, put}, + Extension, Json, Router, }; use serde::{Deserialize, Serialize}; use std::sync::Arc; +use tower::ServiceBuilder; + +pub fn routes(state: Arc) -> Router { + Router::new() + .route("/users", get(get_users).post(create_user)) + .route("/users/:id", put(update_user).delete(destroy_user)) + .route("/users/:gh_login", get(get_user)) + .route("/users/:gh_login/access_tokens", post(create_access_token)) + .layer( + ServiceBuilder::new() + .layer(Extension(state)) + 
.layer(middleware::from_fn(validate_api_token)), + ) +} -pub fn add_routes(router: Router, app: Arc) -> Router { - router - .route("/users", { - let app = app.clone(); - get(move || get_users(app)) - }) - .route("/users", { - let app = app.clone(); - post(move |params| create_user(params, app)) - }) - .route("/users/:id", { - let app = app.clone(); - put(move |user_id, params| update_user(user_id, params, app)) - }) - .route("/users/:id", { - let app = app.clone(); - delete(move |user_id| destroy_user(user_id, app)) - }) - .route("/users/:github_login", { - let app = app.clone(); - get(move |github_login| get_user(github_login, app)) - }) - .route("/users/:github_login/access_tokens", { - let app = app.clone(); - post(move |github_login, params| create_access_token(github_login, params, app)) - }) +pub async fn validate_api_token(req: Request, next: Next) -> impl IntoResponse { + let mut auth_header = req + .headers() + .get(http::header::AUTHORIZATION) + .and_then(|header| header.to_str().ok()) + .ok_or_else(|| { + Error::Http( + StatusCode::BAD_REQUEST, + "missing authorization header".to_string(), + ) + })?; + + Ok::<_, Error>(next.run(req).await) } -async fn get_users(app: Arc) -> Result>> { +async fn get_users(Extension(app): Extension>) -> Result>> { let users = app.db.get_all_users().await?; Ok(Json(users)) } @@ -55,7 +58,7 @@ struct CreateUserParams { async fn create_user( Json(params): Json, - app: Arc, + Extension(app): Extension>, ) -> Result> { let user_id = app .db @@ -79,7 +82,7 @@ struct UpdateUserParams { async fn update_user( Path(user_id): Path, Json(params): Json, - app: Arc, + Extension(app): Extension>, ) -> Result<()> { app.db .set_user_is_admin(UserId(user_id), params.admin) @@ -87,12 +90,18 @@ async fn update_user( Ok(()) } -async fn destroy_user(Path(user_id): Path, app: Arc) -> Result<()> { +async fn destroy_user( + Path(user_id): Path, + Extension(app): Extension>, +) -> Result<()> { app.db.destroy_user(UserId(user_id)).await?; Ok(()) } 
-async fn get_user(Path(login): Path, app: Arc) -> Result> { +async fn get_user( + Path(login): Path, + Extension(app): Extension>, +) -> Result> { let user = app .db .get_user_by_github_login(&login) @@ -116,7 +125,7 @@ struct CreateAccessTokenResponse { async fn create_access_token( Path(login): Path, Query(params): Query, - app: Arc, + Extension(app): Extension>, ) -> Result> { // request.require_token().await?; diff --git a/crates/collab/src/auth.rs b/crates/collab/src/auth.rs index 4fb31749e887c83a7bd5d3d67f1c0f642fdce723..39ae919a69e7eef8403f811cd75fb08fdfe6da50 100644 --- a/crates/collab/src/auth.rs +++ b/crates/collab/src/auth.rs @@ -1,54 +1,64 @@ +use std::sync::Arc; + use super::db::{self, UserId}; +use crate::{AppState, Error}; use anyhow::{Context, Result}; +use axum::{ + http::{self, Request, StatusCode}, + middleware::Next, + response::IntoResponse, +}; use rand::thread_rng; use scrypt::{ password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, Scrypt, }; -// pub async fn process_auth_header(request: &Request) -> tide::Result { -// let mut auth_header = request -// .header("Authorization") -// .ok_or_else(|| { -// Error::new( -// StatusCode::BadRequest, -// anyhow!("missing authorization header"), -// ) -// })? 
-// .last() -// .as_str() -// .split_whitespace(); -// let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| { -// Error::new( -// StatusCode::BadRequest, -// anyhow!("missing user id in authorization header"), -// ) -// })?); -// let access_token = auth_header.next().ok_or_else(|| { -// Error::new( -// StatusCode::BadRequest, -// anyhow!("missing access token in authorization header"), -// ) -// })?; +pub async fn validate_header(req: Request, next: Next) -> impl IntoResponse { + let mut auth_header = req + .headers() + .get(http::header::AUTHORIZATION) + .and_then(|header| header.to_str().ok()) + .ok_or_else(|| { + Error::Http( + StatusCode::BAD_REQUEST, + "missing authorization header".to_string(), + ) + })? + .split_whitespace(); + + let user_id = UserId(auth_header.next().unwrap_or("").parse().map_err(|_| { + Error::Http( + StatusCode::BAD_REQUEST, + "missing user id in authorization header".to_string(), + ) + })?); -// let state = request.state().clone(); -// let mut credentials_valid = false; -// for password_hash in state.db.get_access_token_hashes(user_id).await? { -// if verify_access_token(&access_token, &password_hash)? { -// credentials_valid = true; -// break; -// } -// } + let access_token = auth_header.next().ok_or_else(|| { + Error::Http( + StatusCode::BAD_REQUEST, + "missing access token in authorization header".to_string(), + ) + })?; -// if !credentials_valid { -// Err(Error::new( -// StatusCode::Unauthorized, -// anyhow!("invalid credentials"), -// ))?; -// } + let state = req.extensions().get::>().unwrap(); + let mut credentials_valid = false; + for password_hash in state.db.get_access_token_hashes(user_id).await? { + if verify_access_token(&access_token, &password_hash)? 
{ + credentials_valid = true; + break; + } + } -// Ok(user_id) -// } + if !credentials_valid { + Err(Error::Http( + StatusCode::UNAUTHORIZED, + "invalid credentials".to_string(), + ))?; + } + + Ok::<_, Error>(next.run(req).await) +} const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index c0ea6ba77c316cae8b8d1e1ea4d7bca7404f8e5c..b3da6df4a8a569cbe28670779e8e3ae6cda8e294 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -11,8 +11,6 @@ use db::{Db, PostgresDb}; use serde::Deserialize; use std::{net::TcpListener, sync::Arc}; -// type Request = tide::Request>; - #[derive(Default, Deserialize)] pub struct Config { pub http_port: u16, @@ -22,31 +20,20 @@ pub struct Config { pub struct AppState { db: Arc, - config: Config, + api_token: String, } impl AppState { async fn new(config: Config) -> Result> { let db = PostgresDb::new(&config.database_url, 5).await?; - let this = Self { db: Arc::new(db), - config, + api_token: config.api_token.clone(), }; Ok(Arc::new(this)) } } -// trait RequestExt { -// fn db(&self) -> &Arc; -// } - -// impl RequestExt for Request { -// fn db(&self) -> &Arc { -// &self.data::>().unwrap().db -// } -// } - #[tokio::main] async fn main() -> Result<()> { if std::env::var("LOG_JSON").is_ok() { @@ -68,7 +55,7 @@ async fn main() -> Result<()> { run_server( state.clone(), rpc, - TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)) + TcpListener::bind(&format!("0.0.0.0:{}", config.http_port)) .expect("failed to bind TCP listener"), ) .await?; @@ -80,11 +67,11 @@ pub async fn run_server( peer: Arc, listener: TcpListener, ) -> Result<()> { - let app = Router::::new(); // TODO: Compression on API routes? // TODO: Authenticate API routes. - let app = api::add_routes(app, state); + let app = Router::::new().merge(api::routes(state.clone())); + // TODO: Add rpc routes axum::Server::from_tcp(listener)? 
From 2bd08a7b3f6523e41e60204e08fced22b3d9af84 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 25 Apr 2022 20:10:14 -0600 Subject: [PATCH 08/11] Validate API token for all API routes --- crates/collab/src/api.rs | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index 180066907e8c3a53bf1f198ab4c7673e7454971b..6cdd28044c48a1bc53a5baeb62ae08d5f422dd9a 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -31,7 +31,7 @@ pub fn routes(state: Arc) -> Router { } pub async fn validate_api_token(req: Request, next: Next) -> impl IntoResponse { - let mut auth_header = req + let token = req .headers() .get(http::header::AUTHORIZATION) .and_then(|header| header.to_str().ok()) @@ -40,8 +40,24 @@ pub async fn validate_api_token(req: Request, next: Next) -> impl IntoR StatusCode::BAD_REQUEST, "missing authorization header".to_string(), ) + })? + .strip_prefix("token ") + .ok_or_else(|| { + Error::Http( + StatusCode::BAD_REQUEST, + "invalid authorization header".to_string(), + ) })?; + let state = req.extensions().get::>().unwrap(); + + if token != state.api_token { + Err(Error::Http( + StatusCode::UNAUTHORIZED, + "invalid authorization token".to_string(), + ))? 
+ } + Ok::<_, Error>(next.run(req).await) } @@ -163,25 +179,3 @@ async fn create_access_token( encrypted_access_token, })) } - -// #[async_trait] -// pub trait RequestExt { -// async fn require_token(&self) -> tide::Result<()>; -// } - -// #[async_trait] -// impl RequestExt for Request { -// async fn require_token(&self) -> tide::Result<()> { -// let token = self -// .header("Authorization") -// .and_then(|header| header.get(0)) -// .and_then(|header| header.as_str().strip_prefix("token ")) -// .ok_or_else(|| surf::Error::from_str(403, "invalid authorization header"))?; - -// if token == self.state().config.api_token { -// Ok(()) -// } else { -// Err(tide::Error::from_str(403, "invalid authorization token")) -// } -// } -// } From 3938f7c364ff0c9bf416da3a25c39116e7f6b00f Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 25 Apr 2022 20:12:32 -0600 Subject: [PATCH 09/11] Fix compile error --- crates/collab/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index b3da6df4a8a569cbe28670779e8e3ae6cda8e294..5fea3699e9d6b73952e50e642dc93dc8308cbf39 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -24,7 +24,7 @@ pub struct AppState { } impl AppState { - async fn new(config: Config) -> Result> { + async fn new(config: &Config) -> Result> { let db = PostgresDb::new(&config.database_url, 5).await?; let this = Self { db: Arc::new(db), @@ -50,7 +50,7 @@ async fn main() -> Result<()> { } let config = envy::from_env::().expect("error loading config"); - let state = AppState::new(config).await?; + let state = AppState::new(&config).await?; let rpc = Peer::new(); run_server( state.clone(), From be040b60b73e5be9f15a2eaf2fb69436529dab37 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 25 Apr 2022 20:21:43 -0600 Subject: [PATCH 10/11] WIP --- crates/collab/src/api.rs | 1 + crates/collab/src/main.rs | 25 +++++-------------------- crates/collab/src/rpc.rs | 7 +++++++ 3 
files changed, 13 insertions(+), 20 deletions(-) diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index 6cdd28044c48a1bc53a5baeb62ae08d5f422dd9a..cea51ce2aab7137e4983cc8c490da860f60b61da 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -28,6 +28,7 @@ pub fn routes(state: Arc) -> Router { .layer(Extension(state)) .layer(middleware::from_fn(validate_api_token)), ) + // TODO: Compression on API routes? } pub async fn validate_api_token(req: Request, next: Next) -> impl IntoResponse { diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 5fea3699e9d6b73952e50e642dc93dc8308cbf39..b19cae1959b6153a8a8eadef2ce57dd2bc7286dd 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -51,28 +51,13 @@ async fn main() -> Result<()> { let config = envy::from_env::().expect("error loading config"); let state = AppState::new(&config).await?; - let rpc = Peer::new(); - run_server( - state.clone(), - rpc, - TcpListener::bind(&format!("0.0.0.0:{}", config.http_port)) - .expect("failed to bind TCP listener"), - ) - .await?; - Ok(()) -} - -pub async fn run_server( - state: Arc, - peer: Arc, - listener: TcpListener, -) -> Result<()> { - // TODO: Compression on API routes? - // TODO: Authenticate API routes. - let app = Router::::new().merge(api::routes(state.clone())); + let listener = TcpListener::bind(&format!("0.0.0.0:{}", config.http_port)) + .expect("failed to bind TCP listener"); - // TODO: Add rpc routes + let app = Router::::new() + .merge(api::routes(state)) + .merge(rpc::routes(Peer::new())); axum::Server::from_tcp(listener)? 
.serve(app.into_make_service()) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 1cc163d633fe690f3fd0b6a6212e019847895b94..cbde3adcbb3357f386c0f90eda1811358d83f163 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1099,6 +1099,10 @@ // } // } +pub fn routes(peer: Arc) -> Router { + Router::new() +} + // pub fn add_routes(app: &mut tide::Server>, rpc: &Arc) { // let server = Server::new(app.state().clone(), rpc.clone(), None); // app.at("/rpc").get(move |request: Request>| { @@ -6360,3 +6364,6 @@ // } // } // } + +use axum::{body::Body, Router}; +use client::Peer; From 2adb9fe472322b6a47ca2d062d58f25e395928a3 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 26 Apr 2022 11:15:41 -0600 Subject: [PATCH 11/11] Get zed.dev working with new collab backend Co-Authored-By: Antonio Scandurra --- Cargo.lock | 29 + crates/client/src/client.rs | 16 +- crates/collab/Cargo.toml | 3 +- crates/collab/src/api.rs | 8 +- crates/collab/src/auth.rs | 11 +- crates/collab/src/db.rs | 341 +- crates/collab/src/main.rs | 14 +- crates/collab/src/rpc.rs | 12759 +++++++++++++++++----------------- crates/rpc/src/conn.rs | 59 +- crates/rpc/src/proto.rs | 14 +- 10 files changed, 6631 insertions(+), 6623 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ef11676ebcbb58fc36860bd4fa317cfb3919ec4..38a6743d5ff5d3d1bc387be9a20157e784fd2b47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,9 +463,11 @@ checksum = "f523b4e98ba6897ae90994bc18423d9877c54f9047b06a00ddc8122a957b1c70" dependencies = [ "async-trait", "axum-core", + "base64 0.13.0", "bitflags", "bytes 1.0.1", "futures-util", + "headers", "http", "http-body", "hyper", @@ -478,8 +480,10 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sha-1 0.10.0", "sync_wrapper", "tokio", + "tokio-tungstenite", "tower", "tower-http", "tower-layer", @@ -2111,6 +2115,31 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "headers" +version = "0.3.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +dependencies = [ + "base64 0.13.0", + "bitflags", + "bytes 1.0.1", + "headers-core", + "http", + "httpdate", + "mime", + "sha-1 0.10.0", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http", +] + [[package]] name = "heck" version = "0.3.3" diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 93cbba48a190f1df50fb97c8b7be78103c40626a..dc8d8a574f7f9dc7d598ae2aa91f5c1758e82920 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -11,7 +11,7 @@ use async_tungstenite::tungstenite::{ error::Error as WebsocketError, http::{Request, StatusCode}, }; -use futures::{future::LocalBoxFuture, FutureExt, StreamExt}; +use futures::{future::LocalBoxFuture, FutureExt, SinkExt, StreamExt, TryStreamExt}; use gpui::{ actions, AnyModelHandle, AnyViewHandle, AnyWeakModelHandle, AnyWeakViewHandle, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle, @@ -774,7 +774,7 @@ impl Client { "Authorization", format!("{} {}", credentials.user_id, credentials.access_token), ) - .header("X-Zed-Protocol-Version", rpc::PROTOCOL_VERSION); + .header("x-zed-protocol-version", rpc::PROTOCOL_VERSION); let http = self.http.clone(); cx.background().spawn(async move { @@ -817,13 +817,21 @@ impl Client { let request = request.uri(rpc_url.as_str()).body(())?; let (stream, _) = async_tungstenite::async_tls::client_async_tls(request, stream).await?; - Ok(Connection::new(stream)) + Ok(Connection::new( + stream + .map_err(|error| anyhow!(error)) + .sink_map_err(|error| anyhow!(error)), + )) } "http" => { rpc_url.set_scheme("ws").unwrap(); let request = request.uri(rpc_url.as_str()).body(())?; let (stream, 
_) = async_tungstenite::client_async(request, stream).await?; - Ok(Connection::new(stream)) + Ok(Connection::new( + stream + .map_err(|error| anyhow!(error)) + .sink_map_err(|error| anyhow!(error)), + )) } _ => Err(anyhow!("invalid rpc url: {}", rpc_url))?, } diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 8489cc2be64b194b22b6ec1311abf430deace0cf..be0bace142a1d67cb3b0bd69925bbc5e3df0485a 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -20,12 +20,13 @@ util = { path = "../util" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" -axum = { version = "0.5", features = ["json"] } +axum = { version = "0.5", features = ["json", "headers", "ws"] } base64 = "0.13" envy = "0.4.2" env_logger = "0.8" futures = "0.3" json_env_logger = "0.1" +lazy_static = "1.4" lipsum = { version = "0.8", optional = true } log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" diff --git a/crates/collab/src/api.rs b/crates/collab/src/api.rs index cea51ce2aab7137e4983cc8c490da860f60b61da..818e92316c19a8f9d171fce0d99b3c300a2dfb60 100644 --- a/crates/collab/src/api.rs +++ b/crates/collab/src/api.rs @@ -20,9 +20,11 @@ use tower::ServiceBuilder; pub fn routes(state: Arc) -> Router { Router::new() .route("/users", get(get_users).post(create_user)) - .route("/users/:id", put(update_user).delete(destroy_user)) - .route("/users/:gh_login", get(get_user)) - .route("/users/:gh_login/access_tokens", post(create_access_token)) + .route( + "/users/:id", + put(update_user).delete(destroy_user).get(get_user), + ) + .route("/users/:id/access_tokens", post(create_access_token)) .layer( ServiceBuilder::new() .layer(Extension(state)) diff --git a/crates/collab/src/auth.rs b/crates/collab/src/auth.rs index 39ae919a69e7eef8403f811cd75fb08fdfe6da50..aad331faecfa6e05328d6f665862b054ed11fe51 100644 --- a/crates/collab/src/auth.rs +++ b/crates/collab/src/auth.rs @@ -14,7 +14,7 @@ use scrypt::{ Scrypt, }; -pub async fn 
validate_header(req: Request, next: Next) -> impl IntoResponse { +pub async fn validate_header(mut req: Request, next: Next) -> impl IntoResponse { let mut auth_header = req .headers() .get(http::header::AUTHORIZATION) @@ -50,14 +50,15 @@ pub async fn validate_header(req: Request, next: Next) -> impl IntoResp } } - if !credentials_valid { + if credentials_valid { + req.extensions_mut().insert(user_id); + Ok::<_, Error>(next.run(req).await) + } else { Err(Error::Http( StatusCode::UNAUTHORIZED, "invalid credentials".to_string(), - ))?; + )) } - - Ok::<_, Error>(next.run(req).await) } const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 283e691211bef8779515e8e6454149dc18f3931c..157a2445e53b3e9ffbab5784f583821ef88463b3 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1,26 +1,10 @@ use anyhow::Context; use anyhow::Result; use async_trait::async_trait; -use futures::executor::block_on; use serde::Serialize; pub use sqlx::postgres::PgPoolOptions as DbOptions; use sqlx::{types::Uuid, FromRow}; use time::OffsetDateTime; -use tokio::task::yield_now; - -macro_rules! 
test_support { - ($self:ident, { $($token:tt)* }) => {{ - let body = async { - $($token)* - }; - if $self.test_mode { - yield_now().await; - block_on(body) - } else { - body.await - } - }}; -} #[async_trait] pub trait Db: Send + Sync { @@ -78,7 +62,6 @@ pub trait Db: Send + Sync { pub struct PostgresDb { pool: sqlx::PgPool, - test_mode: bool, } impl PostgresDb { @@ -88,10 +71,7 @@ impl PostgresDb { .connect(url) .await .context("failed to connect to postgres database")?; - Ok(Self { - pool, - test_mode: false, - }) + Ok(Self { pool }) } } @@ -100,27 +80,23 @@ impl Db for PostgresDb { // users async fn create_user(&self, github_login: &str, admin: bool) -> Result { - test_support!(self, { - let query = " + let query = " INSERT INTO users (github_login, admin) VALUES ($1, $2) ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login RETURNING id "; - Ok(sqlx::query_scalar(query) - .bind(github_login) - .bind(admin) - .fetch_one(&self.pool) - .await - .map(UserId)?) - }) + Ok(sqlx::query_scalar(query) + .bind(github_login) + .bind(admin) + .fetch_one(&self.pool) + .await + .map(UserId)?) } async fn get_all_users(&self) -> Result> { - test_support!(self, { - let query = "SELECT * FROM users ORDER BY github_login ASC"; - Ok(sqlx::query_as(query).fetch_all(&self.pool).await?) - }) + let query = "SELECT * FROM users ORDER BY github_login ASC"; + Ok(sqlx::query_as(query).fetch_all(&self.pool).await?) } async fn get_user_by_id(&self, id: UserId) -> Result> { @@ -130,57 +106,49 @@ impl Db for PostgresDb { async fn get_users_by_ids(&self, ids: Vec) -> Result> { let ids = ids.into_iter().map(|id| id.0).collect::>(); - test_support!(self, { - let query = " + let query = " SELECT users.* FROM users WHERE users.id = ANY ($1) "; - Ok(sqlx::query_as(query) - .bind(&ids) - .fetch_all(&self.pool) - .await?) - }) + Ok(sqlx::query_as(query) + .bind(&ids) + .fetch_all(&self.pool) + .await?) 
} async fn get_user_by_github_login(&self, github_login: &str) -> Result> { - test_support!(self, { - let query = "SELECT * FROM users WHERE github_login = $1 LIMIT 1"; - Ok(sqlx::query_as(query) - .bind(github_login) - .fetch_optional(&self.pool) - .await?) - }) + let query = "SELECT * FROM users WHERE github_login = $1 LIMIT 1"; + Ok(sqlx::query_as(query) + .bind(github_login) + .fetch_optional(&self.pool) + .await?) } async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> { - test_support!(self, { - let query = "UPDATE users SET admin = $1 WHERE id = $2"; - Ok(sqlx::query(query) - .bind(is_admin) - .bind(id.0) - .execute(&self.pool) - .await - .map(drop)?) - }) + let query = "UPDATE users SET admin = $1 WHERE id = $2"; + Ok(sqlx::query(query) + .bind(is_admin) + .bind(id.0) + .execute(&self.pool) + .await + .map(drop)?) } async fn destroy_user(&self, id: UserId) -> Result<()> { - test_support!(self, { - let query = "DELETE FROM access_tokens WHERE user_id = $1;"; - sqlx::query(query) - .bind(id.0) - .execute(&self.pool) - .await - .map(drop)?; - let query = "DELETE FROM users WHERE id = $1;"; - Ok(sqlx::query(query) - .bind(id.0) - .execute(&self.pool) - .await - .map(drop)?) - }) + let query = "DELETE FROM access_tokens WHERE user_id = $1;"; + sqlx::query(query) + .bind(id.0) + .execute(&self.pool) + .await + .map(drop)?; + let query = "DELETE FROM users WHERE id = $1;"; + Ok(sqlx::query(query) + .bind(id.0) + .execute(&self.pool) + .await + .map(drop)?) 
} // access tokens @@ -191,12 +159,11 @@ impl Db for PostgresDb { access_token_hash: &str, max_access_token_count: usize, ) -> Result<()> { - test_support!(self, { - let insert_query = " + let insert_query = " INSERT INTO access_tokens (user_id, hash) VALUES ($1, $2); "; - let cleanup_query = " + let cleanup_query = " DELETE FROM access_tokens WHERE id IN ( SELECT id from access_tokens @@ -206,35 +173,32 @@ impl Db for PostgresDb { ) "; - let mut tx = self.pool.begin().await?; - sqlx::query(insert_query) - .bind(user_id.0) - .bind(access_token_hash) - .execute(&mut tx) - .await?; - sqlx::query(cleanup_query) - .bind(user_id.0) - .bind(access_token_hash) - .bind(max_access_token_count as u32) - .execute(&mut tx) - .await?; - Ok(tx.commit().await?) - }) + let mut tx = self.pool.begin().await?; + sqlx::query(insert_query) + .bind(user_id.0) + .bind(access_token_hash) + .execute(&mut tx) + .await?; + sqlx::query(cleanup_query) + .bind(user_id.0) + .bind(access_token_hash) + .bind(max_access_token_count as u32) + .execute(&mut tx) + .await?; + Ok(tx.commit().await?) } async fn get_access_token_hashes(&self, user_id: UserId) -> Result> { - test_support!(self, { - let query = " + let query = " SELECT hash FROM access_tokens WHERE user_id = $1 ORDER BY id DESC "; - Ok(sqlx::query_scalar(query) - .bind(user_id.0) - .fetch_all(&self.pool) - .await?) - }) + Ok(sqlx::query_scalar(query) + .bind(user_id.0) + .fetch_all(&self.pool) + .await?) } // orgs @@ -242,94 +206,83 @@ impl Db for PostgresDb { #[allow(unused)] // Help rust-analyzer #[cfg(any(test, feature = "seed-support"))] async fn find_org_by_slug(&self, slug: &str) -> Result> { - test_support!(self, { - let query = " + let query = " SELECT * FROM orgs WHERE slug = $1 "; - Ok(sqlx::query_as(query) - .bind(slug) - .fetch_optional(&self.pool) - .await?) - }) + Ok(sqlx::query_as(query) + .bind(slug) + .fetch_optional(&self.pool) + .await?) 
} #[cfg(any(test, feature = "seed-support"))] async fn create_org(&self, name: &str, slug: &str) -> Result { - test_support!(self, { - let query = " + let query = " INSERT INTO orgs (name, slug) VALUES ($1, $2) RETURNING id "; - Ok(sqlx::query_scalar(query) - .bind(name) - .bind(slug) - .fetch_one(&self.pool) - .await - .map(OrgId)?) - }) + Ok(sqlx::query_scalar(query) + .bind(name) + .bind(slug) + .fetch_one(&self.pool) + .await + .map(OrgId)?) } #[cfg(any(test, feature = "seed-support"))] async fn add_org_member(&self, org_id: OrgId, user_id: UserId, is_admin: bool) -> Result<()> { - test_support!(self, { - let query = " + let query = " INSERT INTO org_memberships (org_id, user_id, admin) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING "; - Ok(sqlx::query(query) - .bind(org_id.0) - .bind(user_id.0) - .bind(is_admin) - .execute(&self.pool) - .await - .map(drop)?) - }) + Ok(sqlx::query(query) + .bind(org_id.0) + .bind(user_id.0) + .bind(is_admin) + .execute(&self.pool) + .await + .map(drop)?) } // channels #[cfg(any(test, feature = "seed-support"))] async fn create_org_channel(&self, org_id: OrgId, name: &str) -> Result { - test_support!(self, { - let query = " + let query = " INSERT INTO channels (owner_id, owner_is_user, name) VALUES ($1, false, $2) RETURNING id "; - Ok(sqlx::query_scalar(query) - .bind(org_id.0) - .bind(name) - .fetch_one(&self.pool) - .await - .map(ChannelId)?) - }) + Ok(sqlx::query_scalar(query) + .bind(org_id.0) + .bind(name) + .fetch_one(&self.pool) + .await + .map(ChannelId)?) } #[allow(unused)] // Help rust-analyzer #[cfg(any(test, feature = "seed-support"))] async fn get_org_channels(&self, org_id: OrgId) -> Result> { - test_support!(self, { - let query = " + let query = " SELECT * FROM channels WHERE channels.owner_is_user = false AND channels.owner_id = $1 "; - Ok(sqlx::query_as(query) - .bind(org_id.0) - .fetch_all(&self.pool) - .await?) - }) + Ok(sqlx::query_as(query) + .bind(org_id.0) + .fetch_all(&self.pool) + .await?) 
} async fn get_accessible_channels(&self, user_id: UserId) -> Result> { - test_support!(self, { - let query = " + let query = " SELECT channels.* FROM @@ -338,11 +291,10 @@ impl Db for PostgresDb { channel_memberships.user_id = $1 AND channel_memberships.channel_id = channels.id "; - Ok(sqlx::query_as(query) - .bind(user_id.0) - .fetch_all(&self.pool) - .await?) - }) + Ok(sqlx::query_as(query) + .bind(user_id.0) + .fetch_all(&self.pool) + .await?) } async fn can_user_access_channel( @@ -350,20 +302,18 @@ impl Db for PostgresDb { user_id: UserId, channel_id: ChannelId, ) -> Result { - test_support!(self, { - let query = " + let query = " SELECT id FROM channel_memberships WHERE user_id = $1 AND channel_id = $2 LIMIT 1 "; - Ok(sqlx::query_scalar::<_, i32>(query) - .bind(user_id.0) - .bind(channel_id.0) - .fetch_optional(&self.pool) - .await - .map(|e| e.is_some())?) - }) + Ok(sqlx::query_scalar::<_, i32>(query) + .bind(user_id.0) + .bind(channel_id.0) + .fetch_optional(&self.pool) + .await + .map(|e| e.is_some())?) } #[cfg(any(test, feature = "seed-support"))] @@ -373,20 +323,18 @@ impl Db for PostgresDb { user_id: UserId, is_admin: bool, ) -> Result<()> { - test_support!(self, { - let query = " + let query = " INSERT INTO channel_memberships (channel_id, user_id, admin) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING "; - Ok(sqlx::query(query) - .bind(channel_id.0) - .bind(user_id.0) - .bind(is_admin) - .execute(&self.pool) - .await - .map(drop)?) - }) + Ok(sqlx::query(query) + .bind(channel_id.0) + .bind(user_id.0) + .bind(is_admin) + .execute(&self.pool) + .await + .map(drop)?) 
} // messages @@ -399,23 +347,21 @@ impl Db for PostgresDb { timestamp: OffsetDateTime, nonce: u128, ) -> Result { - test_support!(self, { - let query = " + let query = " INSERT INTO channel_messages (channel_id, sender_id, body, sent_at, nonce) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (nonce) DO UPDATE SET nonce = excluded.nonce RETURNING id "; - Ok(sqlx::query_scalar(query) - .bind(channel_id.0) - .bind(sender_id.0) - .bind(body) - .bind(timestamp) - .bind(Uuid::from_u128(nonce)) - .fetch_one(&self.pool) - .await - .map(MessageId)?) - }) + Ok(sqlx::query_scalar(query) + .bind(channel_id.0) + .bind(sender_id.0) + .bind(body) + .bind(timestamp) + .bind(Uuid::from_u128(nonce)) + .fetch_one(&self.pool) + .await + .map(MessageId)?) } async fn get_channel_messages( @@ -424,8 +370,7 @@ impl Db for PostgresDb { count: usize, before_id: Option, ) -> Result> { - test_support!(self, { - let query = r#" + let query = r#" SELECT * FROM ( SELECT id, channel_id, sender_id, body, sent_at AT TIME ZONE 'UTC' as sent_at, nonce @@ -439,35 +384,32 @@ impl Db for PostgresDb { ) as recent_messages ORDER BY id ASC "#; - Ok(sqlx::query_as(query) - .bind(channel_id.0) - .bind(before_id.unwrap_or(MessageId::MAX)) - .bind(count as i64) - .fetch_all(&self.pool) - .await?) - }) + Ok(sqlx::query_as(query) + .bind(channel_id.0) + .bind(before_id.unwrap_or(MessageId::MAX)) + .bind(count as i64) + .fetch_all(&self.pool) + .await?) 
} #[cfg(test)] async fn teardown(&self, name: &str, url: &str) { use util::ResultExt; - test_support!(self, { - let query = " + let query = " SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '{}' AND pid <> pg_backend_pid(); "; - sqlx::query(query) - .bind(name) - .execute(&self.pool) - .await - .log_err(); - self.pool.close().await; - ::drop_database(url) - .await - .log_err(); - }) + sqlx::query(query) + .bind(name) + .execute(&self.pool) + .await + .log_err(); + self.pool.close().await; + ::drop_database(url) + .await + .log_err(); } } @@ -705,12 +647,11 @@ pub mod tests { let name = format!("zed-test-{}", rng.gen::()); let url = format!("postgres://postgres@localhost/{}", name); let migrations_path = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations")); - let db = block_on(async { + let db = futures::executor::block_on(async { Postgres::create_database(&url) .await .expect("failed to create test db"); - let mut db = PostgresDb::new(&url, 5).await.unwrap(); - db.test_mode = true; + let db = PostgresDb::new(&url, 5).await.unwrap(); let migrator = Migrator::new(migrations_path).await.unwrap(); migrator.run(&db.pool).await.unwrap(); db @@ -738,7 +679,7 @@ pub mod tests { impl Drop for TestDb { fn drop(&mut self) { if let Some(db) = self.db.take() { - block_on(db.teardown(&self.name, &self.url)); + futures::executor::block_on(db.teardown(&self.name, &self.url)); } } } diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index b19cae1959b6153a8a8eadef2ce57dd2bc7286dd..f0250a6835fa16227e04ccd1daa8c176591b395b 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -4,12 +4,14 @@ mod db; mod env; mod rpc; -use ::rpc::Peer; use axum::{body::Body, http::StatusCode, response::IntoResponse, Router}; use db::{Db, PostgresDb}; use serde::Deserialize; -use std::{net::TcpListener, sync::Arc}; +use std::{ + net::{SocketAddr, TcpListener}, + sync::Arc, +}; #[derive(Default, 
Deserialize)] pub struct Config { @@ -56,17 +58,17 @@ async fn main() -> Result<()> { .expect("failed to bind TCP listener"); let app = Router::::new() - .merge(api::routes(state)) - .merge(rpc::routes(Peer::new())); + .merge(api::routes(state.clone())) + .merge(rpc::routes(state)); axum::Server::from_tcp(listener)? - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) .await?; Ok(()) } -pub type Result = std::result::Result; +pub type Result = std::result::Result; pub enum Error { Http(StatusCode, String), diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index cbde3adcbb3357f386c0f90eda1811358d83f163..6c4775ba6d3229d030e02273e14734ed83ed9032 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1,6369 +1,6396 @@ -// mod store; - -// use super::{ -// auth::process_auth_header, -// db::{ChannelId, MessageId, UserId}, -// AppState, -// }; -// use anyhow::anyhow; -// use async_std::{ -// sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, -// task, -// }; -// use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream}; -// use collections::{HashMap, HashSet}; -// use futures::{channel::mpsc, future::BoxFuture, FutureExt, SinkExt, StreamExt}; -// use log::{as_debug, as_display}; -// use rpc::{ -// proto::{self, AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}, -// Connection, ConnectionId, Peer, TypedEnvelope, -// }; -// use sha1::{Digest as _, Sha1}; -// use std::{ -// any::TypeId, -// future::Future, -// marker::PhantomData, -// ops::{Deref, DerefMut}, -// rc::Rc, -// sync::Arc, -// time::{Duration, Instant}, -// }; -// use store::{Store, Worktree}; -// use time::OffsetDateTime; -// use util::ResultExt; - -// type MessageHandler = Box< -// dyn Send -// + Sync -// + Fn(Arc, Box) -> BoxFuture<'static, tide::Result<()>>, -// >; - -// pub struct Server { -// peer: Arc, -// store: RwLock, -// app_state: Arc, -// handlers: HashMap, -// notifications: Option>, -// } - -// pub 
trait Executor: Send + Clone { -// type Timer: Send + Future; -// fn spawn_detached>(&self, future: F); -// fn timer(&self, duration: Duration) -> Self::Timer; -// } - -// #[derive(Clone)] -// pub struct RealExecutor; - -// const MESSAGE_COUNT_PER_PAGE: usize = 100; -// const MAX_MESSAGE_LEN: usize = 1024; - -// struct StoreReadGuard<'a> { -// guard: RwLockReadGuard<'a, Store>, -// _not_send: PhantomData>, -// } - -// struct StoreWriteGuard<'a> { -// guard: RwLockWriteGuard<'a, Store>, -// _not_send: PhantomData>, -// } - -// impl Server { -// pub fn new( -// app_state: Arc, -// peer: Arc, -// notifications: Option>, -// ) -> Arc { -// let mut server = Self { -// peer, -// app_state, -// store: Default::default(), -// handlers: Default::default(), -// notifications, -// }; - -// server -// .add_request_handler(Server::ping) -// .add_request_handler(Server::register_project) -// .add_message_handler(Server::unregister_project) -// .add_request_handler(Server::share_project) -// .add_message_handler(Server::unshare_project) -// .add_sync_request_handler(Server::join_project) -// .add_message_handler(Server::leave_project) -// .add_request_handler(Server::register_worktree) -// .add_message_handler(Server::unregister_worktree) -// .add_request_handler(Server::update_worktree) -// .add_message_handler(Server::start_language_server) -// .add_message_handler(Server::update_language_server) -// .add_message_handler(Server::update_diagnostic_summary) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// 
.add_request_handler(Server::forward_project_request::) -// .add_request_handler( -// Server::forward_project_request::, -// ) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::forward_project_request::) -// .add_request_handler(Server::update_buffer) -// .add_message_handler(Server::update_buffer_file) -// .add_message_handler(Server::buffer_reloaded) -// .add_message_handler(Server::buffer_saved) -// .add_request_handler(Server::save_buffer) -// .add_request_handler(Server::get_channels) -// .add_request_handler(Server::get_users) -// .add_request_handler(Server::join_channel) -// .add_message_handler(Server::leave_channel) -// .add_request_handler(Server::send_channel_message) -// .add_request_handler(Server::follow) -// .add_message_handler(Server::unfollow) -// .add_message_handler(Server::update_followers) -// .add_request_handler(Server::get_channel_messages); - -// Arc::new(server) -// } - -// fn add_message_handler(&mut self, handler: F) -> &mut Self -// where -// F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, -// Fut: 'static + Send + Future>, -// M: EnvelopedMessage, -// { -// let prev_handler = self.handlers.insert( -// TypeId::of::(), -// Box::new(move |server, envelope| { -// let envelope = envelope.into_any().downcast::>().unwrap(); -// (handler)(server, *envelope).boxed() -// }), -// ); -// if prev_handler.is_some() { -// panic!("registered a handler for the same message twice"); -// } -// self -// } - -// fn add_request_handler(&mut self, handler: F) -> &mut Self -// where -// F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, -// Fut: 'static + Send + Future>, -// M: RequestMessage, -// { -// self.add_message_handler(move |server, envelope| { -// let 
receipt = envelope.receipt(); -// let response = (handler)(server.clone(), envelope); -// async move { -// match response.await { -// Ok(response) => { -// server.peer.respond(receipt, response)?; -// Ok(()) -// } -// Err(error) => { -// server.peer.respond_with_error( -// receipt, -// proto::Error { -// message: error.to_string(), -// }, -// )?; -// Err(error) -// } -// } -// } -// }) -// } - -// /// Handle a request while holding a lock to the store. This is useful when we're registering -// /// a connection but we want to respond on the connection before anybody else can send on it. -// fn add_sync_request_handler(&mut self, handler: F) -> &mut Self -// where -// F: 'static -// + Send -// + Sync -// + Fn(Arc, &mut Store, TypedEnvelope) -> tide::Result, -// M: RequestMessage, -// { -// let handler = Arc::new(handler); -// self.add_message_handler(move |server, envelope| { -// let receipt = envelope.receipt(); -// let handler = handler.clone(); -// async move { -// let mut store = server.store.write().await; -// let response = (handler)(server.clone(), &mut *store, envelope); -// match response { -// Ok(response) => { -// server.peer.respond(receipt, response)?; -// Ok(()) -// } -// Err(error) => { -// server.peer.respond_with_error( -// receipt, -// proto::Error { -// message: error.to_string(), -// }, -// )?; -// Err(error) -// } -// } -// } -// }) -// } - -// pub fn handle_connection( -// self: &Arc, -// connection: Connection, -// addr: String, -// user_id: UserId, -// mut send_connection_id: Option>, -// executor: E, -// ) -> impl Future { -// let mut this = self.clone(); -// async move { -// let (connection_id, handle_io, mut incoming_rx) = this -// .peer -// .add_connection(connection, { -// let executor = executor.clone(); -// move |duration| { -// let timer = executor.timer(duration); -// async move { -// timer.await; -// } -// } -// }) -// .await; - -// if let Some(send_connection_id) = send_connection_id.as_mut() { -// let _ = 
send_connection_id.send(connection_id).await; -// } - -// { -// let mut state = this.state_mut().await; -// state.add_connection(connection_id, user_id); -// this.update_contacts_for_users(&*state, &[user_id]); -// } - -// let handle_io = handle_io.fuse(); -// futures::pin_mut!(handle_io); -// loop { -// let next_message = incoming_rx.next().fuse(); -// futures::pin_mut!(next_message); -// futures::select_biased! { -// result = handle_io => { -// if let Err(err) = result { -// log::error!("error handling rpc connection {:?} - {:?}", addr, err); -// } -// break; -// } -// message = next_message => { -// if let Some(message) = message { -// let start_time = Instant::now(); -// let type_name = message.payload_type_name(); -// log::info!(connection_id = connection_id.0, type_name = type_name; "rpc message received"); -// if let Some(handler) = this.handlers.get(&message.payload_type_id()) { -// let notifications = this.notifications.clone(); -// let is_background = message.is_background(); -// let handle_message = (handler)(this.clone(), message); -// let handle_message = async move { -// if let Err(err) = handle_message.await { -// log::error!(connection_id = connection_id.0, type = type_name, error = as_display!(err); "rpc message error"); -// } else { -// log::info!(connection_id = connection_id.0, type = type_name, duration = as_debug!(start_time.elapsed()); "rpc message handled"); -// } -// if let Some(mut notifications) = notifications { -// let _ = notifications.send(()).await; -// } -// }; -// if is_background { -// executor.spawn_detached(handle_message); -// } else { -// handle_message.await; -// } -// } else { -// log::warn!("unhandled message: {}", type_name); -// } -// } else { -// log::info!(address = as_debug!(addr); "rpc connection closed"); -// break; -// } -// } -// } -// } - -// if let Err(err) = this.sign_out(connection_id).await { -// log::error!("error signing out connection {:?} - {:?}", addr, err); -// } -// } -// } - -// async fn sign_out(self: 
&mut Arc, connection_id: ConnectionId) -> tide::Result<()> { -// self.peer.disconnect(connection_id); -// let mut state = self.state_mut().await; -// let removed_connection = state.remove_connection(connection_id)?; - -// for (project_id, project) in removed_connection.hosted_projects { -// if let Some(share) = project.share { -// broadcast( -// connection_id, -// share.guests.keys().copied().collect(), -// |conn_id| { -// self.peer -// .send(conn_id, proto::UnshareProject { project_id }) -// }, -// ); -// } -// } - -// for (project_id, peer_ids) in removed_connection.guest_project_ids { -// broadcast(connection_id, peer_ids, |conn_id| { -// self.peer.send( -// conn_id, -// proto::RemoveProjectCollaborator { -// project_id, -// peer_id: connection_id.0, -// }, -// ) -// }); -// } - -// self.update_contacts_for_users(&*state, removed_connection.contact_ids.iter()); -// Ok(()) -// } - -// async fn ping(self: Arc, _: TypedEnvelope) -> tide::Result { -// Ok(proto::Ack {}) -// } - -// async fn register_project( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let project_id = { -// let mut state = self.state_mut().await; -// let user_id = state.user_id_for_connection(request.sender_id)?; -// state.register_project(request.sender_id, user_id) -// }; -// Ok(proto::RegisterProjectResponse { project_id }) -// } - -// async fn unregister_project( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let mut state = self.state_mut().await; -// let project = state.unregister_project(request.payload.project_id, request.sender_id)?; -// self.update_contacts_for_users(&*state, &project.authorized_user_ids()); -// Ok(()) -// } - -// async fn share_project( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let mut state = self.state_mut().await; -// let project = state.share_project(request.payload.project_id, request.sender_id)?; -// self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); -// 
Ok(proto::Ack {}) -// } - -// async fn unshare_project( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let project_id = request.payload.project_id; -// let mut state = self.state_mut().await; -// let project = state.unshare_project(project_id, request.sender_id)?; -// broadcast(request.sender_id, project.connection_ids, |conn_id| { -// self.peer -// .send(conn_id, proto::UnshareProject { project_id }) -// }); -// self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); -// Ok(()) -// } - -// fn join_project( -// self: Arc, -// state: &mut Store, -// request: TypedEnvelope, -// ) -> tide::Result { -// let project_id = request.payload.project_id; - -// let user_id = state.user_id_for_connection(request.sender_id)?; -// let (response, connection_ids, contact_user_ids) = state -// .join_project(request.sender_id, user_id, project_id) -// .and_then(|joined| { -// let share = joined.project.share()?; -// let peer_count = share.guests.len(); -// let mut collaborators = Vec::with_capacity(peer_count); -// collaborators.push(proto::Collaborator { -// peer_id: joined.project.host_connection_id.0, -// replica_id: 0, -// user_id: joined.project.host_user_id.to_proto(), -// }); -// let worktrees = share -// .worktrees -// .iter() -// .filter_map(|(id, shared_worktree)| { -// let worktree = joined.project.worktrees.get(&id)?; -// Some(proto::Worktree { -// id: *id, -// root_name: worktree.root_name.clone(), -// entries: shared_worktree.entries.values().cloned().collect(), -// diagnostic_summaries: shared_worktree -// .diagnostic_summaries -// .values() -// .cloned() -// .collect(), -// visible: worktree.visible, -// }) -// }) -// .collect(); -// for (peer_conn_id, (peer_replica_id, peer_user_id)) in &share.guests { -// if *peer_conn_id != request.sender_id { -// collaborators.push(proto::Collaborator { -// peer_id: peer_conn_id.0, -// replica_id: *peer_replica_id as u32, -// user_id: peer_user_id.to_proto(), -// }); -// } -// } -// 
let response = proto::JoinProjectResponse { -// worktrees, -// replica_id: joined.replica_id as u32, -// collaborators, -// language_servers: joined.project.language_servers.clone(), -// }; -// let connection_ids = joined.project.connection_ids(); -// let contact_user_ids = joined.project.authorized_user_ids(); -// Ok((response, connection_ids, contact_user_ids)) -// })?; - -// broadcast(request.sender_id, connection_ids, |conn_id| { -// self.peer.send( -// conn_id, -// proto::AddProjectCollaborator { -// project_id, -// collaborator: Some(proto::Collaborator { -// peer_id: request.sender_id.0, -// replica_id: response.replica_id, -// user_id: user_id.to_proto(), -// }), -// }, -// ) -// }); -// self.update_contacts_for_users(state, &contact_user_ids); -// Ok(response) -// } - -// async fn leave_project( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let sender_id = request.sender_id; -// let project_id = request.payload.project_id; -// let mut state = self.state_mut().await; -// let worktree = state.leave_project(sender_id, project_id)?; -// broadcast(sender_id, worktree.connection_ids, |conn_id| { -// self.peer.send( -// conn_id, -// proto::RemoveProjectCollaborator { -// project_id, -// peer_id: sender_id.0, -// }, -// ) -// }); -// self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); -// Ok(()) -// } - -// async fn register_worktree( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let mut contact_user_ids = HashSet::default(); -// for github_login in &request.payload.authorized_logins { -// let contact_user_id = self.app_state.db.create_user(github_login, false).await?; -// contact_user_ids.insert(contact_user_id); -// } - -// let mut state = self.state_mut().await; -// let host_user_id = state.user_id_for_connection(request.sender_id)?; -// contact_user_ids.insert(host_user_id); - -// let contact_user_ids = contact_user_ids.into_iter().collect::>(); -// let guest_connection_ids = state -// 
.read_project(request.payload.project_id, request.sender_id)? -// .guest_connection_ids(); -// state.register_worktree( -// request.payload.project_id, -// request.payload.worktree_id, -// request.sender_id, -// Worktree { -// authorized_user_ids: contact_user_ids.clone(), -// root_name: request.payload.root_name.clone(), -// visible: request.payload.visible, -// }, -// )?; - -// broadcast(request.sender_id, guest_connection_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// self.update_contacts_for_users(&*state, &contact_user_ids); -// Ok(proto::Ack {}) -// } - -// async fn unregister_worktree( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let project_id = request.payload.project_id; -// let worktree_id = request.payload.worktree_id; -// let mut state = self.state_mut().await; -// let (worktree, guest_connection_ids) = -// state.unregister_worktree(project_id, worktree_id, request.sender_id)?; -// broadcast(request.sender_id, guest_connection_ids, |conn_id| { -// self.peer.send( -// conn_id, -// proto::UnregisterWorktree { -// project_id, -// worktree_id, -// }, -// ) -// }); -// self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); -// Ok(()) -// } - -// async fn update_worktree( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let connection_ids = self.state_mut().await.update_worktree( -// request.sender_id, -// request.payload.project_id, -// request.payload.worktree_id, -// &request.payload.removed_entries, -// &request.payload.updated_entries, -// )?; - -// broadcast(request.sender_id, connection_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); - -// Ok(proto::Ack {}) -// } - -// async fn update_diagnostic_summary( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let summary = request -// .payload -// .summary -// 
.clone() -// .ok_or_else(|| anyhow!("invalid summary"))?; -// let receiver_ids = self.state_mut().await.update_diagnostic_summary( -// request.payload.project_id, -// request.payload.worktree_id, -// request.sender_id, -// summary, -// )?; - -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(()) -// } - -// async fn start_language_server( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let receiver_ids = self.state_mut().await.start_language_server( -// request.payload.project_id, -// request.sender_id, -// request -// .payload -// .server -// .clone() -// .ok_or_else(|| anyhow!("invalid language server"))?, -// )?; -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(()) -// } - -// async fn update_language_server( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let receiver_ids = self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)?; -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(()) -// } - -// async fn forward_project_request( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result -// where -// T: EntityMessage + RequestMessage, -// { -// let host_connection_id = self -// .state() -// .await -// .read_project(request.payload.remote_entity_id(), request.sender_id)? -// .host_connection_id; -// Ok(self -// .peer -// .forward_request(request.sender_id, host_connection_id, request.payload) -// .await?) 
-// } - -// async fn save_buffer( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let host = self -// .state() -// .await -// .read_project(request.payload.project_id, request.sender_id)? -// .host_connection_id; -// let response = self -// .peer -// .forward_request(request.sender_id, host, request.payload.clone()) -// .await?; - -// let mut guests = self -// .state() -// .await -// .read_project(request.payload.project_id, request.sender_id)? -// .connection_ids(); -// guests.retain(|guest_connection_id| *guest_connection_id != request.sender_id); -// broadcast(host, guests, |conn_id| { -// self.peer.forward_send(host, conn_id, response.clone()) -// }); - -// Ok(response) -// } - -// async fn update_buffer( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let receiver_ids = self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)?; -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(proto::Ack {}) -// } - -// async fn update_buffer_file( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let receiver_ids = self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)?; -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(()) -// } - -// async fn buffer_reloaded( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let receiver_ids = self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)?; -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(()) -// } - -// async fn buffer_saved( -// self: 
Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let receiver_ids = self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)?; -// broadcast(request.sender_id, receiver_ids, |connection_id| { -// self.peer -// .forward_send(request.sender_id, connection_id, request.payload.clone()) -// }); -// Ok(()) -// } - -// async fn follow( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let leader_id = ConnectionId(request.payload.leader_id); -// let follower_id = request.sender_id; -// if !self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, follower_id)? -// .contains(&leader_id) -// { -// Err(anyhow!("no such peer"))?; -// } -// let mut response = self -// .peer -// .forward_request(request.sender_id, leader_id, request.payload) -// .await?; -// response -// .views -// .retain(|view| view.leader_id != Some(follower_id.0)); -// Ok(response) -// } - -// async fn unfollow( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let leader_id = ConnectionId(request.payload.leader_id); -// if !self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)? 
-// .contains(&leader_id) -// { -// Err(anyhow!("no such peer"))?; -// } -// self.peer -// .forward_send(request.sender_id, leader_id, request.payload)?; -// Ok(()) -// } - -// async fn update_followers( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let connection_ids = self -// .state() -// .await -// .project_connection_ids(request.payload.project_id, request.sender_id)?; -// let leader_id = request -// .payload -// .variant -// .as_ref() -// .and_then(|variant| match variant { -// proto::update_followers::Variant::CreateView(payload) => payload.leader_id, -// proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, -// proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, -// }); -// for follower_id in &request.payload.follower_ids { -// let follower_id = ConnectionId(*follower_id); -// if connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { -// self.peer -// .forward_send(request.sender_id, follower_id, request.payload.clone())?; -// } -// } -// Ok(()) -// } - -// async fn get_channels( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let user_id = self -// .state() -// .await -// .user_id_for_connection(request.sender_id)?; -// let channels = self.app_state.db.get_accessible_channels(user_id).await?; -// Ok(proto::GetChannelsResponse { -// channels: channels -// .into_iter() -// .map(|chan| proto::Channel { -// id: chan.id.to_proto(), -// name: chan.name, -// }) -// .collect(), -// }) -// } - -// async fn get_users( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let user_ids = request -// .payload -// .user_ids -// .into_iter() -// .map(UserId::from_proto) -// .collect(); -// let users = self -// .app_state -// .db -// .get_users_by_ids(user_ids) -// .await? 
-// .into_iter() -// .map(|user| proto::User { -// id: user.id.to_proto(), -// avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), -// github_login: user.github_login, -// }) -// .collect(); -// Ok(proto::GetUsersResponse { users }) -// } - -// fn update_contacts_for_users<'a>( -// self: &Arc, -// state: &Store, -// user_ids: impl IntoIterator, -// ) { -// for user_id in user_ids { -// let contacts = state.contacts_for_user(*user_id); -// for connection_id in state.connection_ids_for_user(*user_id) { -// self.peer -// .send( -// connection_id, -// proto::UpdateContacts { -// contacts: contacts.clone(), -// }, -// ) -// .log_err(); -// } -// } -// } - -// async fn join_channel( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let user_id = self -// .state() -// .await -// .user_id_for_connection(request.sender_id)?; -// let channel_id = ChannelId::from_proto(request.payload.channel_id); -// if !self -// .app_state -// .db -// .can_user_access_channel(user_id, channel_id) -// .await? -// { -// Err(anyhow!("access denied"))?; -// } - -// self.state_mut() -// .await -// .join_channel(request.sender_id, channel_id); -// let messages = self -// .app_state -// .db -// .get_channel_messages(channel_id, MESSAGE_COUNT_PER_PAGE, None) -// .await? 
-// .into_iter() -// .map(|msg| proto::ChannelMessage { -// id: msg.id.to_proto(), -// body: msg.body, -// timestamp: msg.sent_at.unix_timestamp() as u64, -// sender_id: msg.sender_id.to_proto(), -// nonce: Some(msg.nonce.as_u128().into()), -// }) -// .collect::>(); -// Ok(proto::JoinChannelResponse { -// done: messages.len() < MESSAGE_COUNT_PER_PAGE, -// messages, -// }) -// } - -// async fn leave_channel( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result<()> { -// let user_id = self -// .state() -// .await -// .user_id_for_connection(request.sender_id)?; -// let channel_id = ChannelId::from_proto(request.payload.channel_id); -// if !self -// .app_state -// .db -// .can_user_access_channel(user_id, channel_id) -// .await? -// { -// Err(anyhow!("access denied"))?; -// } - -// self.state_mut() -// .await -// .leave_channel(request.sender_id, channel_id); - -// Ok(()) -// } - -// async fn send_channel_message( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let channel_id = ChannelId::from_proto(request.payload.channel_id); -// let user_id; -// let connection_ids; -// { -// let state = self.state().await; -// user_id = state.user_id_for_connection(request.sender_id)?; -// connection_ids = state.channel_connection_ids(channel_id)?; -// } - -// // Validate the message body. -// let body = request.payload.body.trim().to_string(); -// if body.len() > MAX_MESSAGE_LEN { -// return Err(anyhow!("message is too long"))?; -// } -// if body.is_empty() { -// return Err(anyhow!("message can't be blank"))?; -// } - -// let timestamp = OffsetDateTime::now_utc(); -// let nonce = request -// .payload -// .nonce -// .ok_or_else(|| anyhow!("nonce can't be blank"))?; - -// let message_id = self -// .app_state -// .db -// .create_channel_message(channel_id, user_id, &body, timestamp, nonce.clone().into()) -// .await? 
-// .to_proto(); -// let message = proto::ChannelMessage { -// sender_id: user_id.to_proto(), -// id: message_id, -// body, -// timestamp: timestamp.unix_timestamp() as u64, -// nonce: Some(nonce), -// }; -// broadcast(request.sender_id, connection_ids, |conn_id| { -// self.peer.send( -// conn_id, -// proto::ChannelMessageSent { -// channel_id: channel_id.to_proto(), -// message: Some(message.clone()), -// }, -// ) -// }); -// Ok(proto::SendChannelMessageResponse { -// message: Some(message), -// }) -// } - -// async fn get_channel_messages( -// self: Arc, -// request: TypedEnvelope, -// ) -> tide::Result { -// let user_id = self -// .state() -// .await -// .user_id_for_connection(request.sender_id)?; -// let channel_id = ChannelId::from_proto(request.payload.channel_id); -// if !self -// .app_state -// .db -// .can_user_access_channel(user_id, channel_id) -// .await? -// { -// Err(anyhow!("access denied"))?; -// } - -// let messages = self -// .app_state -// .db -// .get_channel_messages( -// channel_id, -// MESSAGE_COUNT_PER_PAGE, -// Some(MessageId::from_proto(request.payload.before_message_id)), -// ) -// .await? 
-// .into_iter() -// .map(|msg| proto::ChannelMessage { -// id: msg.id.to_proto(), -// body: msg.body, -// timestamp: msg.sent_at.unix_timestamp() as u64, -// sender_id: msg.sender_id.to_proto(), -// nonce: Some(msg.nonce.as_u128().into()), -// }) -// .collect::>(); - -// Ok(proto::GetChannelMessagesResponse { -// done: messages.len() < MESSAGE_COUNT_PER_PAGE, -// messages, -// }) -// } - -// async fn state<'a>(self: &'a Arc) -> StoreReadGuard<'a> { -// #[cfg(test)] -// async_std::task::yield_now().await; -// let guard = self.store.read().await; -// #[cfg(test)] -// async_std::task::yield_now().await; -// StoreReadGuard { -// guard, -// _not_send: PhantomData, -// } -// } - -// async fn state_mut<'a>(self: &'a Arc) -> StoreWriteGuard<'a> { -// #[cfg(test)] -// async_std::task::yield_now().await; -// let guard = self.store.write().await; -// #[cfg(test)] -// async_std::task::yield_now().await; -// StoreWriteGuard { -// guard, -// _not_send: PhantomData, -// } -// } -// } - -// impl<'a> Deref for StoreReadGuard<'a> { -// type Target = Store; - -// fn deref(&self) -> &Self::Target { -// &*self.guard -// } -// } - -// impl<'a> Deref for StoreWriteGuard<'a> { -// type Target = Store; - -// fn deref(&self) -> &Self::Target { -// &*self.guard -// } -// } - -// impl<'a> DerefMut for StoreWriteGuard<'a> { -// fn deref_mut(&mut self) -> &mut Self::Target { -// &mut *self.guard -// } -// } - -// impl<'a> Drop for StoreWriteGuard<'a> { -// fn drop(&mut self) { -// #[cfg(test)] -// self.check_invariants(); -// } -// } - -// impl Executor for RealExecutor { -// type Timer = Timer; - -// fn spawn_detached>(&self, future: F) { -// task::spawn(future); -// } - -// fn timer(&self, duration: Duration) -> Self::Timer { -// Timer::after(duration) -// } -// } - -// fn broadcast(sender_id: ConnectionId, receiver_ids: Vec, mut f: F) -// where -// F: FnMut(ConnectionId) -> anyhow::Result<()>, -// { -// for receiver_id in receiver_ids { -// if receiver_id != sender_id { -// 
f(receiver_id).log_err(); -// } -// } -// } - -pub fn routes(peer: Arc) -> Router { +mod store; + +use crate::{ + auth, + db::{ChannelId, MessageId, UserId}, + AppState, Result, +}; +use anyhow::anyhow; +use async_tungstenite::tungstenite::{ + protocol::CloseFrame as TungsteniteCloseFrame, Message as TungsteniteMessage, +}; +use axum::{ + body::Body, + extract::{ + ws::{CloseFrame as AxumCloseFrame, Message as AxumMessage}, + ConnectInfo, WebSocketUpgrade, + }, + headers::{Header, HeaderName}, + http::StatusCode, + middleware, + response::{IntoResponse, Response}, + routing::get, + Extension, Router, TypedHeader, +}; +use collections::{HashMap, HashSet}; +use futures::{channel::mpsc, future::BoxFuture, FutureExt, SinkExt, StreamExt, TryStreamExt}; +use lazy_static::lazy_static; +use log::{as_debug, as_display}; +use rpc::{ + proto::{self, AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}, + Connection, ConnectionId, Peer, TypedEnvelope, +}; +use std::{ + any::TypeId, + future::Future, + marker::PhantomData, + net::SocketAddr, + ops::{Deref, DerefMut}, + rc::Rc, + sync::Arc, + time::{Duration, Instant}, +}; +use store::{Store, Worktree}; +use time::OffsetDateTime; +use tokio::{ + sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, + time::Sleep, +}; +use tower::ServiceBuilder; +use util::ResultExt; + +type MessageHandler = Box< + dyn Send + Sync + Fn(Arc, Box) -> BoxFuture<'static, Result<()>>, +>; + +pub struct Server { + peer: Arc, + store: RwLock, + app_state: Arc, + handlers: HashMap, + notifications: Option>, +} + +pub trait Executor: Send + Clone { + type Sleep: Send + Future; + fn spawn_detached>(&self, future: F); + fn sleep(&self, duration: Duration) -> Self::Sleep; +} + +#[derive(Clone)] +pub struct RealExecutor; + +const MESSAGE_COUNT_PER_PAGE: usize = 100; +const MAX_MESSAGE_LEN: usize = 1024; + +struct StoreReadGuard<'a> { + guard: RwLockReadGuard<'a, Store>, + _not_send: PhantomData>, +} + +struct StoreWriteGuard<'a> { + guard: 
RwLockWriteGuard<'a, Store>, + _not_send: PhantomData>, +} + +impl Server { + pub fn new( + app_state: Arc, + notifications: Option>, + ) -> Arc { + let mut server = Self { + peer: Peer::new(), + app_state, + store: Default::default(), + handlers: Default::default(), + notifications, + }; + + server + .add_request_handler(Server::ping) + .add_request_handler(Server::register_project) + .add_message_handler(Server::unregister_project) + .add_request_handler(Server::share_project) + .add_message_handler(Server::unshare_project) + .add_sync_request_handler(Server::join_project) + .add_message_handler(Server::leave_project) + .add_request_handler(Server::register_worktree) + .add_message_handler(Server::unregister_worktree) + .add_request_handler(Server::update_worktree) + .add_message_handler(Server::start_language_server) + .add_message_handler(Server::update_language_server) + .add_message_handler(Server::update_diagnostic_summary) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler( + Server::forward_project_request::, + ) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::forward_project_request::) + .add_request_handler(Server::update_buffer) + .add_message_handler(Server::update_buffer_file) + 
.add_message_handler(Server::buffer_reloaded) + .add_message_handler(Server::buffer_saved) + .add_request_handler(Server::save_buffer) + .add_request_handler(Server::get_channels) + .add_request_handler(Server::get_users) + .add_request_handler(Server::join_channel) + .add_message_handler(Server::leave_channel) + .add_request_handler(Server::send_channel_message) + .add_request_handler(Server::follow) + .add_message_handler(Server::unfollow) + .add_message_handler(Server::update_followers) + .add_request_handler(Server::get_channel_messages); + + Arc::new(server) + } + + fn add_message_handler(&mut self, handler: F) -> &mut Self + where + F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, + Fut: 'static + Send + Future>, + M: EnvelopedMessage, + { + let prev_handler = self.handlers.insert( + TypeId::of::(), + Box::new(move |server, envelope| { + let envelope = envelope.into_any().downcast::>().unwrap(); + (handler)(server, *envelope).boxed() + }), + ); + if prev_handler.is_some() { + panic!("registered a handler for the same message twice"); + } + self + } + + fn add_request_handler(&mut self, handler: F) -> &mut Self + where + F: 'static + Send + Sync + Fn(Arc, TypedEnvelope) -> Fut, + Fut: 'static + Send + Future>, + M: RequestMessage, + { + self.add_message_handler(move |server, envelope| { + let receipt = envelope.receipt(); + let response = (handler)(server.clone(), envelope); + async move { + match response.await { + Ok(response) => { + server.peer.respond(receipt, response)?; + Ok(()) + } + Err(error) => { + server.peer.respond_with_error( + receipt, + proto::Error { + message: error.to_string(), + }, + )?; + Err(error) + } + } + } + }) + } + + /// Handle a request while holding a lock to the store. This is useful when we're registering + /// a connection but we want to respond on the connection before anybody else can send on it. 
+ fn add_sync_request_handler(&mut self, handler: F) -> &mut Self + where + F: 'static + + Send + + Sync + + Fn(Arc, &mut Store, TypedEnvelope) -> Result, + M: RequestMessage, + { + let handler = Arc::new(handler); + self.add_message_handler(move |server, envelope| { + let receipt = envelope.receipt(); + let handler = handler.clone(); + async move { + let mut store = server.store.write().await; + let response = (handler)(server.clone(), &mut *store, envelope); + match response { + Ok(response) => { + server.peer.respond(receipt, response)?; + Ok(()) + } + Err(error) => { + server.peer.respond_with_error( + receipt, + proto::Error { + message: error.to_string(), + }, + )?; + Err(error) + } + } + } + }) + } + + pub fn handle_connection( + self: &Arc, + connection: Connection, + addr: String, + user_id: UserId, + mut send_connection_id: Option>, + executor: E, + ) -> impl Future { + let mut this = self.clone(); + async move { + let (connection_id, handle_io, mut incoming_rx) = this + .peer + .add_connection(connection, { + let executor = executor.clone(); + move |duration| { + let timer = executor.sleep(duration); + async move { + timer.await; + } + } + }) + .await; + + if let Some(send_connection_id) = send_connection_id.as_mut() { + let _ = send_connection_id.send(connection_id).await; + } + + { + let mut state = this.state_mut().await; + state.add_connection(connection_id, user_id); + this.update_contacts_for_users(&*state, &[user_id]); + } + + let handle_io = handle_io.fuse(); + futures::pin_mut!(handle_io); + loop { + let next_message = incoming_rx.next().fuse(); + futures::pin_mut!(next_message); + futures::select_biased! 
{ + result = handle_io => { + if let Err(err) = result { + log::error!("error handling rpc connection {:?} - {:?}", addr, err); + } + break; + } + message = next_message => { + if let Some(message) = message { + let start_time = Instant::now(); + let type_name = message.payload_type_name(); + log::info!(connection_id = connection_id.0, type_name = type_name; "rpc message received"); + if let Some(handler) = this.handlers.get(&message.payload_type_id()) { + let notifications = this.notifications.clone(); + let is_background = message.is_background(); + let handle_message = (handler)(this.clone(), message); + let handle_message = async move { + if let Err(err) = handle_message.await { + log::error!(connection_id = connection_id.0, type = type_name, error = as_display!(err); "rpc message error"); + } else { + log::info!(connection_id = connection_id.0, type = type_name, duration = as_debug!(start_time.elapsed()); "rpc message handled"); + } + if let Some(mut notifications) = notifications { + let _ = notifications.send(()).await; + } + }; + if is_background { + executor.spawn_detached(handle_message); + } else { + handle_message.await; + } + } else { + log::warn!("unhandled message: {}", type_name); + } + } else { + log::info!(address = as_debug!(addr); "rpc connection closed"); + break; + } + } + } + } + + if let Err(err) = this.sign_out(connection_id).await { + log::error!("error signing out connection {:?} - {:?}", addr, err); + } + } + } + + async fn sign_out(self: &mut Arc, connection_id: ConnectionId) -> Result<()> { + self.peer.disconnect(connection_id); + let mut state = self.state_mut().await; + let removed_connection = state.remove_connection(connection_id)?; + + for (project_id, project) in removed_connection.hosted_projects { + if let Some(share) = project.share { + broadcast( + connection_id, + share.guests.keys().copied().collect(), + |conn_id| { + self.peer + .send(conn_id, proto::UnshareProject { project_id }) + }, + ); + } + } + + for (project_id, 
peer_ids) in removed_connection.guest_project_ids { + broadcast(connection_id, peer_ids, |conn_id| { + self.peer.send( + conn_id, + proto::RemoveProjectCollaborator { + project_id, + peer_id: connection_id.0, + }, + ) + }); + } + + self.update_contacts_for_users(&*state, removed_connection.contact_ids.iter()); + Ok(()) + } + + async fn ping(self: Arc, _: TypedEnvelope) -> Result { + Ok(proto::Ack {}) + } + + async fn register_project( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let project_id = { + let mut state = self.state_mut().await; + let user_id = state.user_id_for_connection(request.sender_id)?; + state.register_project(request.sender_id, user_id) + }; + Ok(proto::RegisterProjectResponse { project_id }) + } + + async fn unregister_project( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let mut state = self.state_mut().await; + let project = state.unregister_project(request.payload.project_id, request.sender_id)?; + self.update_contacts_for_users(&*state, &project.authorized_user_ids()); + Ok(()) + } + + async fn share_project( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let mut state = self.state_mut().await; + let project = state.share_project(request.payload.project_id, request.sender_id)?; + self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); + Ok(proto::Ack {}) + } + + async fn unshare_project( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let project_id = request.payload.project_id; + let mut state = self.state_mut().await; + let project = state.unshare_project(project_id, request.sender_id)?; + broadcast(request.sender_id, project.connection_ids, |conn_id| { + self.peer + .send(conn_id, proto::UnshareProject { project_id }) + }); + self.update_contacts_for_users(&mut *state, &project.authorized_user_ids); + Ok(()) + } + + fn join_project( + self: Arc, + state: &mut Store, + request: TypedEnvelope, + ) -> Result { + let project_id = request.payload.project_id; + + let user_id 
= state.user_id_for_connection(request.sender_id)?; + let (response, connection_ids, contact_user_ids) = state + .join_project(request.sender_id, user_id, project_id) + .and_then(|joined| { + let share = joined.project.share()?; + let peer_count = share.guests.len(); + let mut collaborators = Vec::with_capacity(peer_count); + collaborators.push(proto::Collaborator { + peer_id: joined.project.host_connection_id.0, + replica_id: 0, + user_id: joined.project.host_user_id.to_proto(), + }); + let worktrees = share + .worktrees + .iter() + .filter_map(|(id, shared_worktree)| { + let worktree = joined.project.worktrees.get(&id)?; + Some(proto::Worktree { + id: *id, + root_name: worktree.root_name.clone(), + entries: shared_worktree.entries.values().cloned().collect(), + diagnostic_summaries: shared_worktree + .diagnostic_summaries + .values() + .cloned() + .collect(), + visible: worktree.visible, + }) + }) + .collect(); + for (peer_conn_id, (peer_replica_id, peer_user_id)) in &share.guests { + if *peer_conn_id != request.sender_id { + collaborators.push(proto::Collaborator { + peer_id: peer_conn_id.0, + replica_id: *peer_replica_id as u32, + user_id: peer_user_id.to_proto(), + }); + } + } + let response = proto::JoinProjectResponse { + worktrees, + replica_id: joined.replica_id as u32, + collaborators, + language_servers: joined.project.language_servers.clone(), + }; + let connection_ids = joined.project.connection_ids(); + let contact_user_ids = joined.project.authorized_user_ids(); + Ok((response, connection_ids, contact_user_ids)) + })?; + + broadcast(request.sender_id, connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::AddProjectCollaborator { + project_id, + collaborator: Some(proto::Collaborator { + peer_id: request.sender_id.0, + replica_id: response.replica_id, + user_id: user_id.to_proto(), + }), + }, + ) + }); + self.update_contacts_for_users(state, &contact_user_ids); + Ok(response) + } + + async fn leave_project( + self: Arc, + request: 
TypedEnvelope, + ) -> Result<()> { + let sender_id = request.sender_id; + let project_id = request.payload.project_id; + let mut state = self.state_mut().await; + let worktree = state.leave_project(sender_id, project_id)?; + broadcast(sender_id, worktree.connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::RemoveProjectCollaborator { + project_id, + peer_id: sender_id.0, + }, + ) + }); + self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); + Ok(()) + } + + async fn register_worktree( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let mut contact_user_ids = HashSet::default(); + for github_login in &request.payload.authorized_logins { + let contact_user_id = self.app_state.db.create_user(github_login, false).await?; + contact_user_ids.insert(contact_user_id); + } + + let mut state = self.state_mut().await; + let host_user_id = state.user_id_for_connection(request.sender_id)?; + contact_user_ids.insert(host_user_id); + + let contact_user_ids = contact_user_ids.into_iter().collect::>(); + let guest_connection_ids = state + .read_project(request.payload.project_id, request.sender_id)? 
+ .guest_connection_ids(); + state.register_worktree( + request.payload.project_id, + request.payload.worktree_id, + request.sender_id, + Worktree { + authorized_user_ids: contact_user_ids.clone(), + root_name: request.payload.root_name.clone(), + visible: request.payload.visible, + }, + )?; + + broadcast(request.sender_id, guest_connection_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + self.update_contacts_for_users(&*state, &contact_user_ids); + Ok(proto::Ack {}) + } + + async fn unregister_worktree( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let project_id = request.payload.project_id; + let worktree_id = request.payload.worktree_id; + let mut state = self.state_mut().await; + let (worktree, guest_connection_ids) = + state.unregister_worktree(project_id, worktree_id, request.sender_id)?; + broadcast(request.sender_id, guest_connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::UnregisterWorktree { + project_id, + worktree_id, + }, + ) + }); + self.update_contacts_for_users(&*state, &worktree.authorized_user_ids); + Ok(()) + } + + async fn update_worktree( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let connection_ids = self.state_mut().await.update_worktree( + request.sender_id, + request.payload.project_id, + request.payload.worktree_id, + &request.payload.removed_entries, + &request.payload.updated_entries, + )?; + + broadcast(request.sender_id, connection_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + + Ok(proto::Ack {}) + } + + async fn update_diagnostic_summary( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let summary = request + .payload + .summary + .clone() + .ok_or_else(|| anyhow!("invalid summary"))?; + let receiver_ids = self.state_mut().await.update_diagnostic_summary( + request.payload.project_id, + request.payload.worktree_id, + request.sender_id, + 
summary, + )?; + + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(()) + } + + async fn start_language_server( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let receiver_ids = self.state_mut().await.start_language_server( + request.payload.project_id, + request.sender_id, + request + .payload + .server + .clone() + .ok_or_else(|| anyhow!("invalid language server"))?, + )?; + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(()) + } + + async fn update_language_server( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let receiver_ids = self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)?; + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(()) + } + + async fn forward_project_request( + self: Arc, + request: TypedEnvelope, + ) -> Result + where + T: EntityMessage + RequestMessage, + { + let host_connection_id = self + .state() + .await + .read_project(request.payload.remote_entity_id(), request.sender_id)? + .host_connection_id; + Ok(self + .peer + .forward_request(request.sender_id, host_connection_id, request.payload) + .await?) + } + + async fn save_buffer( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let host = self + .state() + .await + .read_project(request.payload.project_id, request.sender_id)? + .host_connection_id; + let response = self + .peer + .forward_request(request.sender_id, host, request.payload.clone()) + .await?; + + let mut guests = self + .state() + .await + .read_project(request.payload.project_id, request.sender_id)? 
+ .connection_ids(); + guests.retain(|guest_connection_id| *guest_connection_id != request.sender_id); + broadcast(host, guests, |conn_id| { + self.peer.forward_send(host, conn_id, response.clone()) + }); + + Ok(response) + } + + async fn update_buffer( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let receiver_ids = self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)?; + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(proto::Ack {}) + } + + async fn update_buffer_file( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let receiver_ids = self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)?; + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(()) + } + + async fn buffer_reloaded( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let receiver_ids = self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)?; + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(()) + } + + async fn buffer_saved( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let receiver_ids = self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)?; + broadcast(request.sender_id, receiver_ids, |connection_id| { + self.peer + .forward_send(request.sender_id, connection_id, request.payload.clone()) + }); + Ok(()) + } + + async fn follow( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let leader_id = ConnectionId(request.payload.leader_id); + let follower_id = request.sender_id; + if !self + .state() + .await + 
.project_connection_ids(request.payload.project_id, follower_id)? + .contains(&leader_id) + { + Err(anyhow!("no such peer"))?; + } + let mut response = self + .peer + .forward_request(request.sender_id, leader_id, request.payload) + .await?; + response + .views + .retain(|view| view.leader_id != Some(follower_id.0)); + Ok(response) + } + + async fn unfollow(self: Arc, request: TypedEnvelope) -> Result<()> { + let leader_id = ConnectionId(request.payload.leader_id); + if !self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)? + .contains(&leader_id) + { + Err(anyhow!("no such peer"))?; + } + self.peer + .forward_send(request.sender_id, leader_id, request.payload)?; + Ok(()) + } + + async fn update_followers( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let connection_ids = self + .state() + .await + .project_connection_ids(request.payload.project_id, request.sender_id)?; + let leader_id = request + .payload + .variant + .as_ref() + .and_then(|variant| match variant { + proto::update_followers::Variant::CreateView(payload) => payload.leader_id, + proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, + proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, + }); + for follower_id in &request.payload.follower_ids { + let follower_id = ConnectionId(*follower_id); + if connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { + self.peer + .forward_send(request.sender_id, follower_id, request.payload.clone())?; + } + } + Ok(()) + } + + async fn get_channels( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let user_id = self + .state() + .await + .user_id_for_connection(request.sender_id)?; + let channels = self.app_state.db.get_accessible_channels(user_id).await?; + Ok(proto::GetChannelsResponse { + channels: channels + .into_iter() + .map(|chan| proto::Channel { + id: chan.id.to_proto(), + name: chan.name, + }) + .collect(), + }) + } + 
+ async fn get_users( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let user_ids = request + .payload + .user_ids + .into_iter() + .map(UserId::from_proto) + .collect(); + let users = self + .app_state + .db + .get_users_by_ids(user_ids) + .await? + .into_iter() + .map(|user| proto::User { + id: user.id.to_proto(), + avatar_url: format!("https://github.com/{}.png?size=128", user.github_login), + github_login: user.github_login, + }) + .collect(); + Ok(proto::GetUsersResponse { users }) + } + + fn update_contacts_for_users<'a>( + self: &Arc, + state: &Store, + user_ids: impl IntoIterator, + ) { + for user_id in user_ids { + let contacts = state.contacts_for_user(*user_id); + for connection_id in state.connection_ids_for_user(*user_id) { + self.peer + .send( + connection_id, + proto::UpdateContacts { + contacts: contacts.clone(), + }, + ) + .log_err(); + } + } + } + + async fn join_channel( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let user_id = self + .state() + .await + .user_id_for_connection(request.sender_id)?; + let channel_id = ChannelId::from_proto(request.payload.channel_id); + if !self + .app_state + .db + .can_user_access_channel(user_id, channel_id) + .await? + { + Err(anyhow!("access denied"))?; + } + + self.state_mut() + .await + .join_channel(request.sender_id, channel_id); + let messages = self + .app_state + .db + .get_channel_messages(channel_id, MESSAGE_COUNT_PER_PAGE, None) + .await? 
+ .into_iter() + .map(|msg| proto::ChannelMessage { + id: msg.id.to_proto(), + body: msg.body, + timestamp: msg.sent_at.unix_timestamp() as u64, + sender_id: msg.sender_id.to_proto(), + nonce: Some(msg.nonce.as_u128().into()), + }) + .collect::>(); + Ok(proto::JoinChannelResponse { + done: messages.len() < MESSAGE_COUNT_PER_PAGE, + messages, + }) + } + + async fn leave_channel( + self: Arc, + request: TypedEnvelope, + ) -> Result<()> { + let user_id = self + .state() + .await + .user_id_for_connection(request.sender_id)?; + let channel_id = ChannelId::from_proto(request.payload.channel_id); + if !self + .app_state + .db + .can_user_access_channel(user_id, channel_id) + .await? + { + Err(anyhow!("access denied"))?; + } + + self.state_mut() + .await + .leave_channel(request.sender_id, channel_id); + + Ok(()) + } + + async fn send_channel_message( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let channel_id = ChannelId::from_proto(request.payload.channel_id); + let user_id; + let connection_ids; + { + let state = self.state().await; + user_id = state.user_id_for_connection(request.sender_id)?; + connection_ids = state.channel_connection_ids(channel_id)?; + } + + // Validate the message body. + let body = request.payload.body.trim().to_string(); + if body.len() > MAX_MESSAGE_LEN { + return Err(anyhow!("message is too long"))?; + } + if body.is_empty() { + return Err(anyhow!("message can't be blank"))?; + } + + let timestamp = OffsetDateTime::now_utc(); + let nonce = request + .payload + .nonce + .ok_or_else(|| anyhow!("nonce can't be blank"))?; + + let message_id = self + .app_state + .db + .create_channel_message(channel_id, user_id, &body, timestamp, nonce.clone().into()) + .await? 
+ .to_proto(); + let message = proto::ChannelMessage { + sender_id: user_id.to_proto(), + id: message_id, + body, + timestamp: timestamp.unix_timestamp() as u64, + nonce: Some(nonce), + }; + broadcast(request.sender_id, connection_ids, |conn_id| { + self.peer.send( + conn_id, + proto::ChannelMessageSent { + channel_id: channel_id.to_proto(), + message: Some(message.clone()), + }, + ) + }); + Ok(proto::SendChannelMessageResponse { + message: Some(message), + }) + } + + async fn get_channel_messages( + self: Arc, + request: TypedEnvelope, + ) -> Result { + let user_id = self + .state() + .await + .user_id_for_connection(request.sender_id)?; + let channel_id = ChannelId::from_proto(request.payload.channel_id); + if !self + .app_state + .db + .can_user_access_channel(user_id, channel_id) + .await? + { + Err(anyhow!("access denied"))?; + } + + let messages = self + .app_state + .db + .get_channel_messages( + channel_id, + MESSAGE_COUNT_PER_PAGE, + Some(MessageId::from_proto(request.payload.before_message_id)), + ) + .await? 
+ .into_iter() + .map(|msg| proto::ChannelMessage { + id: msg.id.to_proto(), + body: msg.body, + timestamp: msg.sent_at.unix_timestamp() as u64, + sender_id: msg.sender_id.to_proto(), + nonce: Some(msg.nonce.as_u128().into()), + }) + .collect::>(); + + Ok(proto::GetChannelMessagesResponse { + done: messages.len() < MESSAGE_COUNT_PER_PAGE, + messages, + }) + } + + async fn state<'a>(self: &'a Arc) -> StoreReadGuard<'a> { + #[cfg(test)] + tokio::task::yield_now().await; + let guard = self.store.read().await; + #[cfg(test)] + tokio::task::yield_now().await; + StoreReadGuard { + guard, + _not_send: PhantomData, + } + } + + async fn state_mut<'a>(self: &'a Arc) -> StoreWriteGuard<'a> { + #[cfg(test)] + tokio::task::yield_now().await; + let guard = self.store.write().await; + #[cfg(test)] + tokio::task::yield_now().await; + StoreWriteGuard { + guard, + _not_send: PhantomData, + } + } +} + +impl<'a> Deref for StoreReadGuard<'a> { + type Target = Store; + + fn deref(&self) -> &Self::Target { + &*self.guard + } +} + +impl<'a> Deref for StoreWriteGuard<'a> { + type Target = Store; + + fn deref(&self) -> &Self::Target { + &*self.guard + } +} + +impl<'a> DerefMut for StoreWriteGuard<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut *self.guard + } +} + +impl<'a> Drop for StoreWriteGuard<'a> { + fn drop(&mut self) { + #[cfg(test)] + self.check_invariants(); + } +} + +impl Executor for RealExecutor { + type Sleep = Sleep; + + fn spawn_detached>(&self, future: F) { + tokio::task::spawn(future); + } + + fn sleep(&self, duration: Duration) -> Self::Sleep { + tokio::time::sleep(duration) + } +} + +fn broadcast(sender_id: ConnectionId, receiver_ids: Vec, mut f: F) +where + F: FnMut(ConnectionId) -> anyhow::Result<()>, +{ + for receiver_id in receiver_ids { + if receiver_id != sender_id { + f(receiver_id).log_err(); + } + } +} + +lazy_static! 
{ + static ref ZED_PROTOCOL_VERSION: HeaderName = HeaderName::from_static("x-zed-protocol-version"); +} + +pub struct ProtocolVersion(u32); + +impl Header for ProtocolVersion { + fn name() -> &'static HeaderName { + &ZED_PROTOCOL_VERSION + } + + fn decode<'i, I>(values: &mut I) -> Result + where + Self: Sized, + I: Iterator, + { + let version = values + .next() + .ok_or_else(|| axum::headers::Error::invalid())? + .to_str() + .map_err(|_| axum::headers::Error::invalid())? + .parse() + .map_err(|_| axum::headers::Error::invalid())?; + Ok(Self(version)) + } + + fn encode>(&self, values: &mut E) { + values.extend([self.0.to_string().parse().unwrap()]); + } +} + +pub fn routes(app_state: Arc) -> Router { + let server = Server::new(app_state.clone(), None); Router::new() + .route("/rpc", get(handle_websocket_request)) + .layer( + ServiceBuilder::new() + .layer(Extension(app_state)) + .layer(middleware::from_fn(auth::validate_header)) + .layer(Extension(server)), + ) } -// pub fn add_routes(app: &mut tide::Server>, rpc: &Arc) { -// let server = Server::new(app.state().clone(), rpc.clone(), None); -// app.at("/rpc").get(move |request: Request>| { -// let server = server.clone(); -// async move { -// const WEBSOCKET_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; - -// let connection_upgrade = header_contains_ignore_case(&request, CONNECTION, "upgrade"); -// let upgrade_to_websocket = header_contains_ignore_case(&request, UPGRADE, "websocket"); -// let upgrade_requested = connection_upgrade && upgrade_to_websocket; -// let client_protocol_version: Option = request -// .header("X-Zed-Protocol-Version") -// .and_then(|v| v.as_str().parse().ok()); - -// if !upgrade_requested || client_protocol_version != Some(rpc::PROTOCOL_VERSION) { -// return Ok(Response::new(StatusCode::UpgradeRequired)); -// } - -// let header = match request.header("Sec-Websocket-Key") { -// Some(h) => h.as_str(), -// None => return Err(anyhow!("expected sec-websocket-key"))?, -// }; - -// let 
user_id = process_auth_header(&request).await?; - -// let mut response = Response::new(StatusCode::SwitchingProtocols); -// response.insert_header(UPGRADE, "websocket"); -// response.insert_header(CONNECTION, "Upgrade"); -// let hash = Sha1::new().chain(header).chain(WEBSOCKET_GUID).finalize(); -// response.insert_header("Sec-Websocket-Accept", base64::encode(&hash[..])); -// response.insert_header("Sec-Websocket-Version", "13"); - -// let http_res: &mut tide::http::Response = response.as_mut(); -// let upgrade_receiver = http_res.recv_upgrade().await; -// let addr = request.remote().unwrap_or("unknown").to_string(); -// task::spawn(async move { -// if let Some(stream) = upgrade_receiver.await { -// server -// .handle_connection( -// Connection::new( -// WebSocketStream::from_raw_socket(stream, Role::Server, None).await, -// ), -// addr, -// user_id, -// None, -// RealExecutor, -// ) -// .await; -// } -// }); - -// Ok(response) -// } -// }); -// } - -// fn header_contains_ignore_case( -// request: &tide::Request, -// header_name: HeaderName, -// value: &str, -// ) -> bool { -// request -// .header(header_name) -// .map(|h| { -// h.as_str() -// .split(',') -// .any(|s| s.trim().eq_ignore_ascii_case(value.trim())) -// }) -// .unwrap_or(false) -// } - -// #[cfg(test)] -// mod tests { -// use super::*; -// use crate::{ -// db::{tests::TestDb, UserId}, -// AppState, Config, -// }; -// use ::rpc::Peer; -// use client::{ -// self, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Credentials, -// EstablishConnectionError, UserStore, RECEIVE_TIMEOUT, -// }; -// use collections::BTreeMap; -// use editor::{ -// self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Input, Redo, Rename, -// ToOffset, ToggleCodeActions, Undo, -// }; -// use gpui::{ -// executor::{self, Deterministic}, -// geometry::vector::vec2f, -// ModelHandle, TestAppContext, ViewHandle, -// }; -// use language::{ -// range_to_lsp, tree_sitter_rust, Diagnostic, DiagnosticEntry, 
FakeLspAdapter, Language, -// LanguageConfig, LanguageRegistry, OffsetRangeExt, Point, Rope, -// }; -// use lsp::{self, FakeLanguageServer}; -// use parking_lot::Mutex; -// use project::{ -// fs::{FakeFs, Fs as _}, -// search::SearchQuery, -// worktree::WorktreeHandle, -// DiagnosticSummary, Project, ProjectPath, WorktreeId, -// }; -// use rand::prelude::*; -// use rpc::PeerId; -// use serde_json::json; -// use settings::Settings; -// use sqlx::types::time::OffsetDateTime; -// use std::{ -// env, -// ops::Deref, -// path::{Path, PathBuf}, -// rc::Rc, -// sync::{ -// atomic::{AtomicBool, Ordering::SeqCst}, -// Arc, -// }, -// time::Duration, -// }; -// use theme::ThemeRegistry; -// use workspace::{Item, SplitDirection, ToggleFollow, Workspace, WorkspaceParams}; - -// #[cfg(test)] -// #[ctor::ctor] -// fn init_logger() { -// if std::env::var("RUST_LOG").is_ok() { -// env_logger::init(); -// } -// } - -// #[gpui::test(iterations = 10)] -// async fn test_share_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// let (window_b, _) = cx_b.add_window(|_| EmptyView); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// cx_a.foreground().forbid_parking(); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.txt": "a-contents", -// "b.txt": "b-contents", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join that project as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// let replica_id_b = project_b.read_with(cx_b, |project, _| { -// assert_eq!( -// project -// .collaborators() -// .get(&client_a.peer_id) -// .unwrap() -// .user -// .github_login, -// "user_a" -// ); -// project.replica_id() -// }); -// project_a -// .condition(&cx_a, |tree, _| { -// tree.collaborators() -// .get(&client_b.peer_id) -// .map_or(false, |collaborator| { -// collaborator.replica_id == replica_id_b -// && collaborator.user.github_login == "user_b" -// }) -// }) -// .await; - -// // Open the same file as client B and client A. 
-// let buffer_b = project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) -// .await -// .unwrap(); -// buffer_b.read_with(cx_b, |buf, _| assert_eq!(buf.text(), "b-contents")); -// project_a.read_with(cx_a, |project, cx| { -// assert!(project.has_open_buffer((worktree_id, "b.txt"), cx)) -// }); -// let buffer_a = project_a -// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) -// .await -// .unwrap(); - -// let editor_b = cx_b.add_view(window_b, |cx| Editor::for_buffer(buffer_b, None, cx)); - -// // TODO -// // // Create a selection set as client B and see that selection set as client A. -// // buffer_a -// // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1) -// // .await; - -// // Edit the buffer as client B and see that edit as client A. -// editor_b.update(cx_b, |editor, cx| { -// editor.handle_input(&Input("ok, ".into()), cx) -// }); -// buffer_a -// .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents") -// .await; - -// // TODO -// // // Remove the selection set as client B, see those selections disappear as client A. -// cx_b.update(move |_| drop(editor_b)); -// // buffer_a -// // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0) -// // .await; - -// // Dropping the client B's project removes client B from client A's collaborators. -// cx_b.update(move |_| drop(project_b)); -// project_a -// .condition(&cx_a, |project, _| project.collaborators().is_empty()) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_unshare_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// cx_a.foreground().forbid_parking(); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.txt": "a-contents", -// "b.txt": "b-contents", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); -// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - -// // Join that project as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); - -// // Unshare the project as client A -// project_a.update(cx_a, |project, cx| project.unshare(cx)); -// project_b -// .condition(cx_b, |project, _| project.is_read_only()) -// .await; -// assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); -// cx_b.update(|_| { -// drop(project_b); -// }); - -// // Share the project again and ensure guests can still join. 
-// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); -// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - -// let project_b2 = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// project_b2 -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_host_disconnect(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// cx_a.foreground().forbid_parking(); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.txt": "a-contents", -// "b.txt": "b-contents", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); -// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - -// // Join that project 
as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); - -// // Drop client A's connection. Collaborators should disappear and the project should not be shown as shared. -// server.disconnect_client(client_a.current_user_id(cx_a)); -// cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT); -// project_a -// .condition(cx_a, |project, _| project.collaborators().is_empty()) -// .await; -// project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); -// project_b -// .condition(cx_b, |project, _| project.is_read_only()) -// .await; -// assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); -// cx_b.update(|_| { -// drop(project_b); -// }); - -// // Await reconnection -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - -// // Share the project again and ensure guests can still join. 
-// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); -// assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - -// let project_b2 = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// project_b2 -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_propagate_saves_and_fs_changes( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// cx_c: &mut TestAppContext, -// ) { -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// cx_a.foreground().forbid_parking(); - -// // Connect to a server as 3 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; -// let client_c = server.create_client(cx_c, "user_c").await; - -// // Share a worktree as client A. 
-// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, -// "file1": "", -// "file2": "" -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join that worktree as clients B and C. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// let project_c = Project::remote( -// project_id, -// client_c.clone(), -// client_c.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_c.to_async(), -// ) -// .await -// .unwrap(); -// let worktree_b = project_b.read_with(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); -// let worktree_c = project_c.read_with(cx_c, |p, cx| p.worktrees(cx).next().unwrap()); - -// // Open and edit a buffer as both guests B and C. -// let buffer_b = project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) -// .await -// .unwrap(); -// let buffer_c = project_c -// .update(cx_c, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) -// .await -// .unwrap(); -// buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx)); -// buffer_c.update(cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx)); - -// // Open and edit that buffer as the host. 
-// let buffer_a = project_a -// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) -// .await -// .unwrap(); - -// buffer_a -// .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, ") -// .await; -// buffer_a.update(cx_a, |buf, cx| { -// buf.edit([buf.len()..buf.len()], "i-am-a", cx) -// }); - -// // Wait for edits to propagate -// buffer_a -// .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") -// .await; -// buffer_b -// .condition(cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") -// .await; -// buffer_c -// .condition(cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") -// .await; - -// // Edit the buffer as the host and concurrently save as guest B. -// let save_b = buffer_b.update(cx_b, |buf, cx| buf.save(cx)); -// buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx)); -// save_b.await.unwrap(); -// assert_eq!( -// fs.load("/a/file1".as_ref()).await.unwrap(), -// "hi-a, i-am-c, i-am-b, i-am-a" -// ); -// buffer_a.read_with(cx_a, |buf, _| assert!(!buf.is_dirty())); -// buffer_b.read_with(cx_b, |buf, _| assert!(!buf.is_dirty())); -// buffer_c.condition(cx_c, |buf, _| !buf.is_dirty()).await; - -// worktree_a.flush_fs_events(cx_a).await; - -// // Make changes on host's file system, see those changes on guest worktrees. 
-// fs.rename( -// "/a/file1".as_ref(), -// "/a/file1-renamed".as_ref(), -// Default::default(), -// ) -// .await -// .unwrap(); - -// fs.rename("/a/file2".as_ref(), "/a/file3".as_ref(), Default::default()) -// .await -// .unwrap(); -// fs.insert_file(Path::new("/a/file4"), "4".into()).await; - -// worktree_a -// .condition(&cx_a, |tree, _| { -// tree.paths() -// .map(|p| p.to_string_lossy()) -// .collect::>() -// == [".zed.toml", "file1-renamed", "file3", "file4"] -// }) -// .await; -// worktree_b -// .condition(&cx_b, |tree, _| { -// tree.paths() -// .map(|p| p.to_string_lossy()) -// .collect::>() -// == [".zed.toml", "file1-renamed", "file3", "file4"] -// }) -// .await; -// worktree_c -// .condition(&cx_c, |tree, _| { -// tree.paths() -// .map(|p| p.to_string_lossy()) -// .collect::>() -// == [".zed.toml", "file1-renamed", "file3", "file4"] -// }) -// .await; - -// // Ensure buffer files are updated as well. -// buffer_a -// .condition(&cx_a, |buf, _| { -// buf.file().unwrap().path().to_str() == Some("file1-renamed") -// }) -// .await; -// buffer_b -// .condition(&cx_b, |buf, _| { -// buf.file().unwrap().path().to_str() == Some("file1-renamed") -// }) -// .await; -// buffer_c -// .condition(&cx_c, |buf, _| { -// buf.file().unwrap().path().to_str() == Some("file1-renamed") -// }) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_buffer_conflict_after_save(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/dir", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, -// "a.txt": "a-contents", -// }), -// ) -// .await; - -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/dir", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join that project as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Open a buffer as client B -// let buffer_b = project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); - -// buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "world ", cx)); -// buffer_b.read_with(cx_b, |buf, _| { -// assert!(buf.is_dirty()); -// assert!(!buf.has_conflict()); -// }); - -// buffer_b.update(cx_b, |buf, cx| buf.save(cx)).await.unwrap(); -// buffer_b -// .condition(&cx_b, |buffer_b, _| !buffer_b.is_dirty()) -// .await; -// buffer_b.read_with(cx_b, |buf, _| { -// assert!(!buf.has_conflict()); -// }); - -// buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "hello ", cx)); -// buffer_b.read_with(cx_b, |buf, _| { -// 
assert!(buf.is_dirty()); -// assert!(!buf.has_conflict()); -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_buffer_reloading(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/dir", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, -// "a.txt": "a-contents", -// }), -// ) -// .await; - -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/dir", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join that project as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// let _worktree_b = project_b.update(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); - -// // Open a buffer as client B -// let buffer_b = project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); -// buffer_b.read_with(cx_b, |buf, _| { -// 
assert!(!buf.is_dirty()); -// assert!(!buf.has_conflict()); -// }); - -// fs.save(Path::new("/dir/a.txt"), &"new contents".into()) -// .await -// .unwrap(); -// buffer_b -// .condition(&cx_b, |buf, _| { -// buf.text() == "new contents" && !buf.is_dirty() -// }) -// .await; -// buffer_b.read_with(cx_b, |buf, _| { -// assert!(!buf.has_conflict()); -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_editing_while_guest_opens_buffer( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/dir", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.txt": "a-contents", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/dir", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join that project as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// 
// Open a buffer as client A -// let buffer_a = project_a -// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) -// .await -// .unwrap(); - -// // Start opening the same buffer as client B -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); - -// // Edit the buffer as client A while client B is still opening it. -// cx_b.background().simulate_random_delay().await; -// buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "X", cx)); -// cx_b.background().simulate_random_delay().await; -// buffer_a.update(cx_a, |buf, cx| buf.edit([1..1], "Y", cx)); - -// let text = buffer_a.read_with(cx_a, |buf, _| buf.text()); -// let buffer_b = buffer_b.await.unwrap(); -// buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_leaving_worktree_while_opening_buffer( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/dir", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.txt": "a-contents", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/dir", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join that project as client B -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // See that a guest has joined as client A. -// project_a -// .condition(&cx_a, |p, _| p.collaborators().len() == 1) -// .await; - -// // Begin opening a buffer as client B, but leave the project before the open completes. -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); -// cx_b.update(|_| drop(project_b)); -// drop(buffer_b); - -// // See that the guest has left. 
-// project_a -// .condition(&cx_a, |p, _| p.collaborators().len() == 0) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_leaving_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.txt": "a-contents", -// "b.txt": "b-contents", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a -// .update(cx_a, |project, _| project.next_remote_id()) -// .await; -// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); - -// // Join that project as client B -// let _project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Client A sees that a guest has joined. -// project_a -// .condition(cx_a, |p, _| p.collaborators().len() == 1) -// .await; - -// // Drop client B's connection and ensure client A observes client B leaving the project. 
-// client_b.disconnect(&cx_b.to_async()).unwrap(); -// project_a -// .condition(cx_a, |p, _| p.collaborators().len() == 0) -// .await; - -// // Rejoin the project as client B -// let _project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Client A sees that a guest has re-joined. -// project_a -// .condition(cx_a, |p, _| p.collaborators().len() == 1) -// .await; - -// // Simulate connection loss for client B and ensure client A observes client B leaving the project. -// client_b.wait_for_current_user(cx_b).await; -// server.disconnect_client(client_b.current_user_id(cx_b)); -// cx_a.foreground().advance_clock(Duration::from_secs(3)); -// project_a -// .condition(cx_a, |p, _| p.collaborators().len() == 0) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_collaborating_with_diagnostics( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.rs": "let one = two", -// "other.rs": "", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Cause the language server to start. -// let _ = cx_a -// .background() -// .spawn(project_a.update(cx_a, |project, cx| { -// project.open_buffer( -// ProjectPath { -// worktree_id, -// path: Path::new("other.rs").into(), -// }, -// cx, -// ) -// })) -// .await -// .unwrap(); - -// // Simulate a language server reporting errors for a file. 
-// let mut fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server -// .receive_notification::() -// .await; -// fake_language_server.notify::( -// lsp::PublishDiagnosticsParams { -// uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), -// version: None, -// diagnostics: vec![lsp::Diagnostic { -// severity: Some(lsp::DiagnosticSeverity::ERROR), -// range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)), -// message: "message 1".to_string(), -// ..Default::default() -// }], -// }, -// ); - -// // Wait for server to see the diagnostics update. -// server -// .condition(|store| { -// let worktree = store -// .project(project_id) -// .unwrap() -// .share -// .as_ref() -// .unwrap() -// .worktrees -// .get(&worktree_id.to_proto()) -// .unwrap(); - -// !worktree.diagnostic_summaries.is_empty() -// }) -// .await; - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// project_b.read_with(cx_b, |project, cx| { -// assert_eq!( -// project.diagnostic_summaries(cx).collect::>(), -// &[( -// ProjectPath { -// worktree_id, -// path: Arc::from(Path::new("a.rs")), -// }, -// DiagnosticSummary { -// error_count: 1, -// warning_count: 0, -// ..Default::default() -// }, -// )] -// ) -// }); - -// // Simulate a language server reporting more errors for a file. 
-// fake_language_server.notify::( -// lsp::PublishDiagnosticsParams { -// uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), -// version: None, -// diagnostics: vec![ -// lsp::Diagnostic { -// severity: Some(lsp::DiagnosticSeverity::ERROR), -// range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)), -// message: "message 1".to_string(), -// ..Default::default() -// }, -// lsp::Diagnostic { -// severity: Some(lsp::DiagnosticSeverity::WARNING), -// range: lsp::Range::new( -// lsp::Position::new(0, 10), -// lsp::Position::new(0, 13), -// ), -// message: "message 2".to_string(), -// ..Default::default() -// }, -// ], -// }, -// ); - -// // Client b gets the updated summaries -// project_b -// .condition(&cx_b, |project, cx| { -// project.diagnostic_summaries(cx).collect::>() -// == &[( -// ProjectPath { -// worktree_id, -// path: Arc::from(Path::new("a.rs")), -// }, -// DiagnosticSummary { -// error_count: 1, -// warning_count: 1, -// ..Default::default() -// }, -// )] -// }) -// .await; - -// // Open the file with the errors on client B. They should be present. 
-// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) -// .await -// .unwrap(); - -// buffer_b.read_with(cx_b, |buffer, _| { -// assert_eq!( -// buffer -// .snapshot() -// .diagnostics_in_range::<_, Point>(0..buffer.len(), false) -// .map(|entry| entry) -// .collect::>(), -// &[ -// DiagnosticEntry { -// range: Point::new(0, 4)..Point::new(0, 7), -// diagnostic: Diagnostic { -// group_id: 0, -// message: "message 1".to_string(), -// severity: lsp::DiagnosticSeverity::ERROR, -// is_primary: true, -// ..Default::default() -// } -// }, -// DiagnosticEntry { -// range: Point::new(0, 10)..Point::new(0, 13), -// diagnostic: Diagnostic { -// group_id: 1, -// severity: lsp::DiagnosticSeverity::WARNING, -// message: "message 2".to_string(), -// is_primary: true, -// ..Default::default() -// } -// } -// ] -// ); -// }); - -// // Simulate a language server reporting no errors for a file. -// fake_language_server.notify::( -// lsp::PublishDiagnosticsParams { -// uri: lsp::Url::from_file_path("/a/a.rs").unwrap(), -// version: None, -// diagnostics: vec![], -// }, -// ); -// project_a -// .condition(cx_a, |project, cx| { -// project.diagnostic_summaries(cx).collect::>() == &[] -// }) -// .await; -// project_b -// .condition(cx_b, |project, cx| { -// project.diagnostic_summaries(cx).collect::>() == &[] -// }) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_collaborating_with_completion( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Set up a fake language server. 
-// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { -// capabilities: lsp::ServerCapabilities { -// completion_provider: Some(lsp::CompletionOptions { -// trigger_characters: Some(vec![".".to_string()]), -// ..Default::default() -// }), -// ..Default::default() -// }, -// ..Default::default() -// }); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "main.rs": "fn main() { a }", -// "other.rs": "", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Open a file in an editor as the guest. 
-// let buffer_b = project_b -// .update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx)) -// .await -// .unwrap(); -// let (window_b, _) = cx_b.add_window(|_| EmptyView); -// let editor_b = cx_b.add_view(window_b, |cx| { -// Editor::for_buffer(buffer_b.clone(), Some(project_b.clone()), cx) -// }); - -// let fake_language_server = fake_language_servers.next().await.unwrap(); -// buffer_b -// .condition(&cx_b, |buffer, _| !buffer.completion_triggers().is_empty()) -// .await; - -// // Type a completion trigger character as the guest. -// editor_b.update(cx_b, |editor, cx| { -// editor.select_ranges([13..13], None, cx); -// editor.handle_input(&Input(".".into()), cx); -// cx.focus(&editor_b); -// }); - -// // Receive a completion request as the host's language server. -// // Return some completions from the host's language server. -// cx_a.foreground().start_waiting(); -// fake_language_server -// .handle_request::(|params, _| async move { -// assert_eq!( -// params.text_document_position.text_document.uri, -// lsp::Url::from_file_path("/a/main.rs").unwrap(), -// ); -// assert_eq!( -// params.text_document_position.position, -// lsp::Position::new(0, 14), -// ); - -// Ok(Some(lsp::CompletionResponse::Array(vec![ -// lsp::CompletionItem { -// label: "first_method(…)".into(), -// detail: Some("fn(&mut self, B) -> C".into()), -// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { -// new_text: "first_method($1)".to_string(), -// range: lsp::Range::new( -// lsp::Position::new(0, 14), -// lsp::Position::new(0, 14), -// ), -// })), -// insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), -// ..Default::default() -// }, -// lsp::CompletionItem { -// label: "second_method(…)".into(), -// detail: Some("fn(&mut self, C) -> D".into()), -// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { -// new_text: "second_method()".to_string(), -// range: lsp::Range::new( -// lsp::Position::new(0, 14), -// lsp::Position::new(0, 14), -// ), -// })), -// 
insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), -// ..Default::default() -// }, -// ]))) -// }) -// .next() -// .await -// .unwrap(); -// cx_a.foreground().finish_waiting(); - -// // Open the buffer on the host. -// let buffer_a = project_a -// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx)) -// .await -// .unwrap(); -// buffer_a -// .condition(&cx_a, |buffer, _| buffer.text() == "fn main() { a. }") -// .await; - -// // Confirm a completion on the guest. -// editor_b -// .condition(&cx_b, |editor, _| editor.context_menu_visible()) -// .await; -// editor_b.update(cx_b, |editor, cx| { -// editor.confirm_completion(&ConfirmCompletion { item_ix: Some(0) }, cx); -// assert_eq!(editor.text(cx), "fn main() { a.first_method() }"); -// }); - -// // Return a resolved completion from the host's language server. -// // The resolved completion has an additional text edit. -// fake_language_server.handle_request::( -// |params, _| async move { -// assert_eq!(params.label, "first_method(…)"); -// Ok(lsp::CompletionItem { -// label: "first_method(…)".into(), -// detail: Some("fn(&mut self, B) -> C".into()), -// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { -// new_text: "first_method($1)".to_string(), -// range: lsp::Range::new( -// lsp::Position::new(0, 14), -// lsp::Position::new(0, 14), -// ), -// })), -// additional_text_edits: Some(vec![lsp::TextEdit { -// new_text: "use d::SomeTrait;\n".to_string(), -// range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 0)), -// }]), -// insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), -// ..Default::default() -// }) -// }, -// ); - -// // The additional edit is applied. 
-// buffer_a -// .condition(&cx_a, |buffer, _| { -// buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" -// }) -// .await; -// buffer_b -// .condition(&cx_b, |buffer, _| { -// buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" -// }) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_reloading_buffer_manually(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.rs": "let one = 1;", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); -// let buffer_a = project_a -// .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx)) -// .await -// .unwrap(); - -// // Join the worktree as client B. 
-// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) -// .await -// .unwrap(); -// buffer_b.update(cx_b, |buffer, cx| { -// buffer.edit([4..7], "six", cx); -// buffer.edit([10..11], "6", cx); -// assert_eq!(buffer.text(), "let six = 6;"); -// assert!(buffer.is_dirty()); -// assert!(!buffer.has_conflict()); -// }); -// buffer_a -// .condition(cx_a, |buffer, _| buffer.text() == "let six = 6;") -// .await; - -// fs.save(Path::new("/a/a.rs"), &Rope::from("let seven = 7;")) -// .await -// .unwrap(); -// buffer_a -// .condition(cx_a, |buffer, _| buffer.has_conflict()) -// .await; -// buffer_b -// .condition(cx_b, |buffer, _| buffer.has_conflict()) -// .await; - -// project_b -// .update(cx_b, |project, cx| { -// project.reload_buffers(HashSet::from_iter([buffer_b.clone()]), true, cx) -// }) -// .await -// .unwrap(); -// buffer_a.read_with(cx_a, |buffer, _| { -// assert_eq!(buffer.text(), "let seven = 7;"); -// assert!(!buffer.is_dirty()); -// assert!(!buffer.has_conflict()); -// }); -// buffer_b.read_with(cx_b, |buffer, _| { -// assert_eq!(buffer.text(), "let seven = 7;"); -// assert!(!buffer.is_dirty()); -// assert!(!buffer.has_conflict()); -// }); - -// buffer_a.update(cx_a, |buffer, cx| { -// // Undoing on the host is a no-op when the reload was initiated by the guest. -// buffer.undo(cx); -// assert_eq!(buffer.text(), "let seven = 7;"); -// assert!(!buffer.is_dirty()); -// assert!(!buffer.has_conflict()); -// }); -// buffer_b.update(cx_b, |buffer, cx| { -// // Undoing on the guest rolls back the buffer to before it was reloaded but the conflict gets cleared. 
-// buffer.undo(cx); -// assert_eq!(buffer.text(), "let six = 6;"); -// assert!(buffer.is_dirty()); -// assert!(!buffer.has_conflict()); -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_formatting_buffer(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.rs": "let one = two", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. 
-// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) -// .await -// .unwrap(); - -// let fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server.handle_request::(|_, _| async move { -// Ok(Some(vec![ -// lsp::TextEdit { -// range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 4)), -// new_text: "h".to_string(), -// }, -// lsp::TextEdit { -// range: lsp::Range::new(lsp::Position::new(0, 7), lsp::Position::new(0, 7)), -// new_text: "y".to_string(), -// }, -// ])) -// }); - -// project_b -// .update(cx_b, |project, cx| { -// project.format(HashSet::from_iter([buffer_b.clone()]), true, cx) -// }) -// .await -// .unwrap(); -// assert_eq!( -// buffer_b.read_with(cx_b, |buffer, _| buffer.text()), -// "let honey = two" -// ); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_definition(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// fs.insert_tree( -// "/root-1", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.rs": "const ONE: usize = b::TWO + b::THREE;", -// }), -// ) -// .await; -// fs.insert_tree( -// "/root-2", -// json!({ -// "b.rs": "const TWO: usize = 2;\nconst THREE: usize = 3;", -// }), -// ) -// .await; - -// // Set up a fake language server. 
-// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/root-1", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Open the file on client B. -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) -// .await -// .unwrap(); - -// // Request the definition of a symbol as the guest. 
-// let fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server.handle_request::( -// |_, _| async move { -// Ok(Some(lsp::GotoDefinitionResponse::Scalar( -// lsp::Location::new( -// lsp::Url::from_file_path("/root-2/b.rs").unwrap(), -// lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), -// ), -// ))) -// }, -// ); - -// let definitions_1 = project_b -// .update(cx_b, |p, cx| p.definition(&buffer_b, 23, cx)) -// .await -// .unwrap(); -// cx_b.read(|cx| { -// assert_eq!(definitions_1.len(), 1); -// assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); -// let target_buffer = definitions_1[0].buffer.read(cx); -// assert_eq!( -// target_buffer.text(), -// "const TWO: usize = 2;\nconst THREE: usize = 3;" -// ); -// assert_eq!( -// definitions_1[0].range.to_point(target_buffer), -// Point::new(0, 6)..Point::new(0, 9) -// ); -// }); - -// // Try getting more definitions for the same buffer, ensuring the buffer gets reused from -// // the previous call to `definition`. 
-// fake_language_server.handle_request::( -// |_, _| async move { -// Ok(Some(lsp::GotoDefinitionResponse::Scalar( -// lsp::Location::new( -// lsp::Url::from_file_path("/root-2/b.rs").unwrap(), -// lsp::Range::new(lsp::Position::new(1, 6), lsp::Position::new(1, 11)), -// ), -// ))) -// }, -// ); - -// let definitions_2 = project_b -// .update(cx_b, |p, cx| p.definition(&buffer_b, 33, cx)) -// .await -// .unwrap(); -// cx_b.read(|cx| { -// assert_eq!(definitions_2.len(), 1); -// assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); -// let target_buffer = definitions_2[0].buffer.read(cx); -// assert_eq!( -// target_buffer.text(), -// "const TWO: usize = 2;\nconst THREE: usize = 3;" -// ); -// assert_eq!( -// definitions_2[0].range.to_point(target_buffer), -// Point::new(1, 6)..Point::new(1, 11) -// ); -// }); -// assert_eq!(definitions_1[0].buffer, definitions_2[0].buffer); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_references(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// fs.insert_tree( -// "/root-1", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "one.rs": "const ONE: usize = 1;", -// "two.rs": "const TWO: usize = one::ONE + one::ONE;", -// }), -// ) -// .await; -// fs.insert_tree( -// "/root-2", -// json!({ -// "three.rs": "const THREE: usize = two::TWO + one::ONE;", -// }), -// ) -// .await; - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/root-1", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Open the file on client B. -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) -// .await -// .unwrap(); - -// // Request references to a symbol as the guest. 
-// let fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server.handle_request::( -// |params, _| async move { -// assert_eq!( -// params.text_document_position.text_document.uri.as_str(), -// "file:///root-1/one.rs" -// ); -// Ok(Some(vec![ -// lsp::Location { -// uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), -// range: lsp::Range::new( -// lsp::Position::new(0, 24), -// lsp::Position::new(0, 27), -// ), -// }, -// lsp::Location { -// uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), -// range: lsp::Range::new( -// lsp::Position::new(0, 35), -// lsp::Position::new(0, 38), -// ), -// }, -// lsp::Location { -// uri: lsp::Url::from_file_path("/root-2/three.rs").unwrap(), -// range: lsp::Range::new( -// lsp::Position::new(0, 37), -// lsp::Position::new(0, 40), -// ), -// }, -// ])) -// }, -// ); - -// let references = project_b -// .update(cx_b, |p, cx| p.references(&buffer_b, 7, cx)) -// .await -// .unwrap(); -// cx_b.read(|cx| { -// assert_eq!(references.len(), 3); -// assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); - -// let two_buffer = references[0].buffer.read(cx); -// let three_buffer = references[2].buffer.read(cx); -// assert_eq!( -// two_buffer.file().unwrap().path().as_ref(), -// Path::new("two.rs") -// ); -// assert_eq!(references[1].buffer, references[0].buffer); -// assert_eq!( -// three_buffer.file().unwrap().full_path(cx), -// Path::new("three.rs") -// ); - -// assert_eq!(references[0].range.to_offset(&two_buffer), 24..27); -// assert_eq!(references[1].range.to_offset(&two_buffer), 35..38); -// assert_eq!(references[2].range.to_offset(&three_buffer), 37..40); -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_project_search(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// fs.insert_tree( -// "/root-1", -// json!({ 
-// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a": "hello world", -// "b": "goodnight moon", -// "c": "a world of goo", -// "d": "world champion of clown world", -// }), -// ) -// .await; -// fs.insert_tree( -// "/root-2", -// json!({ -// "e": "disney world is fun", -// }), -// ) -// .await; - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; - -// let (worktree_1, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/root-1", true, cx) -// }) -// .await -// .unwrap(); -// worktree_1 -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let (worktree_2, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/root-2", true, cx) -// }) -// .await -// .unwrap(); -// worktree_2 -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; - -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. 
-// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// let results = project_b -// .update(cx_b, |project, cx| { -// project.search(SearchQuery::text("world", false, false), cx) -// }) -// .await -// .unwrap(); - -// let mut ranges_by_path = results -// .into_iter() -// .map(|(buffer, ranges)| { -// buffer.read_with(cx_b, |buffer, cx| { -// let path = buffer.file().unwrap().full_path(cx); -// let offset_ranges = ranges -// .into_iter() -// .map(|range| range.to_offset(buffer)) -// .collect::>(); -// (path, offset_ranges) -// }) -// }) -// .collect::>(); -// ranges_by_path.sort_by_key(|(path, _)| path.clone()); - -// assert_eq!( -// ranges_by_path, -// &[ -// (PathBuf::from("root-1/a"), vec![6..11]), -// (PathBuf::from("root-1/c"), vec![2..7]), -// (PathBuf::from("root-1/d"), vec![0..5, 24..29]), -// (PathBuf::from("root-2/e"), vec![7..12]), -// ] -// ); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_document_highlights(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// fs.insert_tree( -// "/root-1", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "main.rs": "fn double(number: i32) -> i32 { number + number }", -// }), -// ) -// .await; - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/root-1", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Open the file on client B. -// let buffer_b = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx))) -// .await -// .unwrap(); - -// // Request document highlights as the guest. 
-// let fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server.handle_request::( -// |params, _| async move { -// assert_eq!( -// params -// .text_document_position_params -// .text_document -// .uri -// .as_str(), -// "file:///root-1/main.rs" -// ); -// assert_eq!( -// params.text_document_position_params.position, -// lsp::Position::new(0, 34) -// ); -// Ok(Some(vec![ -// lsp::DocumentHighlight { -// kind: Some(lsp::DocumentHighlightKind::WRITE), -// range: lsp::Range::new( -// lsp::Position::new(0, 10), -// lsp::Position::new(0, 16), -// ), -// }, -// lsp::DocumentHighlight { -// kind: Some(lsp::DocumentHighlightKind::READ), -// range: lsp::Range::new( -// lsp::Position::new(0, 32), -// lsp::Position::new(0, 38), -// ), -// }, -// lsp::DocumentHighlight { -// kind: Some(lsp::DocumentHighlightKind::READ), -// range: lsp::Range::new( -// lsp::Position::new(0, 41), -// lsp::Position::new(0, 47), -// ), -// }, -// ])) -// }, -// ); - -// let highlights = project_b -// .update(cx_b, |p, cx| p.document_highlights(&buffer_b, 34, cx)) -// .await -// .unwrap(); -// buffer_b.read_with(cx_b, |buffer, _| { -// let snapshot = buffer.snapshot(); - -// let highlights = highlights -// .into_iter() -// .map(|highlight| (highlight.kind, highlight.range.to_offset(&snapshot))) -// .collect::>(); -// assert_eq!( -// highlights, -// &[ -// (lsp::DocumentHighlightKind::WRITE, 10..16), -// (lsp::DocumentHighlightKind::READ, 32..38), -// (lsp::DocumentHighlightKind::READ, 41..47) -// ] -// ) -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_project_symbols(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// fs.insert_tree( -// "/code", -// json!({ -// "crate-1": { -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "one.rs": "const ONE: usize = 1;", -// }, -// "crate-2": { -// 
"two.rs": "const TWO: usize = 2; const THREE: usize = 3;", -// }, -// "private": { -// "passwords.txt": "the-password", -// } -// }), -// ) -// .await; - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/code/crate-1", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// // Cause the language server to start. 
-// let _buffer = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) -// .await -// .unwrap(); - -// let fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server.handle_request::( -// |_, _| async move { -// #[allow(deprecated)] -// Ok(Some(vec![lsp::SymbolInformation { -// name: "TWO".into(), -// location: lsp::Location { -// uri: lsp::Url::from_file_path("/code/crate-2/two.rs").unwrap(), -// range: lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), -// }, -// kind: lsp::SymbolKind::CONSTANT, -// tags: None, -// container_name: None, -// deprecated: None, -// }])) -// }, -// ); - -// // Request the definition of a symbol as the guest. -// let symbols = project_b -// .update(cx_b, |p, cx| p.symbols("two", cx)) -// .await -// .unwrap(); -// assert_eq!(symbols.len(), 1); -// assert_eq!(symbols[0].name, "TWO"); - -// // Open one of the returned symbols. -// let buffer_b_2 = project_b -// .update(cx_b, |project, cx| { -// project.open_buffer_for_symbol(&symbols[0], cx) -// }) -// .await -// .unwrap(); -// buffer_b_2.read_with(cx_b, |buffer, _| { -// assert_eq!( -// buffer.file().unwrap().path().as_ref(), -// Path::new("../crate-2/two.rs") -// ); -// }); - -// // Attempt to craft a symbol and violate host's privacy by opening an arbitrary file. 
-// let mut fake_symbol = symbols[0].clone(); -// fake_symbol.path = Path::new("/code/secrets").into(); -// let error = project_b -// .update(cx_b, |project, cx| { -// project.open_buffer_for_symbol(&fake_symbol, cx) -// }) -// .await -// .unwrap_err(); -// assert!(error.to_string().contains("invalid symbol signature")); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_open_buffer_while_getting_definition_pointing_to_it( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// mut rng: StdRng, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// fs.insert_tree( -// "/root", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "a.rs": "const ONE: usize = b::TWO;", -// "b.rs": "const TWO: usize = 2", -// }), -// ) -// .await; - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); - -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/root", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. -// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// let buffer_b1 = cx_b -// .background() -// .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) -// .await -// .unwrap(); - -// let fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server.handle_request::( -// |_, _| async move { -// Ok(Some(lsp::GotoDefinitionResponse::Scalar( -// lsp::Location::new( -// lsp::Url::from_file_path("/root/b.rs").unwrap(), -// lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), -// ), -// ))) -// }, -// ); - -// let definitions; -// let buffer_b2; -// if rng.gen() { -// definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); -// buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.rs"), cx)); -// } else { -// buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, 
"b.rs"), cx)); -// definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); -// } - -// let buffer_b2 = buffer_b2.await.unwrap(); -// let definitions = definitions.await.unwrap(); -// assert_eq!(definitions.len(), 1); -// assert_eq!(definitions[0].buffer, buffer_b2); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_collaborating_with_code_actions( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// cx_b.update(|cx| editor::init(cx)); - -// // Set up a fake language server. -// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. 
-// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "main.rs": "mod other;\nfn main() { let foo = other::foo(); }", -// "other.rs": "pub fn foo() -> usize { 4 }", -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. 
-// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// let mut params = cx_b.update(WorkspaceParams::test); -// params.languages = lang_registry.clone(); -// params.client = client_b.client.clone(); -// params.user_store = client_b.user_store.clone(); -// params.project = project_b; - -// let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); -// let editor_b = workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.open_path((worktree_id, "main.rs"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); - -// let mut fake_language_server = fake_language_servers.next().await.unwrap(); -// fake_language_server -// .handle_request::(|params, _| async move { -// assert_eq!( -// params.text_document.uri, -// lsp::Url::from_file_path("/a/main.rs").unwrap(), -// ); -// assert_eq!(params.range.start, lsp::Position::new(0, 0)); -// assert_eq!(params.range.end, lsp::Position::new(0, 0)); -// Ok(None) -// }) -// .next() -// .await; - -// // Move cursor to a location that contains code actions. 
-// editor_b.update(cx_b, |editor, cx| { -// editor.select_ranges([Point::new(1, 31)..Point::new(1, 31)], None, cx); -// cx.focus(&editor_b); -// }); - -// fake_language_server -// .handle_request::(|params, _| async move { -// assert_eq!( -// params.text_document.uri, -// lsp::Url::from_file_path("/a/main.rs").unwrap(), -// ); -// assert_eq!(params.range.start, lsp::Position::new(1, 31)); -// assert_eq!(params.range.end, lsp::Position::new(1, 31)); - -// Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( -// lsp::CodeAction { -// title: "Inline into all callers".to_string(), -// edit: Some(lsp::WorkspaceEdit { -// changes: Some( -// [ -// ( -// lsp::Url::from_file_path("/a/main.rs").unwrap(), -// vec![lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(1, 22), -// lsp::Position::new(1, 34), -// ), -// "4".to_string(), -// )], -// ), -// ( -// lsp::Url::from_file_path("/a/other.rs").unwrap(), -// vec![lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(0, 0), -// lsp::Position::new(0, 27), -// ), -// "".to_string(), -// )], -// ), -// ] -// .into_iter() -// .collect(), -// ), -// ..Default::default() -// }), -// data: Some(json!({ -// "codeActionParams": { -// "range": { -// "start": {"line": 1, "column": 31}, -// "end": {"line": 1, "column": 31}, -// } -// } -// })), -// ..Default::default() -// }, -// )])) -// }) -// .next() -// .await; - -// // Toggle code actions and wait for them to display. -// editor_b.update(cx_b, |editor, cx| { -// editor.toggle_code_actions( -// &ToggleCodeActions { -// deployed_from_indicator: false, -// }, -// cx, -// ); -// }); -// editor_b -// .condition(&cx_b, |editor, _| editor.context_menu_visible()) -// .await; - -// fake_language_server.remove_request_handler::(); - -// // Confirming the code action will trigger a resolve request. 
-// let confirm_action = workspace_b -// .update(cx_b, |workspace, cx| { -// Editor::confirm_code_action(workspace, &ConfirmCodeAction { item_ix: Some(0) }, cx) -// }) -// .unwrap(); -// fake_language_server.handle_request::( -// |_, _| async move { -// Ok(lsp::CodeAction { -// title: "Inline into all callers".to_string(), -// edit: Some(lsp::WorkspaceEdit { -// changes: Some( -// [ -// ( -// lsp::Url::from_file_path("/a/main.rs").unwrap(), -// vec![lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(1, 22), -// lsp::Position::new(1, 34), -// ), -// "4".to_string(), -// )], -// ), -// ( -// lsp::Url::from_file_path("/a/other.rs").unwrap(), -// vec![lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(0, 0), -// lsp::Position::new(0, 27), -// ), -// "".to_string(), -// )], -// ), -// ] -// .into_iter() -// .collect(), -// ), -// ..Default::default() -// }), -// ..Default::default() -// }) -// }, -// ); - -// // After the action is confirmed, an editor containing both modified files is opened. -// confirm_action.await.unwrap(); -// let code_action_editor = workspace_b.read_with(cx_b, |workspace, cx| { -// workspace -// .active_item(cx) -// .unwrap() -// .downcast::() -// .unwrap() -// }); -// code_action_editor.update(cx_b, |editor, cx| { -// assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); -// editor.undo(&Undo, cx); -// assert_eq!( -// editor.text(cx), -// "pub fn foo() -> usize { 4 }\nmod other;\nfn main() { let foo = other::foo(); }" -// ); -// editor.redo(&Redo, cx); -// assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); -// cx_b.update(|cx| editor::init(cx)); - -// // Set up a fake language server. 
-// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// Some(tree_sitter_rust::language()), -// ); -// let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { -// capabilities: lsp::ServerCapabilities { -// rename_provider: Some(lsp::OneOf::Right(lsp::RenameOptions { -// prepare_provider: Some(true), -// work_done_progress_options: Default::default(), -// })), -// ..Default::default() -// }, -// ..Default::default() -// }); -// lang_registry.add(Arc::new(language)); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Share a project as client A -// fs.insert_tree( -// "/dir", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "one.rs": "const ONE: usize = 1;", -// "two.rs": "const TWO: usize = one::ONE + one::ONE;" -// }), -// ) -// .await; -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/dir", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; -// let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); -// project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); - -// // Join the worktree as client B. 
-// let project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); -// let mut params = cx_b.update(WorkspaceParams::test); -// params.languages = lang_registry.clone(); -// params.client = client_b.client.clone(); -// params.user_store = client_b.user_store.clone(); -// params.project = project_b; - -// let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); -// let editor_b = workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.open_path((worktree_id, "one.rs"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); -// let fake_language_server = fake_language_servers.next().await.unwrap(); - -// // Move cursor to a location that can be renamed. -// let prepare_rename = editor_b.update(cx_b, |editor, cx| { -// editor.select_ranges([7..7], None, cx); -// editor.rename(&Rename, cx).unwrap() -// }); - -// fake_language_server -// .handle_request::(|params, _| async move { -// assert_eq!(params.text_document.uri.as_str(), "file:///dir/one.rs"); -// assert_eq!(params.position, lsp::Position::new(0, 7)); -// Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( -// lsp::Position::new(0, 6), -// lsp::Position::new(0, 9), -// )))) -// }) -// .next() -// .await -// .unwrap(); -// prepare_rename.await.unwrap(); -// editor_b.update(cx_b, |editor, cx| { -// let rename = editor.pending_rename().unwrap(); -// let buffer = editor.buffer().read(cx).snapshot(cx); -// assert_eq!( -// rename.range.start.to_offset(&buffer)..rename.range.end.to_offset(&buffer), -// 6..9 -// ); -// rename.editor.update(cx, |rename_editor, cx| { -// rename_editor.buffer().update(cx, |rename_buffer, cx| { -// rename_buffer.edit([0..3], "THREE", cx); -// }); -// }); -// }); - -// let confirm_rename = workspace_b.update(cx_b, |workspace, cx| { -// Editor::confirm_rename(workspace, &ConfirmRename, cx).unwrap() -// }); 
-// fake_language_server -// .handle_request::(|params, _| async move { -// assert_eq!( -// params.text_document_position.text_document.uri.as_str(), -// "file:///dir/one.rs" -// ); -// assert_eq!( -// params.text_document_position.position, -// lsp::Position::new(0, 6) -// ); -// assert_eq!(params.new_name, "THREE"); -// Ok(Some(lsp::WorkspaceEdit { -// changes: Some( -// [ -// ( -// lsp::Url::from_file_path("/dir/one.rs").unwrap(), -// vec![lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(0, 6), -// lsp::Position::new(0, 9), -// ), -// "THREE".to_string(), -// )], -// ), -// ( -// lsp::Url::from_file_path("/dir/two.rs").unwrap(), -// vec![ -// lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(0, 24), -// lsp::Position::new(0, 27), -// ), -// "THREE".to_string(), -// ), -// lsp::TextEdit::new( -// lsp::Range::new( -// lsp::Position::new(0, 35), -// lsp::Position::new(0, 38), -// ), -// "THREE".to_string(), -// ), -// ], -// ), -// ] -// .into_iter() -// .collect(), -// ), -// ..Default::default() -// })) -// }) -// .next() -// .await -// .unwrap(); -// confirm_rename.await.unwrap(); - -// let rename_editor = workspace_b.read_with(cx_b, |workspace, cx| { -// workspace -// .active_item(cx) -// .unwrap() -// .downcast::() -// .unwrap() -// }); -// rename_editor.update(cx_b, |editor, cx| { -// assert_eq!( -// editor.text(cx), -// "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" -// ); -// editor.undo(&Undo, cx); -// assert_eq!( -// editor.text(cx), -// "const TWO: usize = one::ONE + one::ONE;\nconst ONE: usize = 1;" -// ); -// editor.redo(&Redo, cx); -// assert_eq!( -// editor.text(cx), -// "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" -// ); -// }); - -// // Ensure temporary rename edits cannot be undone/redone. 
-// editor_b.update(cx_b, |editor, cx| { -// editor.undo(&Undo, cx); -// assert_eq!(editor.text(cx), "const ONE: usize = 1;"); -// editor.undo(&Undo, cx); -// assert_eq!(editor.text(cx), "const ONE: usize = 1;"); -// editor.redo(&Redo, cx); -// assert_eq!(editor.text(cx), "const THREE: usize = 1;"); -// }) -// } - -// #[gpui::test(iterations = 10)] -// async fn test_basic_chat(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; - -// // Create an org that includes these 2 users. -// let db = &server.app_state.db; -// let org_id = db.create_org("Test Org", "test-org").await.unwrap(); -// db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) -// .await -// .unwrap(); -// db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) -// .await -// .unwrap(); - -// // Create a channel that includes all the users. 
-// let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); -// db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) -// .await -// .unwrap(); -// db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) -// .await -// .unwrap(); -// db.create_channel_message( -// channel_id, -// client_b.current_user_id(&cx_b), -// "hello A, it's B.", -// OffsetDateTime::now_utc(), -// 1, -// ) -// .await -// .unwrap(); - -// let channels_a = cx_a -// .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); -// channels_a -// .condition(cx_a, |list, _| list.available_channels().is_some()) -// .await; -// channels_a.read_with(cx_a, |list, _| { -// assert_eq!( -// list.available_channels().unwrap(), -// &[ChannelDetails { -// id: channel_id.to_proto(), -// name: "test-channel".to_string() -// }] -// ) -// }); -// let channel_a = channels_a.update(cx_a, |this, cx| { -// this.get_channel(channel_id.to_proto(), cx).unwrap() -// }); -// channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); -// channel_a -// .condition(&cx_a, |channel, _| { -// channel_messages(channel) -// == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] -// }) -// .await; - -// let channels_b = cx_b -// .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); -// channels_b -// .condition(cx_b, |list, _| list.available_channels().is_some()) -// .await; -// channels_b.read_with(cx_b, |list, _| { -// assert_eq!( -// list.available_channels().unwrap(), -// &[ChannelDetails { -// id: channel_id.to_proto(), -// name: "test-channel".to_string() -// }] -// ) -// }); - -// let channel_b = channels_b.update(cx_b, |this, cx| { -// this.get_channel(channel_id.to_proto(), cx).unwrap() -// }); -// channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); -// channel_b -// .condition(&cx_b, |channel, _| { -// channel_messages(channel) -// == 
[("user_b".to_string(), "hello A, it's B.".to_string(), false)] -// }) -// .await; - -// channel_a -// .update(cx_a, |channel, cx| { -// channel -// .send_message("oh, hi B.".to_string(), cx) -// .unwrap() -// .detach(); -// let task = channel.send_message("sup".to_string(), cx).unwrap(); -// assert_eq!( -// channel_messages(channel), -// &[ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_a".to_string(), "oh, hi B.".to_string(), true), -// ("user_a".to_string(), "sup".to_string(), true) -// ] -// ); -// task -// }) -// .await -// .unwrap(); - -// channel_b -// .condition(&cx_b, |channel, _| { -// channel_messages(channel) -// == [ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_a".to_string(), "oh, hi B.".to_string(), false), -// ("user_a".to_string(), "sup".to_string(), false), -// ] -// }) -// .await; - -// assert_eq!( -// server -// .state() -// .await -// .channel(channel_id) -// .unwrap() -// .connection_ids -// .len(), -// 2 -// ); -// cx_b.update(|_| drop(channel_b)); -// server -// .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1) -// .await; - -// cx_a.update(|_| drop(channel_a)); -// server -// .condition(|state| state.channel(channel_id).is_none()) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_chat_message_validation(cx_a: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); - -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; - -// let db = &server.app_state.db; -// let org_id = db.create_org("Test Org", "test-org").await.unwrap(); -// let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); -// db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) -// .await -// .unwrap(); -// db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) -// .await -// .unwrap(); - -// let channels_a = 
cx_a -// .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); -// channels_a -// .condition(cx_a, |list, _| list.available_channels().is_some()) -// .await; -// let channel_a = channels_a.update(cx_a, |this, cx| { -// this.get_channel(channel_id.to_proto(), cx).unwrap() -// }); - -// // Messages aren't allowed to be too long. -// channel_a -// .update(cx_a, |channel, cx| { -// let long_body = "this is long.\n".repeat(1024); -// channel.send_message(long_body, cx).unwrap() -// }) -// .await -// .unwrap_err(); - -// // Messages aren't allowed to be blank. -// channel_a.update(cx_a, |channel, cx| { -// channel.send_message(String::new(), cx).unwrap_err() -// }); - -// // Leading and trailing whitespace are trimmed. -// channel_a -// .update(cx_a, |channel, cx| { -// channel -// .send_message("\n surrounded by whitespace \n".to_string(), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// assert_eq!( -// db.get_channel_messages(channel_id, 10, None) -// .await -// .unwrap() -// .iter() -// .map(|m| &m.body) -// .collect::>(), -// &["surrounded by whitespace"] -// ); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_chat_reconnection(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); - -// // Connect to a server as 2 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; -// let mut status_b = client_b.status(); - -// // Create an org that includes these 2 users. -// let db = &server.app_state.db; -// let org_id = db.create_org("Test Org", "test-org").await.unwrap(); -// db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) -// .await -// .unwrap(); -// db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) -// .await -// .unwrap(); - -// // Create a channel that includes all the users. 
-// let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); -// db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) -// .await -// .unwrap(); -// db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) -// .await -// .unwrap(); -// db.create_channel_message( -// channel_id, -// client_b.current_user_id(&cx_b), -// "hello A, it's B.", -// OffsetDateTime::now_utc(), -// 2, -// ) -// .await -// .unwrap(); - -// let channels_a = cx_a -// .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); -// channels_a -// .condition(cx_a, |list, _| list.available_channels().is_some()) -// .await; - -// channels_a.read_with(cx_a, |list, _| { -// assert_eq!( -// list.available_channels().unwrap(), -// &[ChannelDetails { -// id: channel_id.to_proto(), -// name: "test-channel".to_string() -// }] -// ) -// }); -// let channel_a = channels_a.update(cx_a, |this, cx| { -// this.get_channel(channel_id.to_proto(), cx).unwrap() -// }); -// channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); -// channel_a -// .condition(&cx_a, |channel, _| { -// channel_messages(channel) -// == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] -// }) -// .await; - -// let channels_b = cx_b -// .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); -// channels_b -// .condition(cx_b, |list, _| list.available_channels().is_some()) -// .await; -// channels_b.read_with(cx_b, |list, _| { -// assert_eq!( -// list.available_channels().unwrap(), -// &[ChannelDetails { -// id: channel_id.to_proto(), -// name: "test-channel".to_string() -// }] -// ) -// }); - -// let channel_b = channels_b.update(cx_b, |this, cx| { -// this.get_channel(channel_id.to_proto(), cx).unwrap() -// }); -// channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); -// channel_b -// .condition(&cx_b, |channel, _| { -// channel_messages(channel) -// == 
[("user_b".to_string(), "hello A, it's B.".to_string(), false)] -// }) -// .await; - -// // Disconnect client B, ensuring we can still access its cached channel data. -// server.forbid_connections(); -// server.disconnect_client(client_b.current_user_id(&cx_b)); -// cx_b.foreground().advance_clock(Duration::from_secs(3)); -// while !matches!( -// status_b.next().await, -// Some(client::Status::ReconnectionError { .. }) -// ) {} - -// channels_b.read_with(cx_b, |channels, _| { -// assert_eq!( -// channels.available_channels().unwrap(), -// [ChannelDetails { -// id: channel_id.to_proto(), -// name: "test-channel".to_string() -// }] -// ) -// }); -// channel_b.read_with(cx_b, |channel, _| { -// assert_eq!( -// channel_messages(channel), -// [("user_b".to_string(), "hello A, it's B.".to_string(), false)] -// ) -// }); - -// // Send a message from client B while it is disconnected. -// channel_b -// .update(cx_b, |channel, cx| { -// let task = channel -// .send_message("can you see this?".to_string(), cx) -// .unwrap(); -// assert_eq!( -// channel_messages(channel), -// &[ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_b".to_string(), "can you see this?".to_string(), true) -// ] -// ); -// task -// }) -// .await -// .unwrap_err(); - -// // Send a message from client A while B is disconnected. -// channel_a -// .update(cx_a, |channel, cx| { -// channel -// .send_message("oh, hi B.".to_string(), cx) -// .unwrap() -// .detach(); -// let task = channel.send_message("sup".to_string(), cx).unwrap(); -// assert_eq!( -// channel_messages(channel), -// &[ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_a".to_string(), "oh, hi B.".to_string(), true), -// ("user_a".to_string(), "sup".to_string(), true) -// ] -// ); -// task -// }) -// .await -// .unwrap(); - -// // Give client B a chance to reconnect. 
-// server.allow_connections(); -// cx_b.foreground().advance_clock(Duration::from_secs(10)); - -// // Verify that B sees the new messages upon reconnection, as well as the message client B -// // sent while offline. -// channel_b -// .condition(&cx_b, |channel, _| { -// channel_messages(channel) -// == [ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_a".to_string(), "oh, hi B.".to_string(), false), -// ("user_a".to_string(), "sup".to_string(), false), -// ("user_b".to_string(), "can you see this?".to_string(), false), -// ] -// }) -// .await; - -// // Ensure client A and B can communicate normally after reconnection. -// channel_a -// .update(cx_a, |channel, cx| { -// channel.send_message("you online?".to_string(), cx).unwrap() -// }) -// .await -// .unwrap(); -// channel_b -// .condition(&cx_b, |channel, _| { -// channel_messages(channel) -// == [ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_a".to_string(), "oh, hi B.".to_string(), false), -// ("user_a".to_string(), "sup".to_string(), false), -// ("user_b".to_string(), "can you see this?".to_string(), false), -// ("user_a".to_string(), "you online?".to_string(), false), -// ] -// }) -// .await; - -// channel_b -// .update(cx_b, |channel, cx| { -// channel.send_message("yep".to_string(), cx).unwrap() -// }) -// .await -// .unwrap(); -// channel_a -// .condition(&cx_a, |channel, _| { -// channel_messages(channel) -// == [ -// ("user_b".to_string(), "hello A, it's B.".to_string(), false), -// ("user_a".to_string(), "oh, hi B.".to_string(), false), -// ("user_a".to_string(), "sup".to_string(), false), -// ("user_b".to_string(), "can you see this?".to_string(), false), -// ("user_a".to_string(), "you online?".to_string(), false), -// ("user_b".to_string(), "yep".to_string(), false), -// ] -// }) -// .await; -// } - -// #[gpui::test(iterations = 10)] -// async fn test_contacts( -// cx_a: &mut TestAppContext, -// cx_b: &mut TestAppContext, -// cx_c: &mut 
TestAppContext, -// ) { -// cx_a.foreground().forbid_parking(); -// let lang_registry = Arc::new(LanguageRegistry::test()); -// let fs = FakeFs::new(cx_a.background()); - -// // Connect to a server as 3 clients. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let client_a = server.create_client(cx_a, "user_a").await; -// let client_b = server.create_client(cx_b, "user_b").await; -// let client_c = server.create_client(cx_c, "user_c").await; - -// // Share a worktree as client A. -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, -// }), -// ) -// .await; - -// let project_a = cx_a.update(|cx| { -// Project::local( -// client_a.clone(), -// client_a.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let (worktree_a, _) = project_a -// .update(cx_a, |p, cx| { -// p.find_or_create_local_worktree("/a", true, cx) -// }) -// .await -// .unwrap(); -// worktree_a -// .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; - -// client_a -// .user_store -// .condition(&cx_a, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] -// }) -// .await; -// client_b -// .user_store -// .condition(&cx_b, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] -// }) -// .await; -// client_c -// .user_store -// .condition(&cx_c, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] -// }) -// .await; - -// let project_id = project_a -// .update(cx_a, |project, _| project.next_remote_id()) -// .await; -// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); -// client_a -// .user_store -// .condition(&cx_a, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] -// }) -// .await; -// client_b -// .user_store -// .condition(&cx_b, |user_store, _| { -// 
contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] -// }) -// .await; -// client_c -// .user_store -// .condition(&cx_c, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] -// }) -// .await; - -// let _project_b = Project::remote( -// project_id, -// client_b.clone(), -// client_b.user_store.clone(), -// lang_registry.clone(), -// fs.clone(), -// &mut cx_b.to_async(), -// ) -// .await -// .unwrap(); - -// client_a -// .user_store -// .condition(&cx_a, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] -// }) -// .await; -// client_b -// .user_store -// .condition(&cx_b, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] -// }) -// .await; -// client_c -// .user_store -// .condition(&cx_c, |user_store, _| { -// contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] -// }) -// .await; - -// project_a -// .condition(&cx_a, |project, _| { -// project.collaborators().contains_key(&client_b.peer_id) -// }) -// .await; - -// cx_a.update(move |_| drop(project_a)); -// client_a -// .user_store -// .condition(&cx_a, |user_store, _| contacts(user_store) == vec![]) -// .await; -// client_b -// .user_store -// .condition(&cx_b, |user_store, _| contacts(user_store) == vec![]) -// .await; -// client_c -// .user_store -// .condition(&cx_c, |user_store, _| contacts(user_store) == vec![]) -// .await; - -// fn contacts(user_store: &UserStore) -> Vec<(&str, Vec<(&str, bool, Vec<&str>)>)> { -// user_store -// .contacts() -// .iter() -// .map(|contact| { -// let worktrees = contact -// .projects -// .iter() -// .map(|p| { -// ( -// p.worktree_root_names[0].as_str(), -// p.is_shared, -// p.guests.iter().map(|p| p.github_login.as_str()).collect(), -// ) -// }) -// .collect(); -// (contact.user.github_login.as_str(), worktrees) -// }) -// .collect() -// } -// } - -// #[gpui::test(iterations = 10)] -// async fn 
test_following(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let fs = FakeFs::new(cx_a.background()); - -// // 2 clients connect to a server. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let mut client_a = server.create_client(cx_a, "user_a").await; -// let mut client_b = server.create_client(cx_b, "user_b").await; -// cx_a.update(editor::init); -// cx_b.update(editor::init); - -// // Client A shares a project. -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "1.txt": "one", -// "2.txt": "two", -// "3.txt": "three", -// }), -// ) -// .await; -// let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; -// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); - -// // Client B joins the project. -// let project_b = client_b -// .build_remote_project( -// project_a -// .read_with(cx_a, |project, _| project.remote_id()) -// .unwrap(), -// cx_b, -// ) -// .await; - -// // Client A opens some editors. -// let workspace_a = client_a.build_workspace(&project_a, cx_a); -// let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); -// let editor_a1 = workspace_a -// .update(cx_a, |workspace, cx| { -// workspace.open_path((worktree_id, "1.txt"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); -// let editor_a2 = workspace_a -// .update(cx_a, |workspace, cx| { -// workspace.open_path((worktree_id, "2.txt"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); - -// // Client B opens an editor. 
-// let workspace_b = client_b.build_workspace(&project_b, cx_b); -// let editor_b1 = workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.open_path((worktree_id, "1.txt"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); - -// let client_a_id = project_b.read_with(cx_b, |project, _| { -// project.collaborators().values().next().unwrap().peer_id -// }); -// let client_b_id = project_a.read_with(cx_a, |project, _| { -// project.collaborators().values().next().unwrap().peer_id -// }); - -// // When client B starts following client A, all visible view states are replicated to client B. -// editor_a1.update(cx_a, |editor, cx| editor.select_ranges([0..1], None, cx)); -// editor_a2.update(cx_a, |editor, cx| editor.select_ranges([2..3], None, cx)); -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace -// .toggle_follow(&ToggleFollow(client_a_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { -// workspace -// .active_item(cx) -// .unwrap() -// .downcast::() -// .unwrap() -// }); -// assert!(cx_b.read(|cx| editor_b2.is_focused(cx))); -// assert_eq!( -// editor_b2.read_with(cx_b, |editor, cx| editor.project_path(cx)), -// Some((worktree_id, "2.txt").into()) -// ); -// assert_eq!( -// editor_b2.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), -// vec![2..3] -// ); -// assert_eq!( -// editor_b1.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), -// vec![0..1] -// ); - -// // When client A activates a different editor, client B does so as well. -// workspace_a.update(cx_a, |workspace, cx| { -// workspace.activate_item(&editor_a1, cx) -// }); -// workspace_b -// .condition(cx_b, |workspace, cx| { -// workspace.active_item(cx).unwrap().id() == editor_b1.id() -// }) -// .await; - -// // When client A navigates back and forth, client B does so as well. 
-// workspace_a -// .update(cx_a, |workspace, cx| { -// workspace::Pane::go_back(workspace, None, cx) -// }) -// .await; -// workspace_b -// .condition(cx_b, |workspace, cx| { -// workspace.active_item(cx).unwrap().id() == editor_b2.id() -// }) -// .await; - -// workspace_a -// .update(cx_a, |workspace, cx| { -// workspace::Pane::go_forward(workspace, None, cx) -// }) -// .await; -// workspace_b -// .condition(cx_b, |workspace, cx| { -// workspace.active_item(cx).unwrap().id() == editor_b1.id() -// }) -// .await; - -// // Changes to client A's editor are reflected on client B. -// editor_a1.update(cx_a, |editor, cx| { -// editor.select_ranges([1..1, 2..2], None, cx); -// }); -// editor_b1 -// .condition(cx_b, |editor, cx| { -// editor.selected_ranges(cx) == vec![1..1, 2..2] -// }) -// .await; - -// editor_a1.update(cx_a, |editor, cx| editor.set_text("TWO", cx)); -// editor_b1 -// .condition(cx_b, |editor, cx| editor.text(cx) == "TWO") -// .await; - -// editor_a1.update(cx_a, |editor, cx| { -// editor.select_ranges([3..3], None, cx); -// editor.set_scroll_position(vec2f(0., 100.), cx); -// }); -// editor_b1 -// .condition(cx_b, |editor, cx| editor.selected_ranges(cx) == vec![3..3]) -// .await; - -// // After unfollowing, client B stops receiving updates from client A. -// workspace_b.update(cx_b, |workspace, cx| { -// workspace.unfollow(&workspace.active_pane().clone(), cx) -// }); -// workspace_a.update(cx_a, |workspace, cx| { -// workspace.activate_item(&editor_a2, cx) -// }); -// cx_a.foreground().run_until_parked(); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, cx| workspace -// .active_item(cx) -// .unwrap() -// .id()), -// editor_b1.id() -// ); - -// // Client A starts following client B. 
-// workspace_a -// .update(cx_a, |workspace, cx| { -// workspace -// .toggle_follow(&ToggleFollow(client_b_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// assert_eq!( -// workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), -// Some(client_b_id) -// ); -// assert_eq!( -// workspace_a.read_with(cx_a, |workspace, cx| workspace -// .active_item(cx) -// .unwrap() -// .id()), -// editor_a1.id() -// ); - -// // Following interrupts when client B disconnects. -// client_b.disconnect(&cx_b.to_async()).unwrap(); -// cx_a.foreground().run_until_parked(); -// assert_eq!( -// workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), -// None -// ); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_peers_following_each_other(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let fs = FakeFs::new(cx_a.background()); - -// // 2 clients connect to a server. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let mut client_a = server.create_client(cx_a, "user_a").await; -// let mut client_b = server.create_client(cx_b, "user_b").await; -// cx_a.update(editor::init); -// cx_b.update(editor::init); - -// // Client A shares a project. -// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "1.txt": "one", -// "2.txt": "two", -// "3.txt": "three", -// "4.txt": "four", -// }), -// ) -// .await; -// let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; -// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); - -// // Client B joins the project. -// let project_b = client_b -// .build_remote_project( -// project_a -// .read_with(cx_a, |project, _| project.remote_id()) -// .unwrap(), -// cx_b, -// ) -// .await; - -// // Client A opens some editors. 
-// let workspace_a = client_a.build_workspace(&project_a, cx_a); -// let pane_a1 = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); -// let _editor_a1 = workspace_a -// .update(cx_a, |workspace, cx| { -// workspace.open_path((worktree_id, "1.txt"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); - -// // Client B opens an editor. -// let workspace_b = client_b.build_workspace(&project_b, cx_b); -// let pane_b1 = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); -// let _editor_b1 = workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.open_path((worktree_id, "2.txt"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); - -// // Clients A and B follow each other in split panes -// workspace_a -// .update(cx_a, |workspace, cx| { -// workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); -// assert_ne!(*workspace.active_pane(), pane_a1); -// let leader_id = *project_a.read(cx).collaborators().keys().next().unwrap(); -// workspace -// .toggle_follow(&workspace::ToggleFollow(leader_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); -// assert_ne!(*workspace.active_pane(), pane_b1); -// let leader_id = *project_b.read(cx).collaborators().keys().next().unwrap(); -// workspace -// .toggle_follow(&workspace::ToggleFollow(leader_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); - -// workspace_a -// .update(cx_a, |workspace, cx| { -// workspace.activate_next_pane(cx); -// assert_eq!(*workspace.active_pane(), pane_a1); -// workspace.open_path((worktree_id, "3.txt"), cx) -// }) -// .await -// .unwrap(); -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.activate_next_pane(cx); -// assert_eq!(*workspace.active_pane(), pane_b1); -// workspace.open_path((worktree_id, "4.txt"), cx) -// }) -// 
.await -// .unwrap(); -// cx_a.foreground().run_until_parked(); - -// // Ensure leader updates don't change the active pane of followers -// workspace_a.read_with(cx_a, |workspace, _| { -// assert_eq!(*workspace.active_pane(), pane_a1); -// }); -// workspace_b.read_with(cx_b, |workspace, _| { -// assert_eq!(*workspace.active_pane(), pane_b1); -// }); - -// // Ensure peers following each other doesn't cause an infinite loop. -// assert_eq!( -// workspace_a.read_with(cx_a, |workspace, cx| workspace -// .active_item(cx) -// .unwrap() -// .project_path(cx)), -// Some((worktree_id, "3.txt").into()) -// ); -// workspace_a.update(cx_a, |workspace, cx| { -// assert_eq!( -// workspace.active_item(cx).unwrap().project_path(cx), -// Some((worktree_id, "3.txt").into()) -// ); -// workspace.activate_next_pane(cx); -// assert_eq!( -// workspace.active_item(cx).unwrap().project_path(cx), -// Some((worktree_id, "4.txt").into()) -// ); -// }); -// workspace_b.update(cx_b, |workspace, cx| { -// assert_eq!( -// workspace.active_item(cx).unwrap().project_path(cx), -// Some((worktree_id, "4.txt").into()) -// ); -// workspace.activate_next_pane(cx); -// assert_eq!( -// workspace.active_item(cx).unwrap().project_path(cx), -// Some((worktree_id, "3.txt").into()) -// ); -// }); -// } - -// #[gpui::test(iterations = 10)] -// async fn test_auto_unfollowing(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { -// cx_a.foreground().forbid_parking(); -// let fs = FakeFs::new(cx_a.background()); - -// // 2 clients connect to a server. -// let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; -// let mut client_a = server.create_client(cx_a, "user_a").await; -// let mut client_b = server.create_client(cx_b, "user_b").await; -// cx_a.update(editor::init); -// cx_b.update(editor::init); - -// // Client A shares a project. 
-// fs.insert_tree( -// "/a", -// json!({ -// ".zed.toml": r#"collaborators = ["user_b"]"#, -// "1.txt": "one", -// "2.txt": "two", -// "3.txt": "three", -// }), -// ) -// .await; -// let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; -// project_a -// .update(cx_a, |project, cx| project.share(cx)) -// .await -// .unwrap(); - -// // Client B joins the project. -// let project_b = client_b -// .build_remote_project( -// project_a -// .read_with(cx_a, |project, _| project.remote_id()) -// .unwrap(), -// cx_b, -// ) -// .await; - -// // Client A opens some editors. -// let workspace_a = client_a.build_workspace(&project_a, cx_a); -// let _editor_a1 = workspace_a -// .update(cx_a, |workspace, cx| { -// workspace.open_path((worktree_id, "1.txt"), cx) -// }) -// .await -// .unwrap() -// .downcast::() -// .unwrap(); - -// // Client B starts following client A. -// let workspace_b = client_b.build_workspace(&project_b, cx_b); -// let pane_b = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); -// let leader_id = project_b.read_with(cx_b, |project, _| { -// project.collaborators().values().next().unwrap().peer_id -// }); -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace -// .toggle_follow(&ToggleFollow(leader_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// Some(leader_id) -// ); -// let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { -// workspace -// .active_item(cx) -// .unwrap() -// .downcast::() -// .unwrap() -// }); - -// // When client B moves, it automatically stops following client A. 
-// editor_b2.update(cx_b, |editor, cx| editor.move_right(&editor::MoveRight, cx)); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// None -// ); - -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace -// .toggle_follow(&ToggleFollow(leader_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// Some(leader_id) -// ); - -// // When client B edits, it automatically stops following client A. -// editor_b2.update(cx_b, |editor, cx| editor.insert("X", cx)); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// None -// ); - -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace -// .toggle_follow(&ToggleFollow(leader_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// Some(leader_id) -// ); - -// // When client B scrolls, it automatically stops following client A. -// editor_b2.update(cx_b, |editor, cx| { -// editor.set_scroll_position(vec2f(0., 3.), cx) -// }); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// None -// ); - -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace -// .toggle_follow(&ToggleFollow(leader_id), cx) -// .unwrap() -// }) -// .await -// .unwrap(); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// Some(leader_id) -// ); - -// // When client B activates a different pane, it continues following client A in the original pane. 
-// workspace_b.update(cx_b, |workspace, cx| { -// workspace.split_pane(pane_b.clone(), SplitDirection::Right, cx) -// }); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// Some(leader_id) -// ); - -// workspace_b.update(cx_b, |workspace, cx| workspace.activate_next_pane(cx)); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// Some(leader_id) -// ); - -// // When client B activates a different item in the original pane, it automatically stops following client A. -// workspace_b -// .update(cx_b, |workspace, cx| { -// workspace.open_path((worktree_id, "2.txt"), cx) -// }) -// .await -// .unwrap(); -// assert_eq!( -// workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), -// None -// ); -// } - -// #[gpui::test(iterations = 100)] -// async fn test_random_collaboration( -// cx: &mut TestAppContext, -// deterministic: Arc, -// rng: StdRng, -// ) { -// cx.foreground().forbid_parking(); -// let max_peers = env::var("MAX_PEERS") -// .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) -// .unwrap_or(5); -// assert!(max_peers <= 5); - -// let max_operations = env::var("OPERATIONS") -// .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) -// .unwrap_or(10); - -// let rng = Arc::new(Mutex::new(rng)); - -// let guest_lang_registry = Arc::new(LanguageRegistry::test()); -// let host_language_registry = Arc::new(LanguageRegistry::test()); - -// let fs = FakeFs::new(cx.background()); -// fs.insert_tree( -// "/_collab", -// json!({ -// ".zed.toml": r#"collaborators = ["guest-1", "guest-2", "guest-3", "guest-4"]"# -// }), -// ) -// .await; - -// let mut server = TestServer::start(cx.foreground(), cx.background()).await; -// let mut clients = Vec::new(); -// let mut user_ids = Vec::new(); -// let mut op_start_signals = Vec::new(); -// let files = Arc::new(Mutex::new(Vec::new())); - -// let mut next_entity_id = 100000; -// let mut host_cx 
= TestAppContext::new( -// cx.foreground_platform(), -// cx.platform(), -// deterministic.build_foreground(next_entity_id), -// deterministic.build_background(), -// cx.font_cache(), -// cx.leak_detector(), -// next_entity_id, -// ); -// let host = server.create_client(&mut host_cx, "host").await; -// let host_project = host_cx.update(|cx| { -// Project::local( -// host.client.clone(), -// host.user_store.clone(), -// host_language_registry.clone(), -// fs.clone(), -// cx, -// ) -// }); -// let host_project_id = host_project -// .update(&mut host_cx, |p, _| p.next_remote_id()) -// .await; - -// let (collab_worktree, _) = host_project -// .update(&mut host_cx, |project, cx| { -// project.find_or_create_local_worktree("/_collab", true, cx) -// }) -// .await -// .unwrap(); -// collab_worktree -// .read_with(&host_cx, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// host_project -// .update(&mut host_cx, |project, cx| project.share(cx)) -// .await -// .unwrap(); - -// // Set up fake language servers. 
-// let mut language = Language::new( -// LanguageConfig { -// name: "Rust".into(), -// path_suffixes: vec!["rs".to_string()], -// ..Default::default() -// }, -// None, -// ); -// let _fake_servers = language.set_fake_lsp_adapter(FakeLspAdapter { -// name: "the-fake-language-server", -// capabilities: lsp::LanguageServer::full_capabilities(), -// initializer: Some(Box::new({ -// let rng = rng.clone(); -// let files = files.clone(); -// let project = host_project.downgrade(); -// move |fake_server: &mut FakeLanguageServer| { -// fake_server.handle_request::( -// |_, _| async move { -// Ok(Some(lsp::CompletionResponse::Array(vec![ -// lsp::CompletionItem { -// text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { -// range: lsp::Range::new( -// lsp::Position::new(0, 0), -// lsp::Position::new(0, 0), -// ), -// new_text: "the-new-text".to_string(), -// })), -// ..Default::default() -// }, -// ]))) -// }, -// ); - -// fake_server.handle_request::( -// |_, _| async move { -// Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( -// lsp::CodeAction { -// title: "the-code-action".to_string(), -// ..Default::default() -// }, -// )])) -// }, -// ); - -// fake_server.handle_request::( -// |params, _| async move { -// Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( -// params.position, -// params.position, -// )))) -// }, -// ); - -// fake_server.handle_request::({ -// let files = files.clone(); -// let rng = rng.clone(); -// move |_, _| { -// let files = files.clone(); -// let rng = rng.clone(); -// async move { -// let files = files.lock(); -// let mut rng = rng.lock(); -// let count = rng.gen_range::(1..3); -// let files = (0..count) -// .map(|_| files.choose(&mut *rng).unwrap()) -// .collect::>(); -// log::info!("LSP: Returning definitions in files {:?}", &files); -// Ok(Some(lsp::GotoDefinitionResponse::Array( -// files -// .into_iter() -// .map(|file| lsp::Location { -// uri: lsp::Url::from_file_path(file).unwrap(), -// range: Default::default(), -// }) 
-// .collect(), -// ))) -// } -// } -// }); - -// fake_server.handle_request::({ -// let rng = rng.clone(); -// let project = project.clone(); -// move |params, mut cx| { -// let highlights = if let Some(project) = project.upgrade(&cx) { -// project.update(&mut cx, |project, cx| { -// let path = params -// .text_document_position_params -// .text_document -// .uri -// .to_file_path() -// .unwrap(); -// let (worktree, relative_path) = -// project.find_local_worktree(&path, cx)?; -// let project_path = -// ProjectPath::from((worktree.read(cx).id(), relative_path)); -// let buffer = -// project.get_open_buffer(&project_path, cx)?.read(cx); - -// let mut highlights = Vec::new(); -// let highlight_count = rng.lock().gen_range(1..=5); -// let mut prev_end = 0; -// for _ in 0..highlight_count { -// let range = -// buffer.random_byte_range(prev_end, &mut *rng.lock()); - -// highlights.push(lsp::DocumentHighlight { -// range: range_to_lsp(range.to_point_utf16(buffer)), -// kind: Some(lsp::DocumentHighlightKind::READ), -// }); -// prev_end = range.end; -// } -// Some(highlights) -// }) -// } else { -// None -// }; -// async move { Ok(highlights) } -// } -// }); -// } -// })), -// ..Default::default() -// }); -// host_language_registry.add(Arc::new(language)); - -// let op_start_signal = futures::channel::mpsc::unbounded(); -// user_ids.push(host.current_user_id(&host_cx)); -// op_start_signals.push(op_start_signal.0); -// clients.push(host_cx.foreground().spawn(host.simulate_host( -// host_project, -// files, -// op_start_signal.1, -// rng.clone(), -// host_cx, -// ))); - -// let disconnect_host_at = if rng.lock().gen_bool(0.2) { -// rng.lock().gen_range(0..max_operations) -// } else { -// max_operations -// }; -// let mut available_guests = vec![ -// "guest-1".to_string(), -// "guest-2".to_string(), -// "guest-3".to_string(), -// "guest-4".to_string(), -// ]; -// let mut operations = 0; -// while operations < max_operations { -// if operations == disconnect_host_at { -// 
server.disconnect_client(user_ids[0]); -// cx.foreground().advance_clock(RECEIVE_TIMEOUT); -// drop(op_start_signals); -// let mut clients = futures::future::join_all(clients).await; -// cx.foreground().run_until_parked(); - -// let (host, mut host_cx, host_err) = clients.remove(0); -// if let Some(host_err) = host_err { -// log::error!("host error - {}", host_err); -// } -// host.project -// .as_ref() -// .unwrap() -// .read_with(&host_cx, |project, _| assert!(!project.is_shared())); -// for (guest, mut guest_cx, guest_err) in clients { -// if let Some(guest_err) = guest_err { -// log::error!("{} error - {}", guest.username, guest_err); -// } -// let contacts = server -// .store -// .read() -// .await -// .contacts_for_user(guest.current_user_id(&guest_cx)); -// assert!(!contacts -// .iter() -// .flat_map(|contact| &contact.projects) -// .any(|project| project.id == host_project_id)); -// guest -// .project -// .as_ref() -// .unwrap() -// .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); -// guest_cx.update(|_| drop(guest)); -// } -// host_cx.update(|_| drop(host)); - -// return; -// } - -// let distribution = rng.lock().gen_range(0..100); -// match distribution { -// 0..=19 if !available_guests.is_empty() => { -// let guest_ix = rng.lock().gen_range(0..available_guests.len()); -// let guest_username = available_guests.remove(guest_ix); -// log::info!("Adding new connection for {}", guest_username); -// next_entity_id += 100000; -// let mut guest_cx = TestAppContext::new( -// cx.foreground_platform(), -// cx.platform(), -// deterministic.build_foreground(next_entity_id), -// deterministic.build_background(), -// cx.font_cache(), -// cx.leak_detector(), -// next_entity_id, -// ); -// let guest = server.create_client(&mut guest_cx, &guest_username).await; -// let guest_project = Project::remote( -// host_project_id, -// guest.client.clone(), -// guest.user_store.clone(), -// guest_lang_registry.clone(), -// FakeFs::new(cx.background()), -// &mut 
guest_cx.to_async(), -// ) -// .await -// .unwrap(); -// let op_start_signal = futures::channel::mpsc::unbounded(); -// user_ids.push(guest.current_user_id(&guest_cx)); -// op_start_signals.push(op_start_signal.0); -// clients.push(guest_cx.foreground().spawn(guest.simulate_guest( -// guest_username.clone(), -// guest_project, -// op_start_signal.1, -// rng.clone(), -// guest_cx, -// ))); - -// log::info!("Added connection for {}", guest_username); -// operations += 1; -// } -// 20..=29 if clients.len() > 1 => { -// log::info!("Removing guest"); -// let guest_ix = rng.lock().gen_range(1..clients.len()); -// let removed_guest_id = user_ids.remove(guest_ix); -// let guest = clients.remove(guest_ix); -// op_start_signals.remove(guest_ix); -// server.disconnect_client(removed_guest_id); -// cx.foreground().advance_clock(RECEIVE_TIMEOUT); -// let (guest, mut guest_cx, guest_err) = guest.await; -// if let Some(guest_err) = guest_err { -// log::error!("{} error - {}", guest.username, guest_err); -// } -// guest -// .project -// .as_ref() -// .unwrap() -// .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); -// for user_id in &user_ids { -// for contact in server.store.read().await.contacts_for_user(*user_id) { -// assert_ne!( -// contact.user_id, removed_guest_id.0 as u64, -// "removed guest is still a contact of another peer" -// ); -// for project in contact.projects { -// for project_guest_id in project.guests { -// assert_ne!( -// project_guest_id, removed_guest_id.0 as u64, -// "removed guest appears as still participating on a project" -// ); -// } -// } -// } -// } - -// log::info!("{} removed", guest.username); -// available_guests.push(guest.username.clone()); -// guest_cx.update(|_| drop(guest)); - -// operations += 1; -// } -// _ => { -// while operations < max_operations && rng.lock().gen_bool(0.7) { -// op_start_signals -// .choose(&mut *rng.lock()) -// .unwrap() -// .unbounded_send(()) -// .unwrap(); -// operations += 1; -// } - -// if 
rng.lock().gen_bool(0.8) { -// cx.foreground().run_until_parked(); -// } -// } -// } -// } - -// drop(op_start_signals); -// let mut clients = futures::future::join_all(clients).await; -// cx.foreground().run_until_parked(); - -// let (host_client, mut host_cx, host_err) = clients.remove(0); -// if let Some(host_err) = host_err { -// panic!("host error - {}", host_err); -// } -// let host_project = host_client.project.as_ref().unwrap(); -// let host_worktree_snapshots = host_project.read_with(&host_cx, |project, cx| { -// project -// .worktrees(cx) -// .map(|worktree| { -// let snapshot = worktree.read(cx).snapshot(); -// (snapshot.id(), snapshot) -// }) -// .collect::>() -// }); - -// host_client -// .project -// .as_ref() -// .unwrap() -// .read_with(&host_cx, |project, cx| project.check_invariants(cx)); - -// for (guest_client, mut guest_cx, guest_err) in clients.into_iter() { -// if let Some(guest_err) = guest_err { -// panic!("{} error - {}", guest_client.username, guest_err); -// } -// let worktree_snapshots = -// guest_client -// .project -// .as_ref() -// .unwrap() -// .read_with(&guest_cx, |project, cx| { -// project -// .worktrees(cx) -// .map(|worktree| { -// let worktree = worktree.read(cx); -// (worktree.id(), worktree.snapshot()) -// }) -// .collect::>() -// }); - -// assert_eq!( -// worktree_snapshots.keys().collect::>(), -// host_worktree_snapshots.keys().collect::>(), -// "{} has different worktrees than the host", -// guest_client.username -// ); -// for (id, host_snapshot) in &host_worktree_snapshots { -// let guest_snapshot = &worktree_snapshots[id]; -// assert_eq!( -// guest_snapshot.root_name(), -// host_snapshot.root_name(), -// "{} has different root name than the host for worktree {}", -// guest_client.username, -// id -// ); -// assert_eq!( -// guest_snapshot.entries(false).collect::>(), -// host_snapshot.entries(false).collect::>(), -// "{} has different snapshot than the host for worktree {}", -// guest_client.username, -// id -// ); -// 
} - -// guest_client -// .project -// .as_ref() -// .unwrap() -// .read_with(&guest_cx, |project, cx| project.check_invariants(cx)); - -// for guest_buffer in &guest_client.buffers { -// let buffer_id = guest_buffer.read_with(&guest_cx, |buffer, _| buffer.remote_id()); -// let host_buffer = host_project.read_with(&host_cx, |project, cx| { -// project.buffer_for_id(buffer_id, cx).expect(&format!( -// "host does not have buffer for guest:{}, peer:{}, id:{}", -// guest_client.username, guest_client.peer_id, buffer_id -// )) -// }); -// let path = host_buffer -// .read_with(&host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); - -// assert_eq!( -// guest_buffer.read_with(&guest_cx, |buffer, _| buffer.deferred_ops_len()), -// 0, -// "{}, buffer {}, path {:?} has deferred operations", -// guest_client.username, -// buffer_id, -// path, -// ); -// assert_eq!( -// guest_buffer.read_with(&guest_cx, |buffer, _| buffer.text()), -// host_buffer.read_with(&host_cx, |buffer, _| buffer.text()), -// "{}, buffer {}, path {:?}, differs from the host's buffer", -// guest_client.username, -// buffer_id, -// path -// ); -// } - -// guest_cx.update(|_| drop(guest_client)); -// } - -// host_cx.update(|_| drop(host_client)); -// } - -// struct TestServer { -// peer: Arc, -// app_state: Arc, -// server: Arc, -// foreground: Rc, -// notifications: mpsc::UnboundedReceiver<()>, -// connection_killers: Arc>>>, -// forbid_connections: Arc, -// _test_db: TestDb, -// } - -// impl TestServer { -// async fn start( -// foreground: Rc, -// background: Arc, -// ) -> Self { -// let test_db = TestDb::fake(background); -// let app_state = Self::build_app_state(&test_db).await; -// let peer = Peer::new(); -// let notifications = mpsc::unbounded(); -// let server = Server::new(app_state.clone(), peer.clone(), Some(notifications.0)); -// Self { -// peer, -// app_state, -// server, -// foreground, -// notifications: notifications.1, -// connection_killers: Default::default(), -// forbid_connections: 
Default::default(), -// _test_db: test_db, -// } -// } - -// async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { -// cx.update(|cx| { -// let settings = Settings::test(cx); -// cx.set_global(settings); -// }); - -// let http = FakeHttpClient::with_404_response(); -// let user_id = self.app_state.db.create_user(name, false).await.unwrap(); -// let client_name = name.to_string(); -// let mut client = Client::new(http.clone()); -// let server = self.server.clone(); -// let connection_killers = self.connection_killers.clone(); -// let forbid_connections = self.forbid_connections.clone(); -// let (connection_id_tx, mut connection_id_rx) = mpsc::channel(16); - -// Arc::get_mut(&mut client) -// .unwrap() -// .override_authenticate(move |cx| { -// cx.spawn(|_| async move { -// let access_token = "the-token".to_string(); -// Ok(Credentials { -// user_id: user_id.0 as u64, -// access_token, -// }) -// }) -// }) -// .override_establish_connection(move |credentials, cx| { -// assert_eq!(credentials.user_id, user_id.0 as u64); -// assert_eq!(credentials.access_token, "the-token"); - -// let server = server.clone(); -// let connection_killers = connection_killers.clone(); -// let forbid_connections = forbid_connections.clone(); -// let client_name = client_name.clone(); -// let connection_id_tx = connection_id_tx.clone(); -// cx.spawn(move |cx| async move { -// if forbid_connections.load(SeqCst) { -// Err(EstablishConnectionError::other(anyhow!( -// "server is forbidding connections" -// ))) -// } else { -// let (client_conn, server_conn, killed) = -// Connection::in_memory(cx.background()); -// connection_killers.lock().insert(user_id, killed); -// cx.background() -// .spawn(server.handle_connection( -// server_conn, -// client_name, -// user_id, -// Some(connection_id_tx), -// cx.background(), -// )) -// .detach(); -// Ok(client_conn) -// } -// }) -// }); - -// client -// .authenticate_and_connect(false, &cx.to_async()) -// .await -// 
.unwrap(); - -// Channel::init(&client); -// Project::init(&client); -// cx.update(|cx| { -// workspace::init(&client, cx); -// }); - -// let peer_id = PeerId(connection_id_rx.next().await.unwrap().0); -// let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); - -// let client = TestClient { -// client, -// peer_id, -// username: name.to_string(), -// user_store, -// language_registry: Arc::new(LanguageRegistry::test()), -// project: Default::default(), -// buffers: Default::default(), -// }; -// client.wait_for_current_user(cx).await; -// client -// } - -// fn disconnect_client(&self, user_id: UserId) { -// self.connection_killers -// .lock() -// .remove(&user_id) -// .unwrap() -// .store(true, SeqCst); -// } - -// fn forbid_connections(&self) { -// self.forbid_connections.store(true, SeqCst); -// } - -// fn allow_connections(&self) { -// self.forbid_connections.store(false, SeqCst); -// } - -// async fn build_app_state(test_db: &TestDb) -> Arc { -// let mut config = Config::default(); -// config.database_url = test_db.url.clone(); -// Arc::new(AppState { -// db: test_db.db().clone(), -// config, -// }) -// } - -// async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> { -// self.server.store.read().await -// } - -// async fn condition(&mut self, mut predicate: F) -// where -// F: FnMut(&Store) -> bool, -// { -// async_std::future::timeout(Duration::from_millis(500), async { -// while !(predicate)(&*self.server.store.read().await) { -// self.foreground.start_waiting(); -// self.notifications.next().await; -// self.foreground.finish_waiting(); -// } -// }) -// .await -// .expect("condition timed out"); -// } -// } - -// impl Deref for TestServer { -// type Target = Server; - -// fn deref(&self) -> &Self::Target { -// &self.server -// } -// } - -// impl Drop for TestServer { -// fn drop(&mut self) { -// self.peer.reset(); -// } -// } - -// struct TestClient { -// client: Arc, -// username: String, -// pub peer_id: PeerId, -// pub 
user_store: ModelHandle, -// language_registry: Arc, -// project: Option>, -// buffers: HashSet>, -// } - -// impl Deref for TestClient { -// type Target = Arc; - -// fn deref(&self) -> &Self::Target { -// &self.client -// } -// } - -// impl TestClient { -// pub fn current_user_id(&self, cx: &TestAppContext) -> UserId { -// UserId::from_proto( -// self.user_store -// .read_with(cx, |user_store, _| user_store.current_user().unwrap().id), -// ) -// } - -// async fn wait_for_current_user(&self, cx: &TestAppContext) { -// let mut authed_user = self -// .user_store -// .read_with(cx, |user_store, _| user_store.watch_current_user()); -// while authed_user.next().await.unwrap().is_none() {} -// } - -// async fn build_local_project( -// &mut self, -// fs: Arc, -// root_path: impl AsRef, -// cx: &mut TestAppContext, -// ) -> (ModelHandle, WorktreeId) { -// let project = cx.update(|cx| { -// Project::local( -// self.client.clone(), -// self.user_store.clone(), -// self.language_registry.clone(), -// fs, -// cx, -// ) -// }); -// self.project = Some(project.clone()); -// let (worktree, _) = project -// .update(cx, |p, cx| { -// p.find_or_create_local_worktree(root_path, true, cx) -// }) -// .await -// .unwrap(); -// worktree -// .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) -// .await; -// project -// .update(cx, |project, _| project.next_remote_id()) -// .await; -// (project, worktree.read_with(cx, |tree, _| tree.id())) -// } - -// async fn build_remote_project( -// &mut self, -// project_id: u64, -// cx: &mut TestAppContext, -// ) -> ModelHandle { -// let project = Project::remote( -// project_id, -// self.client.clone(), -// self.user_store.clone(), -// self.language_registry.clone(), -// FakeFs::new(cx.background()), -// &mut cx.to_async(), -// ) -// .await -// .unwrap(); -// self.project = Some(project.clone()); -// project -// } - -// fn build_workspace( -// &self, -// project: &ModelHandle, -// cx: &mut TestAppContext, -// ) -> ViewHandle { -// let 
(window_id, _) = cx.add_window(|_| EmptyView); -// cx.add_view(window_id, |cx| { -// let fs = project.read(cx).fs().clone(); -// Workspace::new( -// &WorkspaceParams { -// fs, -// project: project.clone(), -// user_store: self.user_store.clone(), -// languages: self.language_registry.clone(), -// themes: ThemeRegistry::new((), cx.font_cache().clone()), -// channel_list: cx.add_model(|cx| { -// ChannelList::new(self.user_store.clone(), self.client.clone(), cx) -// }), -// client: self.client.clone(), -// }, -// cx, -// ) -// }) -// } - -// async fn simulate_host( -// mut self, -// project: ModelHandle, -// files: Arc>>, -// op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, -// rng: Arc>, -// mut cx: TestAppContext, -// ) -> (Self, TestAppContext, Option) { -// async fn simulate_host_internal( -// client: &mut TestClient, -// project: ModelHandle, -// files: Arc>>, -// mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, -// rng: Arc>, -// cx: &mut TestAppContext, -// ) -> anyhow::Result<()> { -// let fs = project.read_with(cx, |project, _| project.fs().clone()); - -// while op_start_signal.next().await.is_some() { -// let distribution = rng.lock().gen_range::(0..100); -// match distribution { -// 0..=20 if !files.lock().is_empty() => { -// let path = files.lock().choose(&mut *rng.lock()).unwrap().clone(); -// let mut path = path.as_path(); -// while let Some(parent_path) = path.parent() { -// path = parent_path; -// if rng.lock().gen() { -// break; -// } -// } - -// log::info!("Host: find/create local worktree {:?}", path); -// let find_or_create_worktree = project.update(cx, |project, cx| { -// project.find_or_create_local_worktree(path, true, cx) -// }); -// if rng.lock().gen() { -// cx.background().spawn(find_or_create_worktree).detach(); -// } else { -// find_or_create_worktree.await?; -// } -// } -// 10..=80 if !files.lock().is_empty() => { -// let buffer = if client.buffers.is_empty() || rng.lock().gen() { -// let file = 
files.lock().choose(&mut *rng.lock()).unwrap().clone(); -// let (worktree, path) = project -// .update(cx, |project, cx| { -// project.find_or_create_local_worktree( -// file.clone(), -// true, -// cx, -// ) -// }) -// .await?; -// let project_path = -// worktree.read_with(cx, |worktree, _| (worktree.id(), path)); -// log::info!( -// "Host: opening path {:?}, worktree {}, relative_path {:?}", -// file, -// project_path.0, -// project_path.1 -// ); -// let buffer = project -// .update(cx, |project, cx| project.open_buffer(project_path, cx)) -// .await -// .unwrap(); -// client.buffers.insert(buffer.clone()); -// buffer -// } else { -// client -// .buffers -// .iter() -// .choose(&mut *rng.lock()) -// .unwrap() -// .clone() -// }; - -// if rng.lock().gen_bool(0.1) { -// cx.update(|cx| { -// log::info!( -// "Host: dropping buffer {:?}", -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// client.buffers.remove(&buffer); -// drop(buffer); -// }); -// } else { -// buffer.update(cx, |buffer, cx| { -// log::info!( -// "Host: updating buffer {:?} ({})", -// buffer.file().unwrap().full_path(cx), -// buffer.remote_id() -// ); - -// if rng.lock().gen_bool(0.7) { -// buffer.randomly_edit(&mut *rng.lock(), 5, cx); -// } else { -// buffer.randomly_undo_redo(&mut *rng.lock(), cx); -// } -// }); -// } -// } -// _ => loop { -// let path_component_count = rng.lock().gen_range::(1..=5); -// let mut path = PathBuf::new(); -// path.push("/"); -// for _ in 0..path_component_count { -// let letter = rng.lock().gen_range(b'a'..=b'z'); -// path.push(std::str::from_utf8(&[letter]).unwrap()); -// } -// path.set_extension("rs"); -// let parent_path = path.parent().unwrap(); - -// log::info!("Host: creating file {:?}", path,); - -// if fs.create_dir(&parent_path).await.is_ok() -// && fs.create_file(&path, Default::default()).await.is_ok() -// { -// files.lock().push(path); -// break; -// } else { -// log::info!("Host: cannot create file"); -// } -// }, -// } - -// 
cx.background().simulate_random_delay().await; -// } - -// Ok(()) -// } - -// let result = simulate_host_internal( -// &mut self, -// project.clone(), -// files, -// op_start_signal, -// rng, -// &mut cx, -// ) -// .await; -// log::info!("Host done"); -// self.project = Some(project); -// (self, cx, result.err()) -// } - -// pub async fn simulate_guest( -// mut self, -// guest_username: String, -// project: ModelHandle, -// op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, -// rng: Arc>, -// mut cx: TestAppContext, -// ) -> (Self, TestAppContext, Option) { -// async fn simulate_guest_internal( -// client: &mut TestClient, -// guest_username: &str, -// project: ModelHandle, -// mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, -// rng: Arc>, -// cx: &mut TestAppContext, -// ) -> anyhow::Result<()> { -// while op_start_signal.next().await.is_some() { -// let buffer = if client.buffers.is_empty() || rng.lock().gen() { -// let worktree = if let Some(worktree) = -// project.read_with(cx, |project, cx| { -// project -// .worktrees(&cx) -// .filter(|worktree| { -// let worktree = worktree.read(cx); -// worktree.is_visible() -// && worktree.entries(false).any(|e| e.is_file()) -// }) -// .choose(&mut *rng.lock()) -// }) { -// worktree -// } else { -// cx.background().simulate_random_delay().await; -// continue; -// }; - -// let (worktree_root_name, project_path) = -// worktree.read_with(cx, |worktree, _| { -// let entry = worktree -// .entries(false) -// .filter(|e| e.is_file()) -// .choose(&mut *rng.lock()) -// .unwrap(); -// ( -// worktree.root_name().to_string(), -// (worktree.id(), entry.path.clone()), -// ) -// }); -// log::info!( -// "{}: opening path {:?} in worktree {} ({})", -// guest_username, -// project_path.1, -// project_path.0, -// worktree_root_name, -// ); -// let buffer = project -// .update(cx, |project, cx| { -// project.open_buffer(project_path.clone(), cx) -// }) -// .await?; -// log::info!( -// "{}: opened path {:?} in 
worktree {} ({}) with buffer id {}", -// guest_username, -// project_path.1, -// project_path.0, -// worktree_root_name, -// buffer.read_with(cx, |buffer, _| buffer.remote_id()) -// ); -// client.buffers.insert(buffer.clone()); -// buffer -// } else { -// client -// .buffers -// .iter() -// .choose(&mut *rng.lock()) -// .unwrap() -// .clone() -// }; - -// let choice = rng.lock().gen_range(0..100); -// match choice { -// 0..=9 => { -// cx.update(|cx| { -// log::info!( -// "{}: dropping buffer {:?}", -// guest_username, -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// client.buffers.remove(&buffer); -// drop(buffer); -// }); -// } -// 10..=19 => { -// let completions = project.update(cx, |project, cx| { -// log::info!( -// "{}: requesting completions for buffer {} ({:?})", -// guest_username, -// buffer.read(cx).remote_id(), -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); -// project.completions(&buffer, offset, cx) -// }); -// let completions = cx.background().spawn(async move { -// completions -// .await -// .map_err(|err| anyhow!("completions request failed: {:?}", err)) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: detaching completions request", guest_username); -// cx.update(|cx| completions.detach_and_log_err(cx)); -// } else { -// completions.await?; -// } -// } -// 20..=29 => { -// let code_actions = project.update(cx, |project, cx| { -// log::info!( -// "{}: requesting code actions for buffer {} ({:?})", -// guest_username, -// buffer.read(cx).remote_id(), -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); -// project.code_actions(&buffer, range, cx) -// }); -// let code_actions = cx.background().spawn(async move { -// code_actions.await.map_err(|err| { -// anyhow!("code actions request failed: {:?}", err) -// }) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: 
detaching code actions request", guest_username); -// cx.update(|cx| code_actions.detach_and_log_err(cx)); -// } else { -// code_actions.await?; -// } -// } -// 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { -// let (requested_version, save) = buffer.update(cx, |buffer, cx| { -// log::info!( -// "{}: saving buffer {} ({:?})", -// guest_username, -// buffer.remote_id(), -// buffer.file().unwrap().full_path(cx) -// ); -// (buffer.version(), buffer.save(cx)) -// }); -// let save = cx.background().spawn(async move { -// let (saved_version, _) = save -// .await -// .map_err(|err| anyhow!("save request failed: {:?}", err))?; -// assert!(saved_version.observed_all(&requested_version)); -// Ok::<_, anyhow::Error>(()) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: detaching save request", guest_username); -// cx.update(|cx| save.detach_and_log_err(cx)); -// } else { -// save.await?; -// } -// } -// 40..=44 => { -// let prepare_rename = project.update(cx, |project, cx| { -// log::info!( -// "{}: preparing rename for buffer {} ({:?})", -// guest_username, -// buffer.read(cx).remote_id(), -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); -// project.prepare_rename(buffer, offset, cx) -// }); -// let prepare_rename = cx.background().spawn(async move { -// prepare_rename.await.map_err(|err| { -// anyhow!("prepare rename request failed: {:?}", err) -// }) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: detaching prepare rename request", guest_username); -// cx.update(|cx| prepare_rename.detach_and_log_err(cx)); -// } else { -// prepare_rename.await?; -// } -// } -// 45..=49 => { -// let definitions = project.update(cx, |project, cx| { -// log::info!( -// "{}: requesting definitions for buffer {} ({:?})", -// guest_username, -// buffer.read(cx).remote_id(), -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// let offset = 
rng.lock().gen_range(0..=buffer.read(cx).len()); -// project.definition(&buffer, offset, cx) -// }); -// let definitions = cx.background().spawn(async move { -// definitions -// .await -// .map_err(|err| anyhow!("definitions request failed: {:?}", err)) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: detaching definitions request", guest_username); -// cx.update(|cx| definitions.detach_and_log_err(cx)); -// } else { -// client -// .buffers -// .extend(definitions.await?.into_iter().map(|loc| loc.buffer)); -// } -// } -// 50..=54 => { -// let highlights = project.update(cx, |project, cx| { -// log::info!( -// "{}: requesting highlights for buffer {} ({:?})", -// guest_username, -// buffer.read(cx).remote_id(), -// buffer.read(cx).file().unwrap().full_path(cx) -// ); -// let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); -// project.document_highlights(&buffer, offset, cx) -// }); -// let highlights = cx.background().spawn(async move { -// highlights -// .await -// .map_err(|err| anyhow!("highlights request failed: {:?}", err)) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: detaching highlights request", guest_username); -// cx.update(|cx| highlights.detach_and_log_err(cx)); -// } else { -// highlights.await?; -// } -// } -// 55..=59 => { -// let search = project.update(cx, |project, cx| { -// let query = rng.lock().gen_range('a'..='z'); -// log::info!("{}: project-wide search {:?}", guest_username, query); -// project.search(SearchQuery::text(query, false, false), cx) -// }); -// let search = cx.background().spawn(async move { -// search -// .await -// .map_err(|err| anyhow!("search request failed: {:?}", err)) -// }); -// if rng.lock().gen_bool(0.3) { -// log::info!("{}: detaching search request", guest_username); -// cx.update(|cx| search.detach_and_log_err(cx)); -// } else { -// client.buffers.extend(search.await?.into_keys()); -// } -// } -// _ => { -// buffer.update(cx, |buffer, cx| { -// log::info!( -// "{}: updating 
buffer {} ({:?})", -// guest_username, -// buffer.remote_id(), -// buffer.file().unwrap().full_path(cx) -// ); -// if rng.lock().gen_bool(0.7) { -// buffer.randomly_edit(&mut *rng.lock(), 5, cx); -// } else { -// buffer.randomly_undo_redo(&mut *rng.lock(), cx); -// } -// }); -// } -// } -// cx.background().simulate_random_delay().await; -// } -// Ok(()) -// } - -// let result = simulate_guest_internal( -// &mut self, -// &guest_username, -// project.clone(), -// op_start_signal, -// rng, -// &mut cx, -// ) -// .await; -// log::info!("{}: done", guest_username); - -// self.project = Some(project); -// (self, cx, result.err()) -// } -// } - -// impl Drop for TestClient { -// fn drop(&mut self) { -// self.client.tear_down(); -// } -// } - -// impl Executor for Arc { -// type Timer = gpui::executor::Timer; - -// fn spawn_detached>(&self, future: F) { -// self.spawn(future).detach(); -// } - -// fn timer(&self, duration: Duration) -> Self::Timer { -// self.as_ref().timer(duration) -// } -// } - -// fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> { -// channel -// .messages() -// .cursor::<()>() -// .map(|m| { -// ( -// m.sender.github_login.clone(), -// m.body.clone(), -// m.is_pending(), -// ) -// }) -// .collect() -// } - -// struct EmptyView; - -// impl gpui::Entity for EmptyView { -// type Event = (); -// } - -// impl gpui::View for EmptyView { -// fn ui_name() -> &'static str { -// "empty view" -// } - -// fn render(&mut self, _: &mut gpui::RenderContext) -> gpui::ElementBox { -// gpui::Element::boxed(gpui::elements::Empty) -// } -// } -// } - -use axum::{body::Body, Router}; -use client::Peer; +pub async fn handle_websocket_request( + TypedHeader(ProtocolVersion(protocol_version)): TypedHeader, + ConnectInfo(socket_address): ConnectInfo, + Extension(server): Extension>, + Extension(user_id): Extension, + ws: WebSocketUpgrade, +) -> Response { + if protocol_version != rpc::PROTOCOL_VERSION { + return ( + StatusCode::UPGRADE_REQUIRED, + "client 
must be upgraded".to_string(), + ) + .into_response(); + } + let socket_address = socket_address.to_string(); + ws.on_upgrade(move |socket| { + let socket = socket + .map_ok(to_tungstenite_message) + .err_into() + .with(|message| async move { Ok(to_axum_message(message)) }); + let connection = Connection::new(Box::pin(socket)); + server.handle_connection(connection, socket_address, user_id, None, RealExecutor) + }) +} + +fn to_axum_message(message: TungsteniteMessage) -> AxumMessage { + match message { + TungsteniteMessage::Text(payload) => AxumMessage::Text(payload), + TungsteniteMessage::Binary(payload) => AxumMessage::Binary(payload), + TungsteniteMessage::Ping(payload) => AxumMessage::Ping(payload), + TungsteniteMessage::Pong(payload) => AxumMessage::Pong(payload), + TungsteniteMessage::Close(frame) => AxumMessage::Close(frame.map(|frame| AxumCloseFrame { + code: frame.code.into(), + reason: frame.reason, + })), + } +} + +fn to_tungstenite_message(message: AxumMessage) -> TungsteniteMessage { + match message { + AxumMessage::Text(payload) => TungsteniteMessage::Text(payload), + AxumMessage::Binary(payload) => TungsteniteMessage::Binary(payload), + AxumMessage::Ping(payload) => TungsteniteMessage::Ping(payload), + AxumMessage::Pong(payload) => TungsteniteMessage::Pong(payload), + AxumMessage::Close(frame) => { + TungsteniteMessage::Close(frame.map(|frame| TungsteniteCloseFrame { + code: frame.code.into(), + reason: frame.reason, + })) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + db::{tests::TestDb, UserId}, + AppState, + }; + use ::rpc::Peer; + use client::{ + self, test::FakeHttpClient, Channel, ChannelDetails, ChannelList, Client, Credentials, + EstablishConnectionError, UserStore, RECEIVE_TIMEOUT, + }; + use collections::BTreeMap; + use editor::{ + self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Input, Redo, Rename, + ToOffset, ToggleCodeActions, Undo, + }; + use gpui::{ + executor::{self, Deterministic}, + 
geometry::vector::vec2f, + ModelHandle, TestAppContext, ViewHandle, + }; + use language::{ + range_to_lsp, tree_sitter_rust, Diagnostic, DiagnosticEntry, FakeLspAdapter, Language, + LanguageConfig, LanguageRegistry, OffsetRangeExt, Point, Rope, + }; + use lsp::{self, FakeLanguageServer}; + use parking_lot::Mutex; + use project::{ + fs::{FakeFs, Fs as _}, + search::SearchQuery, + worktree::WorktreeHandle, + DiagnosticSummary, Project, ProjectPath, WorktreeId, + }; + use rand::prelude::*; + use rpc::PeerId; + use serde_json::json; + use settings::Settings; + use sqlx::types::time::OffsetDateTime; + use std::{ + env, + ops::Deref, + path::{Path, PathBuf}, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, + }, + time::Duration, + }; + use theme::ThemeRegistry; + use workspace::{Item, SplitDirection, ToggleFollow, Workspace, WorkspaceParams}; + + #[cfg(test)] + #[ctor::ctor] + fn init_logger() { + if std::env::var("RUST_LOG").is_ok() { + env_logger::init(); + } + } + + #[gpui::test(iterations = 10)] + async fn test_share_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let (window_b, _) = cx_b.add_window(|_| EmptyView); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + cx_a.foreground().forbid_parking(); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + "b.txt": "b-contents", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + let replica_id_b = project_b.read_with(cx_b, |project, _| { + assert_eq!( + project + .collaborators() + .get(&client_a.peer_id) + .unwrap() + .user + .github_login, + "user_a" + ); + project.replica_id() + }); + project_a + .condition(&cx_a, |tree, _| { + tree.collaborators() + .get(&client_b.peer_id) + .map_or(false, |collaborator| { + collaborator.replica_id == replica_id_b + && collaborator.user.github_login == "user_b" + }) + }) + .await; + + // Open the same file as client B and client A. 
+ let buffer_b = project_b + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) + .await + .unwrap(); + buffer_b.read_with(cx_b, |buf, _| assert_eq!(buf.text(), "b-contents")); + project_a.read_with(cx_a, |project, cx| { + assert!(project.has_open_buffer((worktree_id, "b.txt"), cx)) + }); + let buffer_a = project_a + .update(cx_a, |p, cx| p.open_buffer((worktree_id, "b.txt"), cx)) + .await + .unwrap(); + + let editor_b = cx_b.add_view(window_b, |cx| Editor::for_buffer(buffer_b, None, cx)); + + // TODO + // // Create a selection set as client B and see that selection set as client A. + // buffer_a + // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1) + // .await; + + // Edit the buffer as client B and see that edit as client A. + editor_b.update(cx_b, |editor, cx| { + editor.handle_input(&Input("ok, ".into()), cx) + }); + buffer_a + .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents") + .await; + + // TODO + // // Remove the selection set as client B, see those selections disappear as client A. + cx_b.update(move |_| drop(editor_b)); + // buffer_a + // .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0) + // .await; + + // Dropping the client B's project removes client B from client A's collaborators. + cx_b.update(move |_| drop(project_b)); + project_a + .condition(&cx_a, |project, _| project.collaborators().is_empty()) + .await; + } + + #[gpui::test(iterations = 10)] + async fn test_unshare_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + cx_a.foreground().forbid_parking(); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + "b.txt": "b-contents", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + project_b + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + + // Unshare the project as client A + project_a.update(cx_a, |project, cx| project.unshare(cx)); + project_b + .condition(cx_b, |project, _| project.is_read_only()) + .await; + assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); + cx_b.update(|_| { + drop(project_b); + }); + + // Share the project again and ensure guests can still join. 
+ project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + + let project_b2 = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + project_b2 + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + } + + #[gpui::test(iterations = 10)] + async fn test_host_disconnect(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + cx_a.foreground().forbid_parking(); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + "b.txt": "b-contents", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + 
lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + project_b + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + + // Drop client A's connection. Collaborators should disappear and the project should not be shown as shared. + server.disconnect_client(client_a.current_user_id(cx_a)); + cx_a.foreground().advance_clock(rpc::RECEIVE_TIMEOUT); + project_a + .condition(cx_a, |project, _| project.collaborators().is_empty()) + .await; + project_a.read_with(cx_a, |project, _| assert!(!project.is_shared())); + project_b + .condition(cx_b, |project, _| project.is_read_only()) + .await; + assert!(worktree_a.read_with(cx_a, |tree, _| !tree.as_local().unwrap().is_shared())); + cx_b.update(|_| { + drop(project_b); + }); + + // Await reconnection + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + + // Share the project again and ensure guests can still join. + project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); + + let project_b2 = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + project_b2 + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + } + + #[gpui::test(iterations = 10)] + async fn test_propagate_saves_and_fs_changes( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + cx_c: &mut TestAppContext, + ) { + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + cx_a.foreground().forbid_parking(); + + // Connect to a server as 3 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + let client_c = server.create_client(cx_c, "user_c").await; + + // Share a worktree as client A. + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, + "file1": "", + "file2": "" + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join that worktree as clients B and C. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + let project_c = Project::remote( + project_id, + client_c.clone(), + client_c.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_c.to_async(), + ) + .await + .unwrap(); + let worktree_b = project_b.read_with(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); + let worktree_c = project_c.read_with(cx_c, |p, cx| p.worktrees(cx).next().unwrap()); + + // Open and edit a buffer as both guests B and C. 
+ let buffer_b = project_b + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) + .await + .unwrap(); + let buffer_c = project_c + .update(cx_c, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) + .await + .unwrap(); + buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx)); + buffer_c.update(cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx)); + + // Open and edit that buffer as the host. + let buffer_a = project_a + .update(cx_a, |p, cx| p.open_buffer((worktree_id, "file1"), cx)) + .await + .unwrap(); + + buffer_a + .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, ") + .await; + buffer_a.update(cx_a, |buf, cx| { + buf.edit([buf.len()..buf.len()], "i-am-a", cx) + }); + + // Wait for edits to propagate + buffer_a + .condition(cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") + .await; + buffer_b + .condition(cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") + .await; + buffer_c + .condition(cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a") + .await; + + // Edit the buffer as the host and concurrently save as guest B. + let save_b = buffer_b.update(cx_b, |buf, cx| buf.save(cx)); + buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx)); + save_b.await.unwrap(); + assert_eq!( + fs.load("/a/file1".as_ref()).await.unwrap(), + "hi-a, i-am-c, i-am-b, i-am-a" + ); + buffer_a.read_with(cx_a, |buf, _| assert!(!buf.is_dirty())); + buffer_b.read_with(cx_b, |buf, _| assert!(!buf.is_dirty())); + buffer_c.condition(cx_c, |buf, _| !buf.is_dirty()).await; + + worktree_a.flush_fs_events(cx_a).await; + + // Make changes on host's file system, see those changes on guest worktrees. 
+ fs.rename(
+ "/a/file1".as_ref(),
+ "/a/file1-renamed".as_ref(),
+ Default::default(),
+ )
+ .await
+ .unwrap();
+
+ fs.rename("/a/file2".as_ref(), "/a/file3".as_ref(), Default::default())
+ .await
+ .unwrap();
+ fs.insert_file(Path::new("/a/file4"), "4".into()).await;
+
+ worktree_a
+ .condition(&cx_a, |tree, _| {
+ tree.paths()
+ .map(|p| p.to_string_lossy())
+ .collect::<Vec<_>>()
+ == [".zed.toml", "file1-renamed", "file3", "file4"]
+ })
+ .await;
+ worktree_b
+ .condition(&cx_b, |tree, _| {
+ tree.paths()
+ .map(|p| p.to_string_lossy())
+ .collect::<Vec<_>>()
+ == [".zed.toml", "file1-renamed", "file3", "file4"]
+ })
+ .await;
+ worktree_c
+ .condition(&cx_c, |tree, _| {
+ tree.paths()
+ .map(|p| p.to_string_lossy())
+ .collect::<Vec<_>>()
+ == [".zed.toml", "file1-renamed", "file3", "file4"]
+ })
+ .await;
+
+ // Ensure buffer files are updated as well.
+ buffer_a
+ .condition(&cx_a, |buf, _| {
+ buf.file().unwrap().path().to_str() == Some("file1-renamed")
+ })
+ .await;
+ buffer_b
+ .condition(&cx_b, |buf, _| {
+ buf.file().unwrap().path().to_str() == Some("file1-renamed")
+ })
+ .await;
+ buffer_c
+ .condition(&cx_c, |buf, _| {
+ buf.file().unwrap().path().to_str() == Some("file1-renamed")
+ })
+ .await;
+ }
+
+ #[gpui::test(iterations = 10)]
+ async fn test_buffer_conflict_after_save(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) {
+ cx_a.foreground().forbid_parking();
+ let lang_registry = Arc::new(LanguageRegistry::test());
+ let fs = FakeFs::new(cx_a.background());
+
+ // Connect to a server as 2 clients.
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, + "a.txt": "a-contents", + }), + ) + .await; + + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/dir", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Open a buffer as client B + let buffer_b = project_b + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + + buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "world ", cx)); + buffer_b.read_with(cx_b, |buf, _| { + assert!(buf.is_dirty()); + assert!(!buf.has_conflict()); + }); + + buffer_b.update(cx_b, |buf, cx| buf.save(cx)).await.unwrap(); + buffer_b + .condition(&cx_b, |buffer_b, _| !buffer_b.is_dirty()) + .await; + buffer_b.read_with(cx_b, |buf, _| { + assert!(!buf.has_conflict()); + }); + + buffer_b.update(cx_b, |buf, cx| buf.edit([0..0], "hello ", cx)); + buffer_b.read_with(cx_b, |buf, _| { + assert!(buf.is_dirty()); + assert!(!buf.has_conflict()); + }); + } + + #[gpui::test(iterations = 10)] + async fn test_buffer_reloading(cx_a: 
&mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, + "a.txt": "a-contents", + }), + ) + .await; + + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/dir", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + let _worktree_b = project_b.update(cx_b, |p, cx| p.worktrees(cx).next().unwrap()); + + // Open a buffer as client B + let buffer_b = project_b + .update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + buffer_b.read_with(cx_b, |buf, _| { + assert!(!buf.is_dirty()); + assert!(!buf.has_conflict()); + }); + + fs.save(Path::new("/dir/a.txt"), &"new contents".into()) + .await + .unwrap(); + buffer_b + .condition(&cx_b, |buf, _| { + buf.text() == "new contents" && !buf.is_dirty() + }) + .await; + buffer_b.read_with(cx_b, |buf, 
_| { + assert!(!buf.has_conflict()); + }); + } + + #[gpui::test(iterations = 10)] + async fn test_editing_while_guest_opens_buffer( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/dir", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Open a buffer as client A + let buffer_a = project_a + .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx)) + .await + .unwrap(); + + // Start opening the same buffer as client B + let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); + + // Edit the buffer as client A while client B is still opening it. 
+ cx_b.background().simulate_random_delay().await; + buffer_a.update(cx_a, |buf, cx| buf.edit([0..0], "X", cx)); + cx_b.background().simulate_random_delay().await; + buffer_a.update(cx_a, |buf, cx| buf.edit([1..1], "Y", cx)); + + let text = buffer_a.read_with(cx_a, |buf, _| buf.text()); + let buffer_b = buffer_b.await.unwrap(); + buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await; + } + + #[gpui::test(iterations = 10)] + async fn test_leaving_worktree_while_opening_buffer( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/dir", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join that project as client B + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // See that a guest has joined as client A. 
+ project_a + .condition(&cx_a, |p, _| p.collaborators().len() == 1) + .await; + + // Begin opening a buffer as client B, but leave the project before the open completes. + let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))); + cx_b.update(|_| drop(project_b)); + drop(buffer_b); + + // See that the guest has left. + project_a + .condition(&cx_a, |p, _| p.collaborators().len() == 0) + .await; + } + + #[gpui::test(iterations = 10)] + async fn test_leaving_project(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.txt": "a-contents", + "b.txt": "b-contents", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a + .update(cx_a, |project, _| project.next_remote_id()) + .await; + project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + + // Join that project as client B + let _project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Client A sees that a guest has 
joined. + project_a + .condition(cx_a, |p, _| p.collaborators().len() == 1) + .await; + + // Drop client B's connection and ensure client A observes client B leaving the project. + client_b.disconnect(&cx_b.to_async()).unwrap(); + project_a + .condition(cx_a, |p, _| p.collaborators().len() == 0) + .await; + + // Rejoin the project as client B + let _project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Client A sees that a guest has re-joined. + project_a + .condition(cx_a, |p, _| p.collaborators().len() == 1) + .await; + + // Simulate connection loss for client B and ensure client A observes client B leaving the project. + client_b.wait_for_current_user(cx_b).await; + server.disconnect_client(client_b.current_user_id(cx_b)); + cx_a.foreground().advance_clock(Duration::from_secs(3)); + project_a + .condition(cx_a, |p, _| p.collaborators().len() == 0) + .await; + } + + #[gpui::test(iterations = 10)] + async fn test_collaborating_with_diagnostics( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await;
+ let client_a = server.create_client(cx_a, "user_a").await;
+ let client_b = server.create_client(cx_b, "user_b").await;
+
+ // Share a project as client A
+ fs.insert_tree(
+ "/a",
+ json!({
+ ".zed.toml": r#"collaborators = ["user_b"]"#,
+ "a.rs": "let one = two",
+ "other.rs": "",
+ }),
+ )
+ .await;
+ let project_a = cx_a.update(|cx| {
+ Project::local(
+ client_a.clone(),
+ client_a.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ cx,
+ )
+ });
+ let (worktree_a, _) = project_a
+ .update(cx_a, |p, cx| {
+ p.find_or_create_local_worktree("/a", true, cx)
+ })
+ .await
+ .unwrap();
+ worktree_a
+ .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
+ .await;
+ let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await;
+ let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id());
+ project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap();
+
+ // Cause the language server to start.
+ let _ = cx_a
+ .background()
+ .spawn(project_a.update(cx_a, |project, cx| {
+ project.open_buffer(
+ ProjectPath {
+ worktree_id,
+ path: Path::new("other.rs").into(),
+ },
+ cx,
+ )
+ }))
+ .await
+ .unwrap();
+
+ // Simulate a language server reporting errors for a file.
+ let mut fake_language_server = fake_language_servers.next().await.unwrap();
+ fake_language_server
+ .receive_notification::<lsp::notification::DidOpenTextDocument>()
+ .await;
+ fake_language_server.notify::<lsp::notification::PublishDiagnostics>(
+ lsp::PublishDiagnosticsParams {
+ uri: lsp::Url::from_file_path("/a/a.rs").unwrap(),
+ version: None,
+ diagnostics: vec![lsp::Diagnostic {
+ severity: Some(lsp::DiagnosticSeverity::ERROR),
+ range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)),
+ message: "message 1".to_string(),
+ ..Default::default()
+ }],
+ },
+ );
+
+ // Wait for server to see the diagnostics update.
+ server
+ .condition(|store| {
+ let worktree = store
+ .project(project_id)
+ .unwrap()
+ .share
+ .as_ref()
+ .unwrap()
+ .worktrees
+ .get(&worktree_id.to_proto())
+ .unwrap();
+
+ !worktree.diagnostic_summaries.is_empty()
+ })
+ .await;
+
+ // Join the worktree as client B.
+ let project_b = Project::remote(
+ project_id,
+ client_b.clone(),
+ client_b.user_store.clone(),
+ lang_registry.clone(),
+ fs.clone(),
+ &mut cx_b.to_async(),
+ )
+ .await
+ .unwrap();
+
+ project_b.read_with(cx_b, |project, cx| {
+ assert_eq!(
+ project.diagnostic_summaries(cx).collect::<Vec<_>>(),
+ &[(
+ ProjectPath {
+ worktree_id,
+ path: Arc::from(Path::new("a.rs")),
+ },
+ DiagnosticSummary {
+ error_count: 1,
+ warning_count: 0,
+ ..Default::default()
+ },
+ )]
+ )
+ });
+
+ // Simulate a language server reporting more errors for a file.
+ fake_language_server.notify::<lsp::notification::PublishDiagnostics>(
+ lsp::PublishDiagnosticsParams {
+ uri: lsp::Url::from_file_path("/a/a.rs").unwrap(),
+ version: None,
+ diagnostics: vec![
+ lsp::Diagnostic {
+ severity: Some(lsp::DiagnosticSeverity::ERROR),
+ range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 7)),
+ message: "message 1".to_string(),
+ ..Default::default()
+ },
+ lsp::Diagnostic {
+ severity: Some(lsp::DiagnosticSeverity::WARNING),
+ range: lsp::Range::new(
+ lsp::Position::new(0, 10),
+ lsp::Position::new(0, 13),
+ ),
+ message: "message 2".to_string(),
+ ..Default::default()
+ },
+ ],
+ },
+ );
+
+ // Client b gets the updated summaries
+ project_b
+ .condition(&cx_b, |project, cx| {
+ project.diagnostic_summaries(cx).collect::<Vec<_>>()
+ == &[(
+ ProjectPath {
+ worktree_id,
+ path: Arc::from(Path::new("a.rs")),
+ },
+ DiagnosticSummary {
+ error_count: 1,
+ warning_count: 1,
+ ..Default::default()
+ },
+ )]
+ })
+ .await;
+
+ // Open the file with the errors on client B. They should be present.
+ let buffer_b = cx_b
+ .background()
+ .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx)))
+ .await
+ .unwrap();
+
+ buffer_b.read_with(cx_b, |buffer, _| {
+ assert_eq!(
+ buffer
+ .snapshot()
+ .diagnostics_in_range::<_, Point>(0..buffer.len(), false)
+ .map(|entry| entry)
+ .collect::<Vec<_>>(),
+ &[
+ DiagnosticEntry {
+ range: Point::new(0, 4)..Point::new(0, 7),
+ diagnostic: Diagnostic {
+ group_id: 0,
+ message: "message 1".to_string(),
+ severity: lsp::DiagnosticSeverity::ERROR,
+ is_primary: true,
+ ..Default::default()
+ }
+ },
+ DiagnosticEntry {
+ range: Point::new(0, 10)..Point::new(0, 13),
+ diagnostic: Diagnostic {
+ group_id: 1,
+ severity: lsp::DiagnosticSeverity::WARNING,
+ message: "message 2".to_string(),
+ is_primary: true,
+ ..Default::default()
+ }
+ }
+ ]
+ );
+ });
+
+ // Simulate a language server reporting no errors for a file.
+ fake_language_server.notify::<lsp::notification::PublishDiagnostics>(
+ lsp::PublishDiagnosticsParams {
+ uri: lsp::Url::from_file_path("/a/a.rs").unwrap(),
+ version: None,
+ diagnostics: vec![],
+ },
+ );
+ project_a
+ .condition(cx_a, |project, cx| {
+ project.diagnostic_summaries(cx).collect::<Vec<_>>() == &[]
+ })
+ .await;
+ project_b
+ .condition(cx_b, |project, cx| {
+ project.diagnostic_summaries(cx).collect::<Vec<_>>() == &[]
+ })
+ .await;
+ }
+
+ #[gpui::test(iterations = 10)]
+ async fn test_collaborating_with_completion(
+ cx_a: &mut TestAppContext,
+ cx_b: &mut TestAppContext,
+ ) {
+ cx_a.foreground().forbid_parking();
+ let lang_registry = Arc::new(LanguageRegistry::test());
+ let fs = FakeFs::new(cx_a.background());
+
+ // Set up a fake language server.
+ let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { + capabilities: lsp::ServerCapabilities { + completion_provider: Some(lsp::CompletionOptions { + trigger_characters: Some(vec![".".to_string()]), + ..Default::default() + }), + ..Default::default() + }, + ..Default::default() + }); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "main.rs": "fn main() { a }", + "other.rs": "", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Open a file in an editor as the guest. 
+ let buffer_b = project_b
+ .update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx))
+ .await
+ .unwrap();
+ let (window_b, _) = cx_b.add_window(|_| EmptyView);
+ let editor_b = cx_b.add_view(window_b, |cx| {
+ Editor::for_buffer(buffer_b.clone(), Some(project_b.clone()), cx)
+ });
+
+ let fake_language_server = fake_language_servers.next().await.unwrap();
+ buffer_b
+ .condition(&cx_b, |buffer, _| !buffer.completion_triggers().is_empty())
+ .await;
+
+ // Type a completion trigger character as the guest.
+ editor_b.update(cx_b, |editor, cx| {
+ editor.select_ranges([13..13], None, cx);
+ editor.handle_input(&Input(".".into()), cx);
+ cx.focus(&editor_b);
+ });
+
+ // Receive a completion request as the host's language server.
+ // Return some completions from the host's language server.
+ cx_a.foreground().start_waiting();
+ fake_language_server
+ .handle_request::<lsp::request::Completion, _, _>(|params, _| async move {
+ assert_eq!(
+ params.text_document_position.text_document.uri,
+ lsp::Url::from_file_path("/a/main.rs").unwrap(),
+ );
+ assert_eq!(
+ params.text_document_position.position,
+ lsp::Position::new(0, 14),
+ );
+
+ Ok(Some(lsp::CompletionResponse::Array(vec![
+ lsp::CompletionItem {
+ label: "first_method(…)".into(),
+ detail: Some("fn(&mut self, B) -> C".into()),
+ text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
+ new_text: "first_method($1)".to_string(),
+ range: lsp::Range::new(
+ lsp::Position::new(0, 14),
+ lsp::Position::new(0, 14),
+ ),
+ })),
+ insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
+ ..Default::default()
+ },
+ lsp::CompletionItem {
+ label: "second_method(…)".into(),
+ detail: Some("fn(&mut self, C) -> D".into()),
+ text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
+ new_text: "second_method()".to_string(),
+ range: lsp::Range::new(
+ lsp::Position::new(0, 14),
+ lsp::Position::new(0, 14),
+ ),
+ })),
+ insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
+ ..Default::default()
+ },
+ ])))
+ })
+ .next()
+
.await
+ .unwrap();
+ cx_a.foreground().finish_waiting();
+
+ // Open the buffer on the host.
+ let buffer_a = project_a
+ .update(cx_a, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx))
+ .await
+ .unwrap();
+ buffer_a
+ .condition(&cx_a, |buffer, _| buffer.text() == "fn main() { a. }")
+ .await;
+
+ // Confirm a completion on the guest.
+ editor_b
+ .condition(&cx_b, |editor, _| editor.context_menu_visible())
+ .await;
+ editor_b.update(cx_b, |editor, cx| {
+ editor.confirm_completion(&ConfirmCompletion { item_ix: Some(0) }, cx);
+ assert_eq!(editor.text(cx), "fn main() { a.first_method() }");
+ });
+
+ // Return a resolved completion from the host's language server.
+ // The resolved completion has an additional text edit.
+ fake_language_server.handle_request::<lsp::request::ResolveCompletionItem, _, _>(
+ |params, _| async move {
+ assert_eq!(params.label, "first_method(…)");
+ Ok(lsp::CompletionItem {
+ label: "first_method(…)".into(),
+ detail: Some("fn(&mut self, B) -> C".into()),
+ text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
+ new_text: "first_method($1)".to_string(),
+ range: lsp::Range::new(
+ lsp::Position::new(0, 14),
+ lsp::Position::new(0, 14),
+ ),
+ })),
+ additional_text_edits: Some(vec![lsp::TextEdit {
+ new_text: "use d::SomeTrait;\n".to_string(),
+ range: lsp::Range::new(lsp::Position::new(0, 0), lsp::Position::new(0, 0)),
+ }]),
+ insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
+ ..Default::default()
+ })
+ },
+ );
+
+ // The additional edit is applied.
+ buffer_a + .condition(&cx_a, |buffer, _| { + buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" + }) + .await; + buffer_b + .condition(&cx_b, |buffer, _| { + buffer.text() == "use d::SomeTrait;\nfn main() { a.first_method() }" + }) + .await; + } + + #[gpui::test(iterations = 10)] + async fn test_reloading_buffer_manually(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.rs": "let one = 1;", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + let buffer_a = project_a + .update(cx_a, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx)) + .await + .unwrap(); + + // Join the worktree as client B. 
+ let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) + .await + .unwrap(); + buffer_b.update(cx_b, |buffer, cx| { + buffer.edit([4..7], "six", cx); + buffer.edit([10..11], "6", cx); + assert_eq!(buffer.text(), "let six = 6;"); + assert!(buffer.is_dirty()); + assert!(!buffer.has_conflict()); + }); + buffer_a + .condition(cx_a, |buffer, _| buffer.text() == "let six = 6;") + .await; + + fs.save(Path::new("/a/a.rs"), &Rope::from("let seven = 7;")) + .await + .unwrap(); + buffer_a + .condition(cx_a, |buffer, _| buffer.has_conflict()) + .await; + buffer_b + .condition(cx_b, |buffer, _| buffer.has_conflict()) + .await; + + project_b + .update(cx_b, |project, cx| { + project.reload_buffers(HashSet::from_iter([buffer_b.clone()]), true, cx) + }) + .await + .unwrap(); + buffer_a.read_with(cx_a, |buffer, _| { + assert_eq!(buffer.text(), "let seven = 7;"); + assert!(!buffer.is_dirty()); + assert!(!buffer.has_conflict()); + }); + buffer_b.read_with(cx_b, |buffer, _| { + assert_eq!(buffer.text(), "let seven = 7;"); + assert!(!buffer.is_dirty()); + assert!(!buffer.has_conflict()); + }); + + buffer_a.update(cx_a, |buffer, cx| { + // Undoing on the host is a no-op when the reload was initiated by the guest. + buffer.undo(cx); + assert_eq!(buffer.text(), "let seven = 7;"); + assert!(!buffer.is_dirty()); + assert!(!buffer.has_conflict()); + }); + buffer_b.update(cx_b, |buffer, cx| { + // Undoing on the guest rolls back the buffer to before it was reloaded but the conflict gets cleared. 
+ buffer.undo(cx); + assert_eq!(buffer.text(), "let six = 6;"); + assert!(buffer.is_dirty()); + assert!(!buffer.has_conflict()); + }); + } + + #[gpui::test(iterations = 10)] + async fn test_formatting_buffer(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.rs": "let one = two", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. 
+ let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) + .await + .unwrap(); + + let fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server.handle_request::(|_, _| async move { + Ok(Some(vec![ + lsp::TextEdit { + range: lsp::Range::new(lsp::Position::new(0, 4), lsp::Position::new(0, 4)), + new_text: "h".to_string(), + }, + lsp::TextEdit { + range: lsp::Range::new(lsp::Position::new(0, 7), lsp::Position::new(0, 7)), + new_text: "y".to_string(), + }, + ])) + }); + + project_b + .update(cx_b, |project, cx| { + project.format(HashSet::from_iter([buffer_b.clone()]), true, cx) + }) + .await + .unwrap(); + assert_eq!( + buffer_b.read_with(cx_b, |buffer, _| buffer.text()), + "let honey = two" + ); + } + + #[gpui::test(iterations = 10)] + async fn test_definition(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + fs.insert_tree( + "/root-1", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.rs": "const ONE: usize = b::TWO + b::THREE;", + }), + ) + .await; + fs.insert_tree( + "/root-2", + json!({ + "b.rs": "const TWO: usize = 2;\nconst THREE: usize = 3;", + }), + ) + .await; + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-1", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Open the file on client B. + let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) + .await + .unwrap(); + + // Request the definition of a symbol as the guest. 
+ let fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server.handle_request::( + |_, _| async move { + Ok(Some(lsp::GotoDefinitionResponse::Scalar( + lsp::Location::new( + lsp::Url::from_file_path("/root-2/b.rs").unwrap(), + lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), + ), + ))) + }, + ); + + let definitions_1 = project_b + .update(cx_b, |p, cx| p.definition(&buffer_b, 23, cx)) + .await + .unwrap(); + cx_b.read(|cx| { + assert_eq!(definitions_1.len(), 1); + assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); + let target_buffer = definitions_1[0].buffer.read(cx); + assert_eq!( + target_buffer.text(), + "const TWO: usize = 2;\nconst THREE: usize = 3;" + ); + assert_eq!( + definitions_1[0].range.to_point(target_buffer), + Point::new(0, 6)..Point::new(0, 9) + ); + }); + + // Try getting more definitions for the same buffer, ensuring the buffer gets reused from + // the previous call to `definition`. + fake_language_server.handle_request::( + |_, _| async move { + Ok(Some(lsp::GotoDefinitionResponse::Scalar( + lsp::Location::new( + lsp::Url::from_file_path("/root-2/b.rs").unwrap(), + lsp::Range::new(lsp::Position::new(1, 6), lsp::Position::new(1, 11)), + ), + ))) + }, + ); + + let definitions_2 = project_b + .update(cx_b, |p, cx| p.definition(&buffer_b, 33, cx)) + .await + .unwrap(); + cx_b.read(|cx| { + assert_eq!(definitions_2.len(), 1); + assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); + let target_buffer = definitions_2[0].buffer.read(cx); + assert_eq!( + target_buffer.text(), + "const TWO: usize = 2;\nconst THREE: usize = 3;" + ); + assert_eq!( + definitions_2[0].range.to_point(target_buffer), + Point::new(1, 6)..Point::new(1, 11) + ); + }); + assert_eq!(definitions_1[0].buffer, definitions_2[0].buffer); + } + + #[gpui::test(iterations = 10)] + async fn test_references(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = 
Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + fs.insert_tree( + "/root-1", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "one.rs": "const ONE: usize = 1;", + "two.rs": "const TWO: usize = one::ONE + one::ONE;", + }), + ) + .await; + fs.insert_tree( + "/root-2", + json!({ + "three.rs": "const THREE: usize = two::TWO + one::ONE;", + }), + ) + .await; + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-1", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Open the file on client B. 
+ let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) + .await + .unwrap(); + + // Request references to a symbol as the guest. + let fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server.handle_request::( + |params, _| async move { + assert_eq!( + params.text_document_position.text_document.uri.as_str(), + "file:///root-1/one.rs" + ); + Ok(Some(vec![ + lsp::Location { + uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), + range: lsp::Range::new( + lsp::Position::new(0, 24), + lsp::Position::new(0, 27), + ), + }, + lsp::Location { + uri: lsp::Url::from_file_path("/root-1/two.rs").unwrap(), + range: lsp::Range::new( + lsp::Position::new(0, 35), + lsp::Position::new(0, 38), + ), + }, + lsp::Location { + uri: lsp::Url::from_file_path("/root-2/three.rs").unwrap(), + range: lsp::Range::new( + lsp::Position::new(0, 37), + lsp::Position::new(0, 40), + ), + }, + ])) + }, + ); + + let references = project_b + .update(cx_b, |p, cx| p.references(&buffer_b, 7, cx)) + .await + .unwrap(); + cx_b.read(|cx| { + assert_eq!(references.len(), 3); + assert_eq!(project_b.read(cx).worktrees(cx).count(), 2); + + let two_buffer = references[0].buffer.read(cx); + let three_buffer = references[2].buffer.read(cx); + assert_eq!( + two_buffer.file().unwrap().path().as_ref(), + Path::new("two.rs") + ); + assert_eq!(references[1].buffer, references[0].buffer); + assert_eq!( + three_buffer.file().unwrap().full_path(cx), + Path::new("three.rs") + ); + + assert_eq!(references[0].range.to_offset(&two_buffer), 24..27); + assert_eq!(references[1].range.to_offset(&two_buffer), 35..38); + assert_eq!(references[2].range.to_offset(&three_buffer), 37..40); + }); + } + + #[gpui::test(iterations = 10)] + async fn test_project_search(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = 
FakeFs::new(cx_a.background()); + fs.insert_tree( + "/root-1", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a": "hello world", + "b": "goodnight moon", + "c": "a world of goo", + "d": "world champion of clown world", + }), + ) + .await; + fs.insert_tree( + "/root-2", + json!({ + "e": "disney world is fun", + }), + ) + .await; + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + + let (worktree_1, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-1", true, cx) + }) + .await + .unwrap(); + worktree_1 + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let (worktree_2, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-2", true, cx) + }) + .await + .unwrap(); + worktree_2 + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. 
+ let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + let results = project_b + .update(cx_b, |project, cx| { + project.search(SearchQuery::text("world", false, false), cx) + }) + .await + .unwrap(); + + let mut ranges_by_path = results + .into_iter() + .map(|(buffer, ranges)| { + buffer.read_with(cx_b, |buffer, cx| { + let path = buffer.file().unwrap().full_path(cx); + let offset_ranges = ranges + .into_iter() + .map(|range| range.to_offset(buffer)) + .collect::>(); + (path, offset_ranges) + }) + }) + .collect::>(); + ranges_by_path.sort_by_key(|(path, _)| path.clone()); + + assert_eq!( + ranges_by_path, + &[ + (PathBuf::from("root-1/a"), vec![6..11]), + (PathBuf::from("root-1/c"), vec![2..7]), + (PathBuf::from("root-1/d"), vec![0..5, 24..29]), + (PathBuf::from("root-2/e"), vec![7..12]), + ] + ); + } + + #[gpui::test(iterations = 10)] + async fn test_document_highlights(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + fs.insert_tree( + "/root-1", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "main.rs": "fn double(number: i32) -> i32 { number + number }", + }), + ) + .await; + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root-1", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Open the file on client B. + let buffer_b = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "main.rs"), cx))) + .await + .unwrap(); + + // Request document highlights as the guest. 
+ let fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server.handle_request::( + |params, _| async move { + assert_eq!( + params + .text_document_position_params + .text_document + .uri + .as_str(), + "file:///root-1/main.rs" + ); + assert_eq!( + params.text_document_position_params.position, + lsp::Position::new(0, 34) + ); + Ok(Some(vec![ + lsp::DocumentHighlight { + kind: Some(lsp::DocumentHighlightKind::WRITE), + range: lsp::Range::new( + lsp::Position::new(0, 10), + lsp::Position::new(0, 16), + ), + }, + lsp::DocumentHighlight { + kind: Some(lsp::DocumentHighlightKind::READ), + range: lsp::Range::new( + lsp::Position::new(0, 32), + lsp::Position::new(0, 38), + ), + }, + lsp::DocumentHighlight { + kind: Some(lsp::DocumentHighlightKind::READ), + range: lsp::Range::new( + lsp::Position::new(0, 41), + lsp::Position::new(0, 47), + ), + }, + ])) + }, + ); + + let highlights = project_b + .update(cx_b, |p, cx| p.document_highlights(&buffer_b, 34, cx)) + .await + .unwrap(); + buffer_b.read_with(cx_b, |buffer, _| { + let snapshot = buffer.snapshot(); + + let highlights = highlights + .into_iter() + .map(|highlight| (highlight.kind, highlight.range.to_offset(&snapshot))) + .collect::>(); + assert_eq!( + highlights, + &[ + (lsp::DocumentHighlightKind::WRITE, 10..16), + (lsp::DocumentHighlightKind::READ, 32..38), + (lsp::DocumentHighlightKind::READ, 41..47) + ] + ) + }); + } + + #[gpui::test(iterations = 10)] + async fn test_project_symbols(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + fs.insert_tree( + "/code", + json!({ + "crate-1": { + ".zed.toml": r#"collaborators = ["user_b"]"#, + "one.rs": "const ONE: usize = 1;", + }, + "crate-2": { + "two.rs": "const TWO: usize = 2; const THREE: usize = 3;", + }, + "private": { + "passwords.txt": "the-password", + } + }), + ) + .await; + + // Set 
up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/code/crate-1", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + // Cause the language server to start. 
+ let _buffer = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "one.rs"), cx))) + .await + .unwrap(); + + let fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server.handle_request::( + |_, _| async move { + #[allow(deprecated)] + Ok(Some(vec![lsp::SymbolInformation { + name: "TWO".into(), + location: lsp::Location { + uri: lsp::Url::from_file_path("/code/crate-2/two.rs").unwrap(), + range: lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), + }, + kind: lsp::SymbolKind::CONSTANT, + tags: None, + container_name: None, + deprecated: None, + }])) + }, + ); + + // Request the definition of a symbol as the guest. + let symbols = project_b + .update(cx_b, |p, cx| p.symbols("two", cx)) + .await + .unwrap(); + assert_eq!(symbols.len(), 1); + assert_eq!(symbols[0].name, "TWO"); + + // Open one of the returned symbols. + let buffer_b_2 = project_b + .update(cx_b, |project, cx| { + project.open_buffer_for_symbol(&symbols[0], cx) + }) + .await + .unwrap(); + buffer_b_2.read_with(cx_b, |buffer, _| { + assert_eq!( + buffer.file().unwrap().path().as_ref(), + Path::new("../crate-2/two.rs") + ); + }); + + // Attempt to craft a symbol and violate host's privacy by opening an arbitrary file. 
+ let mut fake_symbol = symbols[0].clone(); + fake_symbol.path = Path::new("/code/secrets").into(); + let error = project_b + .update(cx_b, |project, cx| { + project.open_buffer_for_symbol(&fake_symbol, cx) + }) + .await + .unwrap_err(); + assert!(error.to_string().contains("invalid symbol signature")); + } + + #[gpui::test(iterations = 10)] + async fn test_open_buffer_while_getting_definition_pointing_to_it( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + mut rng: StdRng, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + fs.insert_tree( + "/root", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "a.rs": "const ONE: usize = b::TWO;", + "b.rs": "const TWO: usize = 2", + }), + ) + .await; + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/root", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. + let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + let buffer_b1 = cx_b + .background() + .spawn(project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))) + .await + .unwrap(); + + let fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server.handle_request::( + |_, _| async move { + Ok(Some(lsp::GotoDefinitionResponse::Scalar( + lsp::Location::new( + lsp::Url::from_file_path("/root/b.rs").unwrap(), + lsp::Range::new(lsp::Position::new(0, 6), lsp::Position::new(0, 9)), + ), + ))) + }, + ); + + let definitions; + let buffer_b2; + if rng.gen() { + definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); + buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.rs"), cx)); + } else { + buffer_b2 = project_b.update(cx_b, |p, cx| p.open_buffer((worktree_id, "b.rs"), cx)); + definitions = project_b.update(cx_b, |p, cx| p.definition(&buffer_b1, 23, cx)); + } + + let buffer_b2 = 
buffer_b2.await.unwrap(); + let definitions = definitions.await.unwrap(); + assert_eq!(definitions.len(), 1); + assert_eq!(definitions[0].buffer, buffer_b2); + } + + #[gpui::test(iterations = 10)] + async fn test_collaborating_with_code_actions( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + cx_b.update(|cx| editor::init(cx)); + + // Set up a fake language server. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(Default::default()); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "main.rs": "mod other;\nfn main() { let foo = other::foo(); }", + "other.rs": "pub fn foo() -> usize { 4 }", + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. 
+ let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + let mut params = cx_b.update(WorkspaceParams::test); + params.languages = lang_registry.clone(); + params.client = client_b.client.clone(); + params.user_store = client_b.user_store.clone(); + params.project = project_b; + + let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); + let editor_b = workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "main.rs"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let mut fake_language_server = fake_language_servers.next().await.unwrap(); + fake_language_server + .handle_request::(|params, _| async move { + assert_eq!( + params.text_document.uri, + lsp::Url::from_file_path("/a/main.rs").unwrap(), + ); + assert_eq!(params.range.start, lsp::Position::new(0, 0)); + assert_eq!(params.range.end, lsp::Position::new(0, 0)); + Ok(None) + }) + .next() + .await; + + // Move cursor to a location that contains code actions. 
+ editor_b.update(cx_b, |editor, cx| { + editor.select_ranges([Point::new(1, 31)..Point::new(1, 31)], None, cx); + cx.focus(&editor_b); + }); + + fake_language_server + .handle_request::(|params, _| async move { + assert_eq!( + params.text_document.uri, + lsp::Url::from_file_path("/a/main.rs").unwrap(), + ); + assert_eq!(params.range.start, lsp::Position::new(1, 31)); + assert_eq!(params.range.end, lsp::Position::new(1, 31)); + + Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( + lsp::CodeAction { + title: "Inline into all callers".to_string(), + edit: Some(lsp::WorkspaceEdit { + changes: Some( + [ + ( + lsp::Url::from_file_path("/a/main.rs").unwrap(), + vec![lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(1, 22), + lsp::Position::new(1, 34), + ), + "4".to_string(), + )], + ), + ( + lsp::Url::from_file_path("/a/other.rs").unwrap(), + vec![lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(0, 0), + lsp::Position::new(0, 27), + ), + "".to_string(), + )], + ), + ] + .into_iter() + .collect(), + ), + ..Default::default() + }), + data: Some(json!({ + "codeActionParams": { + "range": { + "start": {"line": 1, "column": 31}, + "end": {"line": 1, "column": 31}, + } + } + })), + ..Default::default() + }, + )])) + }) + .next() + .await; + + // Toggle code actions and wait for them to display. + editor_b.update(cx_b, |editor, cx| { + editor.toggle_code_actions( + &ToggleCodeActions { + deployed_from_indicator: false, + }, + cx, + ); + }); + editor_b + .condition(&cx_b, |editor, _| editor.context_menu_visible()) + .await; + + fake_language_server.remove_request_handler::(); + + // Confirming the code action will trigger a resolve request. 
+ let confirm_action = workspace_b + .update(cx_b, |workspace, cx| { + Editor::confirm_code_action(workspace, &ConfirmCodeAction { item_ix: Some(0) }, cx) + }) + .unwrap(); + fake_language_server.handle_request::( + |_, _| async move { + Ok(lsp::CodeAction { + title: "Inline into all callers".to_string(), + edit: Some(lsp::WorkspaceEdit { + changes: Some( + [ + ( + lsp::Url::from_file_path("/a/main.rs").unwrap(), + vec![lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(1, 22), + lsp::Position::new(1, 34), + ), + "4".to_string(), + )], + ), + ( + lsp::Url::from_file_path("/a/other.rs").unwrap(), + vec![lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(0, 0), + lsp::Position::new(0, 27), + ), + "".to_string(), + )], + ), + ] + .into_iter() + .collect(), + ), + ..Default::default() + }), + ..Default::default() + }) + }, + ); + + // After the action is confirmed, an editor containing both modified files is opened. + confirm_action.await.unwrap(); + let code_action_editor = workspace_b.read_with(cx_b, |workspace, cx| { + workspace + .active_item(cx) + .unwrap() + .downcast::() + .unwrap() + }); + code_action_editor.update(cx_b, |editor, cx| { + assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); + editor.undo(&Undo, cx); + assert_eq!( + editor.text(cx), + "pub fn foo() -> usize { 4 }\nmod other;\nfn main() { let foo = other::foo(); }" + ); + editor.redo(&Redo, cx); + assert_eq!(editor.text(cx), "\nmod other;\nfn main() { let foo = 4; }"); + }); + } + + #[gpui::test(iterations = 10)] + async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + cx_b.update(|cx| editor::init(cx)); + + // Set up a fake language server. 
+ let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + Some(tree_sitter_rust::language()), + ); + let mut fake_language_servers = language.set_fake_lsp_adapter(FakeLspAdapter { + capabilities: lsp::ServerCapabilities { + rename_provider: Some(lsp::OneOf::Right(lsp::RenameOptions { + prepare_provider: Some(true), + work_done_progress_options: Default::default(), + })), + ..Default::default() + }, + ..Default::default() + }); + lang_registry.add(Arc::new(language)); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Share a project as client A + fs.insert_tree( + "/dir", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "one.rs": "const ONE: usize = 1;", + "two.rs": "const TWO: usize = one::ONE + one::ONE;" + }), + ) + .await; + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/dir", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + let project_id = project_a.update(cx_a, |p, _| p.next_remote_id()).await; + let worktree_id = worktree_a.read_with(cx_a, |tree, _| tree.id()); + project_a.update(cx_a, |p, cx| p.share(cx)).await.unwrap(); + + // Join the worktree as client B. 
+ let project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + let mut params = cx_b.update(WorkspaceParams::test); + params.languages = lang_registry.clone(); + params.client = client_b.client.clone(); + params.user_store = client_b.user_store.clone(); + params.project = project_b; + + let (_window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(¶ms, cx)); + let editor_b = workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "one.rs"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + let fake_language_server = fake_language_servers.next().await.unwrap(); + + // Move cursor to a location that can be renamed. + let prepare_rename = editor_b.update(cx_b, |editor, cx| { + editor.select_ranges([7..7], None, cx); + editor.rename(&Rename, cx).unwrap() + }); + + fake_language_server + .handle_request::(|params, _| async move { + assert_eq!(params.text_document.uri.as_str(), "file:///dir/one.rs"); + assert_eq!(params.position, lsp::Position::new(0, 7)); + Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( + lsp::Position::new(0, 6), + lsp::Position::new(0, 9), + )))) + }) + .next() + .await + .unwrap(); + prepare_rename.await.unwrap(); + editor_b.update(cx_b, |editor, cx| { + let rename = editor.pending_rename().unwrap(); + let buffer = editor.buffer().read(cx).snapshot(cx); + assert_eq!( + rename.range.start.to_offset(&buffer)..rename.range.end.to_offset(&buffer), + 6..9 + ); + rename.editor.update(cx, |rename_editor, cx| { + rename_editor.buffer().update(cx, |rename_buffer, cx| { + rename_buffer.edit([0..3], "THREE", cx); + }); + }); + }); + + let confirm_rename = workspace_b.update(cx_b, |workspace, cx| { + Editor::confirm_rename(workspace, &ConfirmRename, cx).unwrap() + }); + fake_language_server + .handle_request::(|params, _| async move { + assert_eq!( + 
params.text_document_position.text_document.uri.as_str(), + "file:///dir/one.rs" + ); + assert_eq!( + params.text_document_position.position, + lsp::Position::new(0, 6) + ); + assert_eq!(params.new_name, "THREE"); + Ok(Some(lsp::WorkspaceEdit { + changes: Some( + [ + ( + lsp::Url::from_file_path("/dir/one.rs").unwrap(), + vec![lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(0, 6), + lsp::Position::new(0, 9), + ), + "THREE".to_string(), + )], + ), + ( + lsp::Url::from_file_path("/dir/two.rs").unwrap(), + vec![ + lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(0, 24), + lsp::Position::new(0, 27), + ), + "THREE".to_string(), + ), + lsp::TextEdit::new( + lsp::Range::new( + lsp::Position::new(0, 35), + lsp::Position::new(0, 38), + ), + "THREE".to_string(), + ), + ], + ), + ] + .into_iter() + .collect(), + ), + ..Default::default() + })) + }) + .next() + .await + .unwrap(); + confirm_rename.await.unwrap(); + + let rename_editor = workspace_b.read_with(cx_b, |workspace, cx| { + workspace + .active_item(cx) + .unwrap() + .downcast::() + .unwrap() + }); + rename_editor.update(cx_b, |editor, cx| { + assert_eq!( + editor.text(cx), + "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" + ); + editor.undo(&Undo, cx); + assert_eq!( + editor.text(cx), + "const TWO: usize = one::ONE + one::ONE;\nconst ONE: usize = 1;" + ); + editor.redo(&Redo, cx); + assert_eq!( + editor.text(cx), + "const TWO: usize = one::THREE + one::THREE;\nconst THREE: usize = 1;" + ); + }); + + // Ensure temporary rename edits cannot be undone/redone. 
+ editor_b.update(cx_b, |editor, cx| { + editor.undo(&Undo, cx); + assert_eq!(editor.text(cx), "const ONE: usize = 1;"); + editor.undo(&Undo, cx); + assert_eq!(editor.text(cx), "const ONE: usize = 1;"); + editor.redo(&Redo, cx); + assert_eq!(editor.text(cx), "const THREE: usize = 1;"); + }) + } + + #[gpui::test(iterations = 10)] + async fn test_basic_chat(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + + // Create an org that includes these 2 users. + let db = &server.app_state.db; + let org_id = db.create_org("Test Org", "test-org").await.unwrap(); + db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) + .await + .unwrap(); + db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) + .await + .unwrap(); + + // Create a channel that includes all the users. 
+ let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); + db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) + .await + .unwrap(); + db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) + .await + .unwrap(); + db.create_channel_message( + channel_id, + client_b.current_user_id(&cx_b), + "hello A, it's B.", + OffsetDateTime::now_utc(), + 1, + ) + .await + .unwrap(); + + let channels_a = cx_a + .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); + channels_a + .condition(cx_a, |list, _| list.available_channels().is_some()) + .await; + channels_a.read_with(cx_a, |list, _| { + assert_eq!( + list.available_channels().unwrap(), + &[ChannelDetails { + id: channel_id.to_proto(), + name: "test-channel".to_string() + }] + ) + }); + let channel_a = channels_a.update(cx_a, |this, cx| { + this.get_channel(channel_id.to_proto(), cx).unwrap() + }); + channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); + channel_a + .condition(&cx_a, |channel, _| { + channel_messages(channel) + == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] + }) + .await; + + let channels_b = cx_b + .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); + channels_b + .condition(cx_b, |list, _| list.available_channels().is_some()) + .await; + channels_b.read_with(cx_b, |list, _| { + assert_eq!( + list.available_channels().unwrap(), + &[ChannelDetails { + id: channel_id.to_proto(), + name: "test-channel".to_string() + }] + ) + }); + + let channel_b = channels_b.update(cx_b, |this, cx| { + this.get_channel(channel_id.to_proto(), cx).unwrap() + }); + channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); + channel_b + .condition(&cx_b, |channel, _| { + channel_messages(channel) + == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] + }) + .await; + + channel_a + .update(cx_a, |channel, cx| { + 
channel + .send_message("oh, hi B.".to_string(), cx) + .unwrap() + .detach(); + let task = channel.send_message("sup".to_string(), cx).unwrap(); + assert_eq!( + channel_messages(channel), + &[ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_a".to_string(), "oh, hi B.".to_string(), true), + ("user_a".to_string(), "sup".to_string(), true) + ] + ); + task + }) + .await + .unwrap(); + + channel_b + .condition(&cx_b, |channel, _| { + channel_messages(channel) + == [ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_a".to_string(), "oh, hi B.".to_string(), false), + ("user_a".to_string(), "sup".to_string(), false), + ] + }) + .await; + + assert_eq!( + server + .state() + .await + .channel(channel_id) + .unwrap() + .connection_ids + .len(), + 2 + ); + cx_b.update(|_| drop(channel_b)); + server + .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1) + .await; + + cx_a.update(|_| drop(channel_a)); + server + .condition(|state| state.channel(channel_id).is_none()) + .await; + } + + #[gpui::test(iterations = 10)] + async fn test_chat_message_validation(cx_a: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + + let db = &server.app_state.db; + let org_id = db.create_org("Test Org", "test-org").await.unwrap(); + let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); + db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) + .await + .unwrap(); + db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) + .await + .unwrap(); + + let channels_a = cx_a + .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); + channels_a + .condition(cx_a, |list, _| list.available_channels().is_some()) + .await; + let channel_a = channels_a.update(cx_a, |this, cx| { + 
this.get_channel(channel_id.to_proto(), cx).unwrap() + }); + + // Messages aren't allowed to be too long. + channel_a + .update(cx_a, |channel, cx| { + let long_body = "this is long.\n".repeat(1024); + channel.send_message(long_body, cx).unwrap() + }) + .await + .unwrap_err(); + + // Messages aren't allowed to be blank. + channel_a.update(cx_a, |channel, cx| { + channel.send_message(String::new(), cx).unwrap_err() + }); + + // Leading and trailing whitespace are trimmed. + channel_a + .update(cx_a, |channel, cx| { + channel + .send_message("\n surrounded by whitespace \n".to_string(), cx) + .unwrap() + }) + .await + .unwrap(); + assert_eq!( + db.get_channel_messages(channel_id, 10, None) + .await + .unwrap() + .iter() + .map(|m| &m.body) + .collect::>(), + &["surrounded by whitespace"] + ); + } + + #[gpui::test(iterations = 10)] + async fn test_chat_reconnection(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + + // Connect to a server as 2 clients. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + let mut status_b = client_b.status(); + + // Create an org that includes these 2 users. + let db = &server.app_state.db; + let org_id = db.create_org("Test Org", "test-org").await.unwrap(); + db.add_org_member(org_id, client_a.current_user_id(&cx_a), false) + .await + .unwrap(); + db.add_org_member(org_id, client_b.current_user_id(&cx_b), false) + .await + .unwrap(); + + // Create a channel that includes all the users. 
+ let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap(); + db.add_channel_member(channel_id, client_a.current_user_id(&cx_a), false) + .await + .unwrap(); + db.add_channel_member(channel_id, client_b.current_user_id(&cx_b), false) + .await + .unwrap(); + db.create_channel_message( + channel_id, + client_b.current_user_id(&cx_b), + "hello A, it's B.", + OffsetDateTime::now_utc(), + 2, + ) + .await + .unwrap(); + + let channels_a = cx_a + .add_model(|cx| ChannelList::new(client_a.user_store.clone(), client_a.clone(), cx)); + channels_a + .condition(cx_a, |list, _| list.available_channels().is_some()) + .await; + + channels_a.read_with(cx_a, |list, _| { + assert_eq!( + list.available_channels().unwrap(), + &[ChannelDetails { + id: channel_id.to_proto(), + name: "test-channel".to_string() + }] + ) + }); + let channel_a = channels_a.update(cx_a, |this, cx| { + this.get_channel(channel_id.to_proto(), cx).unwrap() + }); + channel_a.read_with(cx_a, |channel, _| assert!(channel.messages().is_empty())); + channel_a + .condition(&cx_a, |channel, _| { + channel_messages(channel) + == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] + }) + .await; + + let channels_b = cx_b + .add_model(|cx| ChannelList::new(client_b.user_store.clone(), client_b.clone(), cx)); + channels_b + .condition(cx_b, |list, _| list.available_channels().is_some()) + .await; + channels_b.read_with(cx_b, |list, _| { + assert_eq!( + list.available_channels().unwrap(), + &[ChannelDetails { + id: channel_id.to_proto(), + name: "test-channel".to_string() + }] + ) + }); + + let channel_b = channels_b.update(cx_b, |this, cx| { + this.get_channel(channel_id.to_proto(), cx).unwrap() + }); + channel_b.read_with(cx_b, |channel, _| assert!(channel.messages().is_empty())); + channel_b + .condition(&cx_b, |channel, _| { + channel_messages(channel) + == [("user_b".to_string(), "hello A, it's B.".to_string(), false)] + }) + .await; + + // Disconnect client B, ensuring we can still 
access its cached channel data. + server.forbid_connections(); + server.disconnect_client(client_b.current_user_id(&cx_b)); + cx_b.foreground().advance_clock(Duration::from_secs(3)); + while !matches!( + status_b.next().await, + Some(client::Status::ReconnectionError { .. }) + ) {} + + channels_b.read_with(cx_b, |channels, _| { + assert_eq!( + channels.available_channels().unwrap(), + [ChannelDetails { + id: channel_id.to_proto(), + name: "test-channel".to_string() + }] + ) + }); + channel_b.read_with(cx_b, |channel, _| { + assert_eq!( + channel_messages(channel), + [("user_b".to_string(), "hello A, it's B.".to_string(), false)] + ) + }); + + // Send a message from client B while it is disconnected. + channel_b + .update(cx_b, |channel, cx| { + let task = channel + .send_message("can you see this?".to_string(), cx) + .unwrap(); + assert_eq!( + channel_messages(channel), + &[ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_b".to_string(), "can you see this?".to_string(), true) + ] + ); + task + }) + .await + .unwrap_err(); + + // Send a message from client A while B is disconnected. + channel_a + .update(cx_a, |channel, cx| { + channel + .send_message("oh, hi B.".to_string(), cx) + .unwrap() + .detach(); + let task = channel.send_message("sup".to_string(), cx).unwrap(); + assert_eq!( + channel_messages(channel), + &[ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_a".to_string(), "oh, hi B.".to_string(), true), + ("user_a".to_string(), "sup".to_string(), true) + ] + ); + task + }) + .await + .unwrap(); + + // Give client B a chance to reconnect. + server.allow_connections(); + cx_b.foreground().advance_clock(Duration::from_secs(10)); + + // Verify that B sees the new messages upon reconnection, as well as the message client B + // sent while offline. 
+ channel_b + .condition(&cx_b, |channel, _| { + channel_messages(channel) + == [ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_a".to_string(), "oh, hi B.".to_string(), false), + ("user_a".to_string(), "sup".to_string(), false), + ("user_b".to_string(), "can you see this?".to_string(), false), + ] + }) + .await; + + // Ensure client A and B can communicate normally after reconnection. + channel_a + .update(cx_a, |channel, cx| { + channel.send_message("you online?".to_string(), cx).unwrap() + }) + .await + .unwrap(); + channel_b + .condition(&cx_b, |channel, _| { + channel_messages(channel) + == [ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_a".to_string(), "oh, hi B.".to_string(), false), + ("user_a".to_string(), "sup".to_string(), false), + ("user_b".to_string(), "can you see this?".to_string(), false), + ("user_a".to_string(), "you online?".to_string(), false), + ] + }) + .await; + + channel_b + .update(cx_b, |channel, cx| { + channel.send_message("yep".to_string(), cx).unwrap() + }) + .await + .unwrap(); + channel_a + .condition(&cx_a, |channel, _| { + channel_messages(channel) + == [ + ("user_b".to_string(), "hello A, it's B.".to_string(), false), + ("user_a".to_string(), "oh, hi B.".to_string(), false), + ("user_a".to_string(), "sup".to_string(), false), + ("user_b".to_string(), "can you see this?".to_string(), false), + ("user_a".to_string(), "you online?".to_string(), false), + ("user_b".to_string(), "yep".to_string(), false), + ] + }) + .await; + } + + #[gpui::test(iterations = 10)] + async fn test_contacts( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, + cx_c: &mut TestAppContext, + ) { + cx_a.foreground().forbid_parking(); + let lang_registry = Arc::new(LanguageRegistry::test()); + let fs = FakeFs::new(cx_a.background()); + + // Connect to a server as 3 clients. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + let client_c = server.create_client(cx_c, "user_c").await; + + // Share a worktree as client A. + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#, + }), + ) + .await; + + let project_a = cx_a.update(|cx| { + Project::local( + client_a.clone(), + client_a.user_store.clone(), + lang_registry.clone(), + fs.clone(), + cx, + ) + }); + let (worktree_a, _) = project_a + .update(cx_a, |p, cx| { + p.find_or_create_local_worktree("/a", true, cx) + }) + .await + .unwrap(); + worktree_a + .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + + client_a + .user_store + .condition(&cx_a, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] + }) + .await; + client_b + .user_store + .condition(&cx_b, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] + }) + .await; + client_c + .user_store + .condition(&cx_c, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", false, vec![])])] + }) + .await; + + let project_id = project_a + .update(cx_a, |project, _| project.next_remote_id()) + .await; + project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + client_a + .user_store + .condition(&cx_a, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] + }) + .await; + client_b + .user_store + .condition(&cx_b, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] + }) + .await; + client_c + .user_store + .condition(&cx_c, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", true, vec![])])] + }) + .await; + + let _project_b = Project::remote( + project_id, + client_b.clone(), + client_b.user_store.clone(), + 
lang_registry.clone(), + fs.clone(), + &mut cx_b.to_async(), + ) + .await + .unwrap(); + + client_a + .user_store + .condition(&cx_a, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] + }) + .await; + client_b + .user_store + .condition(&cx_b, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] + }) + .await; + client_c + .user_store + .condition(&cx_c, |user_store, _| { + contacts(user_store) == vec![("user_a", vec![("a", true, vec!["user_b"])])] + }) + .await; + + project_a + .condition(&cx_a, |project, _| { + project.collaborators().contains_key(&client_b.peer_id) + }) + .await; + + cx_a.update(move |_| drop(project_a)); + client_a + .user_store + .condition(&cx_a, |user_store, _| contacts(user_store) == vec![]) + .await; + client_b + .user_store + .condition(&cx_b, |user_store, _| contacts(user_store) == vec![]) + .await; + client_c + .user_store + .condition(&cx_c, |user_store, _| contacts(user_store) == vec![]) + .await; + + fn contacts(user_store: &UserStore) -> Vec<(&str, Vec<(&str, bool, Vec<&str>)>)> { + user_store + .contacts() + .iter() + .map(|contact| { + let worktrees = contact + .projects + .iter() + .map(|p| { + ( + p.worktree_root_names[0].as_str(), + p.is_shared, + p.guests.iter().map(|p| p.github_login.as_str()).collect(), + ) + }) + .collect(); + (contact.user.github_login.as_str(), worktrees) + }) + .collect() + } + } + + #[gpui::test(iterations = 10)] + async fn test_following(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let fs = FakeFs::new(cx_a.background()); + + // 2 clients connect to a server. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let mut client_a = server.create_client(cx_a, "user_a").await; + let mut client_b = server.create_client(cx_b, "user_b").await; + cx_a.update(editor::init); + cx_b.update(editor::init); + + // Client A shares a project. 
+ fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "1.txt": "one", + "2.txt": "two", + "3.txt": "three", + }), + ) + .await; + let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; + project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + + // Client B joins the project. + let project_b = client_b + .build_remote_project( + project_a + .read_with(cx_a, |project, _| project.remote_id()) + .unwrap(), + cx_b, + ) + .await; + + // Client A opens some editors. + let workspace_a = client_a.build_workspace(&project_a, cx_a); + let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); + let editor_a1 = workspace_a + .update(cx_a, |workspace, cx| { + workspace.open_path((worktree_id, "1.txt"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + let editor_a2 = workspace_a + .update(cx_a, |workspace, cx| { + workspace.open_path((worktree_id, "2.txt"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + // Client B opens an editor. + let workspace_b = client_b.build_workspace(&project_b, cx_b); + let editor_b1 = workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "1.txt"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let client_a_id = project_b.read_with(cx_b, |project, _| { + project.collaborators().values().next().unwrap().peer_id + }); + let client_b_id = project_a.read_with(cx_a, |project, _| { + project.collaborators().values().next().unwrap().peer_id + }); + + // When client B starts following client A, all visible view states are replicated to client B. 
+ editor_a1.update(cx_a, |editor, cx| editor.select_ranges([0..1], None, cx)); + editor_a2.update(cx_a, |editor, cx| editor.select_ranges([2..3], None, cx)); + workspace_b + .update(cx_b, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(client_a_id), cx) + .unwrap() + }) + .await + .unwrap(); + let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { + workspace + .active_item(cx) + .unwrap() + .downcast::() + .unwrap() + }); + assert!(cx_b.read(|cx| editor_b2.is_focused(cx))); + assert_eq!( + editor_b2.read_with(cx_b, |editor, cx| editor.project_path(cx)), + Some((worktree_id, "2.txt").into()) + ); + assert_eq!( + editor_b2.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), + vec![2..3] + ); + assert_eq!( + editor_b1.read_with(cx_b, |editor, cx| editor.selected_ranges(cx)), + vec![0..1] + ); + + // When client A activates a different editor, client B does so as well. + workspace_a.update(cx_a, |workspace, cx| { + workspace.activate_item(&editor_a1, cx) + }); + workspace_b + .condition(cx_b, |workspace, cx| { + workspace.active_item(cx).unwrap().id() == editor_b1.id() + }) + .await; + + // When client A navigates back and forth, client B does so as well. + workspace_a + .update(cx_a, |workspace, cx| { + workspace::Pane::go_back(workspace, None, cx) + }) + .await; + workspace_b + .condition(cx_b, |workspace, cx| { + workspace.active_item(cx).unwrap().id() == editor_b2.id() + }) + .await; + + workspace_a + .update(cx_a, |workspace, cx| { + workspace::Pane::go_forward(workspace, None, cx) + }) + .await; + workspace_b + .condition(cx_b, |workspace, cx| { + workspace.active_item(cx).unwrap().id() == editor_b1.id() + }) + .await; + + // Changes to client A's editor are reflected on client B. 
+ editor_a1.update(cx_a, |editor, cx| { + editor.select_ranges([1..1, 2..2], None, cx); + }); + editor_b1 + .condition(cx_b, |editor, cx| { + editor.selected_ranges(cx) == vec![1..1, 2..2] + }) + .await; + + editor_a1.update(cx_a, |editor, cx| editor.set_text("TWO", cx)); + editor_b1 + .condition(cx_b, |editor, cx| editor.text(cx) == "TWO") + .await; + + editor_a1.update(cx_a, |editor, cx| { + editor.select_ranges([3..3], None, cx); + editor.set_scroll_position(vec2f(0., 100.), cx); + }); + editor_b1 + .condition(cx_b, |editor, cx| editor.selected_ranges(cx) == vec![3..3]) + .await; + + // After unfollowing, client B stops receiving updates from client A. + workspace_b.update(cx_b, |workspace, cx| { + workspace.unfollow(&workspace.active_pane().clone(), cx) + }); + workspace_a.update(cx_a, |workspace, cx| { + workspace.activate_item(&editor_a2, cx) + }); + cx_a.foreground().run_until_parked(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .id()), + editor_b1.id() + ); + + // Client A starts following client B. + workspace_a + .update(cx_a, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(client_b_id), cx) + .unwrap() + }) + .await + .unwrap(); + assert_eq!( + workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), + Some(client_b_id) + ); + assert_eq!( + workspace_a.read_with(cx_a, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .id()), + editor_a1.id() + ); + + // Following interrupts when client B disconnects. + client_b.disconnect(&cx_b.to_async()).unwrap(); + cx_a.foreground().run_until_parked(); + assert_eq!( + workspace_a.read_with(cx_a, |workspace, _| workspace.leader_for_pane(&pane_a)), + None + ); + } + + #[gpui::test(iterations = 10)] + async fn test_peers_following_each_other(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let fs = FakeFs::new(cx_a.background()); + + // 2 clients connect to a server. 
+ let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let mut client_a = server.create_client(cx_a, "user_a").await; + let mut client_b = server.create_client(cx_b, "user_b").await; + cx_a.update(editor::init); + cx_b.update(editor::init); + + // Client A shares a project. + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "1.txt": "one", + "2.txt": "two", + "3.txt": "three", + "4.txt": "four", + }), + ) + .await; + let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; + project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + + // Client B joins the project. + let project_b = client_b + .build_remote_project( + project_a + .read_with(cx_a, |project, _| project.remote_id()) + .unwrap(), + cx_b, + ) + .await; + + // Client A opens some editors. + let workspace_a = client_a.build_workspace(&project_a, cx_a); + let pane_a1 = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); + let _editor_a1 = workspace_a + .update(cx_a, |workspace, cx| { + workspace.open_path((worktree_id, "1.txt"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + // Client B opens an editor. 
+ let workspace_b = client_b.build_workspace(&project_b, cx_b); + let pane_b1 = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); + let _editor_b1 = workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "2.txt"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + // Clients A and B follow each other in split panes + workspace_a + .update(cx_a, |workspace, cx| { + workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); + assert_ne!(*workspace.active_pane(), pane_a1); + let leader_id = *project_a.read(cx).collaborators().keys().next().unwrap(); + workspace + .toggle_follow(&workspace::ToggleFollow(leader_id), cx) + .unwrap() + }) + .await + .unwrap(); + workspace_b + .update(cx_b, |workspace, cx| { + workspace.split_pane(workspace.active_pane().clone(), SplitDirection::Right, cx); + assert_ne!(*workspace.active_pane(), pane_b1); + let leader_id = *project_b.read(cx).collaborators().keys().next().unwrap(); + workspace + .toggle_follow(&workspace::ToggleFollow(leader_id), cx) + .unwrap() + }) + .await + .unwrap(); + + workspace_a + .update(cx_a, |workspace, cx| { + workspace.activate_next_pane(cx); + assert_eq!(*workspace.active_pane(), pane_a1); + workspace.open_path((worktree_id, "3.txt"), cx) + }) + .await + .unwrap(); + workspace_b + .update(cx_b, |workspace, cx| { + workspace.activate_next_pane(cx); + assert_eq!(*workspace.active_pane(), pane_b1); + workspace.open_path((worktree_id, "4.txt"), cx) + }) + .await + .unwrap(); + cx_a.foreground().run_until_parked(); + + // Ensure leader updates don't change the active pane of followers + workspace_a.read_with(cx_a, |workspace, _| { + assert_eq!(*workspace.active_pane(), pane_a1); + }); + workspace_b.read_with(cx_b, |workspace, _| { + assert_eq!(*workspace.active_pane(), pane_b1); + }); + + // Ensure peers following each other doesn't cause an infinite loop. 
+ assert_eq!( + workspace_a.read_with(cx_a, |workspace, cx| workspace + .active_item(cx) + .unwrap() + .project_path(cx)), + Some((worktree_id, "3.txt").into()) + ); + workspace_a.update(cx_a, |workspace, cx| { + assert_eq!( + workspace.active_item(cx).unwrap().project_path(cx), + Some((worktree_id, "3.txt").into()) + ); + workspace.activate_next_pane(cx); + assert_eq!( + workspace.active_item(cx).unwrap().project_path(cx), + Some((worktree_id, "4.txt").into()) + ); + }); + workspace_b.update(cx_b, |workspace, cx| { + assert_eq!( + workspace.active_item(cx).unwrap().project_path(cx), + Some((worktree_id, "4.txt").into()) + ); + workspace.activate_next_pane(cx); + assert_eq!( + workspace.active_item(cx).unwrap().project_path(cx), + Some((worktree_id, "3.txt").into()) + ); + }); + } + + #[gpui::test(iterations = 10)] + async fn test_auto_unfollowing(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + cx_a.foreground().forbid_parking(); + let fs = FakeFs::new(cx_a.background()); + + // 2 clients connect to a server. + let mut server = TestServer::start(cx_a.foreground(), cx_a.background()).await; + let mut client_a = server.create_client(cx_a, "user_a").await; + let mut client_b = server.create_client(cx_b, "user_b").await; + cx_a.update(editor::init); + cx_b.update(editor::init); + + // Client A shares a project. + fs.insert_tree( + "/a", + json!({ + ".zed.toml": r#"collaborators = ["user_b"]"#, + "1.txt": "one", + "2.txt": "two", + "3.txt": "three", + }), + ) + .await; + let (project_a, worktree_id) = client_a.build_local_project(fs.clone(), "/a", cx_a).await; + project_a + .update(cx_a, |project, cx| project.share(cx)) + .await + .unwrap(); + + // Client B joins the project. + let project_b = client_b + .build_remote_project( + project_a + .read_with(cx_a, |project, _| project.remote_id()) + .unwrap(), + cx_b, + ) + .await; + + // Client A opens some editors. 
+ let workspace_a = client_a.build_workspace(&project_a, cx_a); + let _editor_a1 = workspace_a + .update(cx_a, |workspace, cx| { + workspace.open_path((worktree_id, "1.txt"), cx) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + // Client B starts following client A. + let workspace_b = client_b.build_workspace(&project_b, cx_b); + let pane_b = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); + let leader_id = project_b.read_with(cx_b, |project, _| { + project.collaborators().values().next().unwrap().peer_id + }); + workspace_b + .update(cx_b, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(leader_id), cx) + .unwrap() + }) + .await + .unwrap(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + Some(leader_id) + ); + let editor_b2 = workspace_b.read_with(cx_b, |workspace, cx| { + workspace + .active_item(cx) + .unwrap() + .downcast::() + .unwrap() + }); + + // When client B moves, it automatically stops following client A. + editor_b2.update(cx_b, |editor, cx| editor.move_right(&editor::MoveRight, cx)); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + None + ); + + workspace_b + .update(cx_b, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(leader_id), cx) + .unwrap() + }) + .await + .unwrap(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + Some(leader_id) + ); + + // When client B edits, it automatically stops following client A. 
+ editor_b2.update(cx_b, |editor, cx| editor.insert("X", cx)); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + None + ); + + workspace_b + .update(cx_b, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(leader_id), cx) + .unwrap() + }) + .await + .unwrap(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + Some(leader_id) + ); + + // When client B scrolls, it automatically stops following client A. + editor_b2.update(cx_b, |editor, cx| { + editor.set_scroll_position(vec2f(0., 3.), cx) + }); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + None + ); + + workspace_b + .update(cx_b, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(leader_id), cx) + .unwrap() + }) + .await + .unwrap(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + Some(leader_id) + ); + + // When client B activates a different pane, it continues following client A in the original pane. + workspace_b.update(cx_b, |workspace, cx| { + workspace.split_pane(pane_b.clone(), SplitDirection::Right, cx) + }); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + Some(leader_id) + ); + + workspace_b.update(cx_b, |workspace, cx| workspace.activate_next_pane(cx)); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + Some(leader_id) + ); + + // When client B activates a different item in the original pane, it automatically stops following client A. 
+ workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "2.txt"), cx) + }) + .await + .unwrap(); + assert_eq!( + workspace_b.read_with(cx_b, |workspace, _| workspace.leader_for_pane(&pane_b)), + None + ); + } + + #[gpui::test(iterations = 100)] + async fn test_random_collaboration( + cx: &mut TestAppContext, + deterministic: Arc, + rng: StdRng, + ) { + cx.foreground().forbid_parking(); + let max_peers = env::var("MAX_PEERS") + .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) + .unwrap_or(5); + assert!(max_peers <= 5); + + let max_operations = env::var("OPERATIONS") + .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) + .unwrap_or(10); + + let rng = Arc::new(Mutex::new(rng)); + + let guest_lang_registry = Arc::new(LanguageRegistry::test()); + let host_language_registry = Arc::new(LanguageRegistry::test()); + + let fs = FakeFs::new(cx.background()); + fs.insert_tree( + "/_collab", + json!({ + ".zed.toml": r#"collaborators = ["guest-1", "guest-2", "guest-3", "guest-4"]"# + }), + ) + .await; + + let mut server = TestServer::start(cx.foreground(), cx.background()).await; + let mut clients = Vec::new(); + let mut user_ids = Vec::new(); + let mut op_start_signals = Vec::new(); + let files = Arc::new(Mutex::new(Vec::new())); + + let mut next_entity_id = 100000; + let mut host_cx = TestAppContext::new( + cx.foreground_platform(), + cx.platform(), + deterministic.build_foreground(next_entity_id), + deterministic.build_background(), + cx.font_cache(), + cx.leak_detector(), + next_entity_id, + ); + let host = server.create_client(&mut host_cx, "host").await; + let host_project = host_cx.update(|cx| { + Project::local( + host.client.clone(), + host.user_store.clone(), + host_language_registry.clone(), + fs.clone(), + cx, + ) + }); + let host_project_id = host_project + .update(&mut host_cx, |p, _| p.next_remote_id()) + .await; + + let (collab_worktree, _) = host_project + .update(&mut host_cx, |project, cx| { + 
project.find_or_create_local_worktree("/_collab", true, cx) + }) + .await + .unwrap(); + collab_worktree + .read_with(&host_cx, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + host_project + .update(&mut host_cx, |project, cx| project.share(cx)) + .await + .unwrap(); + + // Set up fake language servers. + let mut language = Language::new( + LanguageConfig { + name: "Rust".into(), + path_suffixes: vec!["rs".to_string()], + ..Default::default() + }, + None, + ); + let _fake_servers = language.set_fake_lsp_adapter(FakeLspAdapter { + name: "the-fake-language-server", + capabilities: lsp::LanguageServer::full_capabilities(), + initializer: Some(Box::new({ + let rng = rng.clone(); + let files = files.clone(); + let project = host_project.downgrade(); + move |fake_server: &mut FakeLanguageServer| { + fake_server.handle_request::( + |_, _| async move { + Ok(Some(lsp::CompletionResponse::Array(vec![ + lsp::CompletionItem { + text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { + range: lsp::Range::new( + lsp::Position::new(0, 0), + lsp::Position::new(0, 0), + ), + new_text: "the-new-text".to_string(), + })), + ..Default::default() + }, + ]))) + }, + ); + + fake_server.handle_request::( + |_, _| async move { + Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( + lsp::CodeAction { + title: "the-code-action".to_string(), + ..Default::default() + }, + )])) + }, + ); + + fake_server.handle_request::( + |params, _| async move { + Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( + params.position, + params.position, + )))) + }, + ); + + fake_server.handle_request::({ + let files = files.clone(); + let rng = rng.clone(); + move |_, _| { + let files = files.clone(); + let rng = rng.clone(); + async move { + let files = files.lock(); + let mut rng = rng.lock(); + let count = rng.gen_range::(1..3); + let files = (0..count) + .map(|_| files.choose(&mut *rng).unwrap()) + .collect::>(); + log::info!("LSP: Returning definitions in files {:?}", &files); + 
Ok(Some(lsp::GotoDefinitionResponse::Array( + files + .into_iter() + .map(|file| lsp::Location { + uri: lsp::Url::from_file_path(file).unwrap(), + range: Default::default(), + }) + .collect(), + ))) + } + } + }); + + fake_server.handle_request::({ + let rng = rng.clone(); + let project = project.clone(); + move |params, mut cx| { + let highlights = if let Some(project) = project.upgrade(&cx) { + project.update(&mut cx, |project, cx| { + let path = params + .text_document_position_params + .text_document + .uri + .to_file_path() + .unwrap(); + let (worktree, relative_path) = + project.find_local_worktree(&path, cx)?; + let project_path = + ProjectPath::from((worktree.read(cx).id(), relative_path)); + let buffer = + project.get_open_buffer(&project_path, cx)?.read(cx); + + let mut highlights = Vec::new(); + let highlight_count = rng.lock().gen_range(1..=5); + let mut prev_end = 0; + for _ in 0..highlight_count { + let range = + buffer.random_byte_range(prev_end, &mut *rng.lock()); + + highlights.push(lsp::DocumentHighlight { + range: range_to_lsp(range.to_point_utf16(buffer)), + kind: Some(lsp::DocumentHighlightKind::READ), + }); + prev_end = range.end; + } + Some(highlights) + }) + } else { + None + }; + async move { Ok(highlights) } + } + }); + } + })), + ..Default::default() + }); + host_language_registry.add(Arc::new(language)); + + let op_start_signal = futures::channel::mpsc::unbounded(); + user_ids.push(host.current_user_id(&host_cx)); + op_start_signals.push(op_start_signal.0); + clients.push(host_cx.foreground().spawn(host.simulate_host( + host_project, + files, + op_start_signal.1, + rng.clone(), + host_cx, + ))); + + let disconnect_host_at = if rng.lock().gen_bool(0.2) { + rng.lock().gen_range(0..max_operations) + } else { + max_operations + }; + let mut available_guests = vec![ + "guest-1".to_string(), + "guest-2".to_string(), + "guest-3".to_string(), + "guest-4".to_string(), + ]; + let mut operations = 0; + while operations < max_operations { + if 
operations == disconnect_host_at { + server.disconnect_client(user_ids[0]); + cx.foreground().advance_clock(RECEIVE_TIMEOUT); + drop(op_start_signals); + let mut clients = futures::future::join_all(clients).await; + cx.foreground().run_until_parked(); + + let (host, mut host_cx, host_err) = clients.remove(0); + if let Some(host_err) = host_err { + log::error!("host error - {}", host_err); + } + host.project + .as_ref() + .unwrap() + .read_with(&host_cx, |project, _| assert!(!project.is_shared())); + for (guest, mut guest_cx, guest_err) in clients { + if let Some(guest_err) = guest_err { + log::error!("{} error - {}", guest.username, guest_err); + } + let contacts = server + .store + .read() + .await + .contacts_for_user(guest.current_user_id(&guest_cx)); + assert!(!contacts + .iter() + .flat_map(|contact| &contact.projects) + .any(|project| project.id == host_project_id)); + guest + .project + .as_ref() + .unwrap() + .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); + guest_cx.update(|_| drop(guest)); + } + host_cx.update(|_| drop(host)); + + return; + } + + let distribution = rng.lock().gen_range(0..100); + match distribution { + 0..=19 if !available_guests.is_empty() => { + let guest_ix = rng.lock().gen_range(0..available_guests.len()); + let guest_username = available_guests.remove(guest_ix); + log::info!("Adding new connection for {}", guest_username); + next_entity_id += 100000; + let mut guest_cx = TestAppContext::new( + cx.foreground_platform(), + cx.platform(), + deterministic.build_foreground(next_entity_id), + deterministic.build_background(), + cx.font_cache(), + cx.leak_detector(), + next_entity_id, + ); + let guest = server.create_client(&mut guest_cx, &guest_username).await; + let guest_project = Project::remote( + host_project_id, + guest.client.clone(), + guest.user_store.clone(), + guest_lang_registry.clone(), + FakeFs::new(cx.background()), + &mut guest_cx.to_async(), + ) + .await + .unwrap(); + let op_start_signal = 
futures::channel::mpsc::unbounded(); + user_ids.push(guest.current_user_id(&guest_cx)); + op_start_signals.push(op_start_signal.0); + clients.push(guest_cx.foreground().spawn(guest.simulate_guest( + guest_username.clone(), + guest_project, + op_start_signal.1, + rng.clone(), + guest_cx, + ))); + + log::info!("Added connection for {}", guest_username); + operations += 1; + } + 20..=29 if clients.len() > 1 => { + log::info!("Removing guest"); + let guest_ix = rng.lock().gen_range(1..clients.len()); + let removed_guest_id = user_ids.remove(guest_ix); + let guest = clients.remove(guest_ix); + op_start_signals.remove(guest_ix); + server.disconnect_client(removed_guest_id); + cx.foreground().advance_clock(RECEIVE_TIMEOUT); + let (guest, mut guest_cx, guest_err) = guest.await; + if let Some(guest_err) = guest_err { + log::error!("{} error - {}", guest.username, guest_err); + } + guest + .project + .as_ref() + .unwrap() + .read_with(&guest_cx, |project, _| assert!(project.is_read_only())); + for user_id in &user_ids { + for contact in server.store.read().await.contacts_for_user(*user_id) { + assert_ne!( + contact.user_id, removed_guest_id.0 as u64, + "removed guest is still a contact of another peer" + ); + for project in contact.projects { + for project_guest_id in project.guests { + assert_ne!( + project_guest_id, removed_guest_id.0 as u64, + "removed guest appears as still participating on a project" + ); + } + } + } + } + + log::info!("{} removed", guest.username); + available_guests.push(guest.username.clone()); + guest_cx.update(|_| drop(guest)); + + operations += 1; + } + _ => { + while operations < max_operations && rng.lock().gen_bool(0.7) { + op_start_signals + .choose(&mut *rng.lock()) + .unwrap() + .unbounded_send(()) + .unwrap(); + operations += 1; + } + + if rng.lock().gen_bool(0.8) { + cx.foreground().run_until_parked(); + } + } + } + } + + drop(op_start_signals); + let mut clients = futures::future::join_all(clients).await; + 
cx.foreground().run_until_parked(); + + let (host_client, mut host_cx, host_err) = clients.remove(0); + if let Some(host_err) = host_err { + panic!("host error - {}", host_err); + } + let host_project = host_client.project.as_ref().unwrap(); + let host_worktree_snapshots = host_project.read_with(&host_cx, |project, cx| { + project + .worktrees(cx) + .map(|worktree| { + let snapshot = worktree.read(cx).snapshot(); + (snapshot.id(), snapshot) + }) + .collect::>() + }); + + host_client + .project + .as_ref() + .unwrap() + .read_with(&host_cx, |project, cx| project.check_invariants(cx)); + + for (guest_client, mut guest_cx, guest_err) in clients.into_iter() { + if let Some(guest_err) = guest_err { + panic!("{} error - {}", guest_client.username, guest_err); + } + let worktree_snapshots = + guest_client + .project + .as_ref() + .unwrap() + .read_with(&guest_cx, |project, cx| { + project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>() + }); + + assert_eq!( + worktree_snapshots.keys().collect::>(), + host_worktree_snapshots.keys().collect::>(), + "{} has different worktrees than the host", + guest_client.username + ); + for (id, host_snapshot) in &host_worktree_snapshots { + let guest_snapshot = &worktree_snapshots[id]; + assert_eq!( + guest_snapshot.root_name(), + host_snapshot.root_name(), + "{} has different root name than the host for worktree {}", + guest_client.username, + id + ); + assert_eq!( + guest_snapshot.entries(false).collect::>(), + host_snapshot.entries(false).collect::>(), + "{} has different snapshot than the host for worktree {}", + guest_client.username, + id + ); + } + + guest_client + .project + .as_ref() + .unwrap() + .read_with(&guest_cx, |project, cx| project.check_invariants(cx)); + + for guest_buffer in &guest_client.buffers { + let buffer_id = guest_buffer.read_with(&guest_cx, |buffer, _| buffer.remote_id()); + let host_buffer = host_project.read_with(&host_cx, 
|project, cx| { + project.buffer_for_id(buffer_id, cx).expect(&format!( + "host does not have buffer for guest:{}, peer:{}, id:{}", + guest_client.username, guest_client.peer_id, buffer_id + )) + }); + let path = host_buffer + .read_with(&host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + + assert_eq!( + guest_buffer.read_with(&guest_cx, |buffer, _| buffer.deferred_ops_len()), + 0, + "{}, buffer {}, path {:?} has deferred operations", + guest_client.username, + buffer_id, + path, + ); + assert_eq!( + guest_buffer.read_with(&guest_cx, |buffer, _| buffer.text()), + host_buffer.read_with(&host_cx, |buffer, _| buffer.text()), + "{}, buffer {}, path {:?}, differs from the host's buffer", + guest_client.username, + buffer_id, + path + ); + } + + guest_cx.update(|_| drop(guest_client)); + } + + host_cx.update(|_| drop(host_client)); + } + + struct TestServer { + peer: Arc, + app_state: Arc, + server: Arc, + foreground: Rc, + notifications: mpsc::UnboundedReceiver<()>, + connection_killers: Arc>>>, + forbid_connections: Arc, + _test_db: TestDb, + } + + impl TestServer { + async fn start( + foreground: Rc, + background: Arc, + ) -> Self { + let test_db = TestDb::fake(background); + let app_state = Self::build_app_state(&test_db).await; + let peer = Peer::new(); + let notifications = mpsc::unbounded(); + let server = Server::new(app_state.clone(), Some(notifications.0)); + Self { + peer, + app_state, + server, + foreground, + notifications: notifications.1, + connection_killers: Default::default(), + forbid_connections: Default::default(), + _test_db: test_db, + } + } + + async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { + cx.update(|cx| { + let settings = Settings::test(cx); + cx.set_global(settings); + }); + + let http = FakeHttpClient::with_404_response(); + let user_id = self.app_state.db.create_user(name, false).await.unwrap(); + let client_name = name.to_string(); + let mut client = Client::new(http.clone()); + let 
server = self.server.clone(); + let connection_killers = self.connection_killers.clone(); + let forbid_connections = self.forbid_connections.clone(); + let (connection_id_tx, mut connection_id_rx) = mpsc::channel(16); + + Arc::get_mut(&mut client) + .unwrap() + .override_authenticate(move |cx| { + cx.spawn(|_| async move { + let access_token = "the-token".to_string(); + Ok(Credentials { + user_id: user_id.0 as u64, + access_token, + }) + }) + }) + .override_establish_connection(move |credentials, cx| { + assert_eq!(credentials.user_id, user_id.0 as u64); + assert_eq!(credentials.access_token, "the-token"); + + let server = server.clone(); + let connection_killers = connection_killers.clone(); + let forbid_connections = forbid_connections.clone(); + let client_name = client_name.clone(); + let connection_id_tx = connection_id_tx.clone(); + cx.spawn(move |cx| async move { + if forbid_connections.load(SeqCst) { + Err(EstablishConnectionError::other(anyhow!( + "server is forbidding connections" + ))) + } else { + let (client_conn, server_conn, killed) = + Connection::in_memory(cx.background()); + connection_killers.lock().insert(user_id, killed); + cx.background() + .spawn(server.handle_connection( + server_conn, + client_name, + user_id, + Some(connection_id_tx), + cx.background(), + )) + .detach(); + Ok(client_conn) + } + }) + }); + + client + .authenticate_and_connect(false, &cx.to_async()) + .await + .unwrap(); + + Channel::init(&client); + Project::init(&client); + cx.update(|cx| { + workspace::init(&client, cx); + }); + + let peer_id = PeerId(connection_id_rx.next().await.unwrap().0); + let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx)); + + let client = TestClient { + client, + peer_id, + username: name.to_string(), + user_store, + language_registry: Arc::new(LanguageRegistry::test()), + project: Default::default(), + buffers: Default::default(), + }; + client.wait_for_current_user(cx).await; + client + } + + fn disconnect_client(&self, 
user_id: UserId) { + self.connection_killers + .lock() + .remove(&user_id) + .unwrap() + .store(true, SeqCst); + } + + fn forbid_connections(&self) { + self.forbid_connections.store(true, SeqCst); + } + + fn allow_connections(&self) { + self.forbid_connections.store(false, SeqCst); + } + + async fn build_app_state(test_db: &TestDb) -> Arc { + Arc::new(AppState { + db: test_db.db().clone(), + api_token: Default::default(), + }) + } + + async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> { + self.server.store.read().await + } + + async fn condition(&mut self, mut predicate: F) + where + F: FnMut(&Store) -> bool, + { + assert!( + self.foreground.parking_forbidden(), + "you must call forbid_parking to use server conditions so we don't block indefinitely" + ); + while !(predicate)(&*self.server.store.read().await) { + self.foreground.start_waiting(); + self.notifications.next().await; + self.foreground.finish_waiting(); + } + } + } + + impl Deref for TestServer { + type Target = Server; + + fn deref(&self) -> &Self::Target { + &self.server + } + } + + impl Drop for TestServer { + fn drop(&mut self) { + self.peer.reset(); + } + } + + struct TestClient { + client: Arc, + username: String, + pub peer_id: PeerId, + pub user_store: ModelHandle, + language_registry: Arc, + project: Option>, + buffers: HashSet>, + } + + impl Deref for TestClient { + type Target = Arc; + + fn deref(&self) -> &Self::Target { + &self.client + } + } + + impl TestClient { + pub fn current_user_id(&self, cx: &TestAppContext) -> UserId { + UserId::from_proto( + self.user_store + .read_with(cx, |user_store, _| user_store.current_user().unwrap().id), + ) + } + + async fn wait_for_current_user(&self, cx: &TestAppContext) { + let mut authed_user = self + .user_store + .read_with(cx, |user_store, _| user_store.watch_current_user()); + while authed_user.next().await.unwrap().is_none() {} + } + + async fn build_local_project( + &mut self, + fs: Arc, + root_path: impl AsRef, + cx: &mut TestAppContext, 
+ ) -> (ModelHandle, WorktreeId) { + let project = cx.update(|cx| { + Project::local( + self.client.clone(), + self.user_store.clone(), + self.language_registry.clone(), + fs, + cx, + ) + }); + self.project = Some(project.clone()); + let (worktree, _) = project + .update(cx, |p, cx| { + p.find_or_create_local_worktree(root_path, true, cx) + }) + .await + .unwrap(); + worktree + .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) + .await; + project + .update(cx, |project, _| project.next_remote_id()) + .await; + (project, worktree.read_with(cx, |tree, _| tree.id())) + } + + async fn build_remote_project( + &mut self, + project_id: u64, + cx: &mut TestAppContext, + ) -> ModelHandle { + let project = Project::remote( + project_id, + self.client.clone(), + self.user_store.clone(), + self.language_registry.clone(), + FakeFs::new(cx.background()), + &mut cx.to_async(), + ) + .await + .unwrap(); + self.project = Some(project.clone()); + project + } + + fn build_workspace( + &self, + project: &ModelHandle, + cx: &mut TestAppContext, + ) -> ViewHandle { + let (window_id, _) = cx.add_window(|_| EmptyView); + cx.add_view(window_id, |cx| { + let fs = project.read(cx).fs().clone(); + Workspace::new( + &WorkspaceParams { + fs, + project: project.clone(), + user_store: self.user_store.clone(), + languages: self.language_registry.clone(), + themes: ThemeRegistry::new((), cx.font_cache().clone()), + channel_list: cx.add_model(|cx| { + ChannelList::new(self.user_store.clone(), self.client.clone(), cx) + }), + client: self.client.clone(), + }, + cx, + ) + }) + } + + async fn simulate_host( + mut self, + project: ModelHandle, + files: Arc>>, + op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, + rng: Arc>, + mut cx: TestAppContext, + ) -> (Self, TestAppContext, Option) { + async fn simulate_host_internal( + client: &mut TestClient, + project: ModelHandle, + files: Arc>>, + mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, + rng: Arc>, + 
cx: &mut TestAppContext, + ) -> anyhow::Result<()> { + let fs = project.read_with(cx, |project, _| project.fs().clone()); + + while op_start_signal.next().await.is_some() { + let distribution = rng.lock().gen_range::(0..100); + match distribution { + 0..=20 if !files.lock().is_empty() => { + let path = files.lock().choose(&mut *rng.lock()).unwrap().clone(); + let mut path = path.as_path(); + while let Some(parent_path) = path.parent() { + path = parent_path; + if rng.lock().gen() { + break; + } + } + + log::info!("Host: find/create local worktree {:?}", path); + let find_or_create_worktree = project.update(cx, |project, cx| { + project.find_or_create_local_worktree(path, true, cx) + }); + if rng.lock().gen() { + cx.background().spawn(find_or_create_worktree).detach(); + } else { + find_or_create_worktree.await?; + } + } + 10..=80 if !files.lock().is_empty() => { + let buffer = if client.buffers.is_empty() || rng.lock().gen() { + let file = files.lock().choose(&mut *rng.lock()).unwrap().clone(); + let (worktree, path) = project + .update(cx, |project, cx| { + project.find_or_create_local_worktree( + file.clone(), + true, + cx, + ) + }) + .await?; + let project_path = + worktree.read_with(cx, |worktree, _| (worktree.id(), path)); + log::info!( + "Host: opening path {:?}, worktree {}, relative_path {:?}", + file, + project_path.0, + project_path.1 + ); + let buffer = project + .update(cx, |project, cx| project.open_buffer(project_path, cx)) + .await + .unwrap(); + client.buffers.insert(buffer.clone()); + buffer + } else { + client + .buffers + .iter() + .choose(&mut *rng.lock()) + .unwrap() + .clone() + }; + + if rng.lock().gen_bool(0.1) { + cx.update(|cx| { + log::info!( + "Host: dropping buffer {:?}", + buffer.read(cx).file().unwrap().full_path(cx) + ); + client.buffers.remove(&buffer); + drop(buffer); + }); + } else { + buffer.update(cx, |buffer, cx| { + log::info!( + "Host: updating buffer {:?} ({})", + buffer.file().unwrap().full_path(cx), + buffer.remote_id() + 
); + + if rng.lock().gen_bool(0.7) { + buffer.randomly_edit(&mut *rng.lock(), 5, cx); + } else { + buffer.randomly_undo_redo(&mut *rng.lock(), cx); + } + }); + } + } + _ => loop { + let path_component_count = rng.lock().gen_range::(1..=5); + let mut path = PathBuf::new(); + path.push("/"); + for _ in 0..path_component_count { + let letter = rng.lock().gen_range(b'a'..=b'z'); + path.push(std::str::from_utf8(&[letter]).unwrap()); + } + path.set_extension("rs"); + let parent_path = path.parent().unwrap(); + + log::info!("Host: creating file {:?}", path,); + + if fs.create_dir(&parent_path).await.is_ok() + && fs.create_file(&path, Default::default()).await.is_ok() + { + files.lock().push(path); + break; + } else { + log::info!("Host: cannot create file"); + } + }, + } + + cx.background().simulate_random_delay().await; + } + + Ok(()) + } + + let result = simulate_host_internal( + &mut self, + project.clone(), + files, + op_start_signal, + rng, + &mut cx, + ) + .await; + log::info!("Host done"); + self.project = Some(project); + (self, cx, result.err()) + } + + pub async fn simulate_guest( + mut self, + guest_username: String, + project: ModelHandle, + op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, + rng: Arc>, + mut cx: TestAppContext, + ) -> (Self, TestAppContext, Option) { + async fn simulate_guest_internal( + client: &mut TestClient, + guest_username: &str, + project: ModelHandle, + mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>, + rng: Arc>, + cx: &mut TestAppContext, + ) -> anyhow::Result<()> { + while op_start_signal.next().await.is_some() { + let buffer = if client.buffers.is_empty() || rng.lock().gen() { + let worktree = if let Some(worktree) = + project.read_with(cx, |project, cx| { + project + .worktrees(&cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + }) + .choose(&mut *rng.lock()) + }) { + worktree + } else { + 
cx.background().simulate_random_delay().await; + continue; + }; + + let (worktree_root_name, project_path) = + worktree.read_with(cx, |worktree, _| { + let entry = worktree + .entries(false) + .filter(|e| e.is_file()) + .choose(&mut *rng.lock()) + .unwrap(); + ( + worktree.root_name().to_string(), + (worktree.id(), entry.path.clone()), + ) + }); + log::info!( + "{}: opening path {:?} in worktree {} ({})", + guest_username, + project_path.1, + project_path.0, + worktree_root_name, + ); + let buffer = project + .update(cx, |project, cx| { + project.open_buffer(project_path.clone(), cx) + }) + .await?; + log::info!( + "{}: opened path {:?} in worktree {} ({}) with buffer id {}", + guest_username, + project_path.1, + project_path.0, + worktree_root_name, + buffer.read_with(cx, |buffer, _| buffer.remote_id()) + ); + client.buffers.insert(buffer.clone()); + buffer + } else { + client + .buffers + .iter() + .choose(&mut *rng.lock()) + .unwrap() + .clone() + }; + + let choice = rng.lock().gen_range(0..100); + match choice { + 0..=9 => { + cx.update(|cx| { + log::info!( + "{}: dropping buffer {:?}", + guest_username, + buffer.read(cx).file().unwrap().full_path(cx) + ); + client.buffers.remove(&buffer); + drop(buffer); + }); + } + 10..=19 => { + let completions = project.update(cx, |project, cx| { + log::info!( + "{}: requesting completions for buffer {} ({:?})", + guest_username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.completions(&buffer, offset, cx) + }); + let completions = cx.background().spawn(async move { + completions + .await + .map_err(|err| anyhow!("completions request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching completions request", guest_username); + cx.update(|cx| completions.detach_and_log_err(cx)); + } else { + completions.await?; + } + } + 20..=29 => { + let code_actions = project.update(cx, 
|project, cx| { + log::info!( + "{}: requesting code actions for buffer {} ({:?})", + guest_username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock()); + project.code_actions(&buffer, range, cx) + }); + let code_actions = cx.background().spawn(async move { + code_actions.await.map_err(|err| { + anyhow!("code actions request failed: {:?}", err) + }) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching code actions request", guest_username); + cx.update(|cx| code_actions.detach_and_log_err(cx)); + } else { + code_actions.await?; + } + } + 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { + let (requested_version, save) = buffer.update(cx, |buffer, cx| { + log::info!( + "{}: saving buffer {} ({:?})", + guest_username, + buffer.remote_id(), + buffer.file().unwrap().full_path(cx) + ); + (buffer.version(), buffer.save(cx)) + }); + let save = cx.background().spawn(async move { + let (saved_version, _) = save + .await + .map_err(|err| anyhow!("save request failed: {:?}", err))?; + assert!(saved_version.observed_all(&requested_version)); + Ok::<_, anyhow::Error>(()) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching save request", guest_username); + cx.update(|cx| save.detach_and_log_err(cx)); + } else { + save.await?; + } + } + 40..=44 => { + let prepare_rename = project.update(cx, |project, cx| { + log::info!( + "{}: preparing rename for buffer {} ({:?})", + guest_username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.prepare_rename(buffer, offset, cx) + }); + let prepare_rename = cx.background().spawn(async move { + prepare_rename.await.map_err(|err| { + anyhow!("prepare rename request failed: {:?}", err) + }) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching prepare rename request", 
guest_username); + cx.update(|cx| prepare_rename.detach_and_log_err(cx)); + } else { + prepare_rename.await?; + } + } + 45..=49 => { + let definitions = project.update(cx, |project, cx| { + log::info!( + "{}: requesting definitions for buffer {} ({:?})", + guest_username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.definition(&buffer, offset, cx) + }); + let definitions = cx.background().spawn(async move { + definitions + .await + .map_err(|err| anyhow!("definitions request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching definitions request", guest_username); + cx.update(|cx| definitions.detach_and_log_err(cx)); + } else { + client + .buffers + .extend(definitions.await?.into_iter().map(|loc| loc.buffer)); + } + } + 50..=54 => { + let highlights = project.update(cx, |project, cx| { + log::info!( + "{}: requesting highlights for buffer {} ({:?})", + guest_username, + buffer.read(cx).remote_id(), + buffer.read(cx).file().unwrap().full_path(cx) + ); + let offset = rng.lock().gen_range(0..=buffer.read(cx).len()); + project.document_highlights(&buffer, offset, cx) + }); + let highlights = cx.background().spawn(async move { + highlights + .await + .map_err(|err| anyhow!("highlights request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + log::info!("{}: detaching highlights request", guest_username); + cx.update(|cx| highlights.detach_and_log_err(cx)); + } else { + highlights.await?; + } + } + 55..=59 => { + let search = project.update(cx, |project, cx| { + let query = rng.lock().gen_range('a'..='z'); + log::info!("{}: project-wide search {:?}", guest_username, query); + project.search(SearchQuery::text(query, false, false), cx) + }); + let search = cx.background().spawn(async move { + search + .await + .map_err(|err| anyhow!("search request failed: {:?}", err)) + }); + if rng.lock().gen_bool(0.3) { + 
log::info!("{}: detaching search request", guest_username); + cx.update(|cx| search.detach_and_log_err(cx)); + } else { + client.buffers.extend(search.await?.into_keys()); + } + } + _ => { + buffer.update(cx, |buffer, cx| { + log::info!( + "{}: updating buffer {} ({:?})", + guest_username, + buffer.remote_id(), + buffer.file().unwrap().full_path(cx) + ); + if rng.lock().gen_bool(0.7) { + buffer.randomly_edit(&mut *rng.lock(), 5, cx); + } else { + buffer.randomly_undo_redo(&mut *rng.lock(), cx); + } + }); + } + } + cx.background().simulate_random_delay().await; + } + Ok(()) + } + + let result = simulate_guest_internal( + &mut self, + &guest_username, + project.clone(), + op_start_signal, + rng, + &mut cx, + ) + .await; + log::info!("{}: done", guest_username); + + self.project = Some(project); + (self, cx, result.err()) + } + } + + impl Drop for TestClient { + fn drop(&mut self) { + self.client.tear_down(); + } + } + + impl Executor for Arc { + type Sleep = gpui::executor::Timer; + + fn spawn_detached>(&self, future: F) { + self.spawn(future).detach(); + } + + fn sleep(&self, duration: Duration) -> Self::Sleep { + self.as_ref().timer(duration) + } + } + + fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> { + channel + .messages() + .cursor::<()>() + .map(|m| { + ( + m.sender.github_login.clone(), + m.body.clone(), + m.is_pending(), + ) + }) + .collect() + } + + struct EmptyView; + + impl gpui::Entity for EmptyView { + type Event = (); + } + + impl gpui::View for EmptyView { + fn ui_name() -> &'static str { + "empty view" + } + + fn render(&mut self, _: &mut gpui::RenderContext) -> gpui::ElementBox { + gpui::Element::boxed(gpui::elements::Empty) + } + } +} diff --git a/crates/rpc/src/conn.rs b/crates/rpc/src/conn.rs index 53ba00a3c0e16257783a706b80f7bb832d69a4dc..dfb09a09131e1e3ba732b0f07edf96574b5861a1 100644 --- a/crates/rpc/src/conn.rs +++ b/crates/rpc/src/conn.rs @@ -1,14 +1,14 @@ -use async_tungstenite::tungstenite::{Error as WebSocketError, 
Message as WebSocketMessage}; +use async_tungstenite::tungstenite::Message as WebSocketMessage; use futures::{SinkExt as _, StreamExt as _}; pub struct Connection { pub(crate) tx: - Box>, + Box>, pub(crate) rx: Box< dyn 'static + Send + Unpin - + futures::Stream>, + + futures::Stream>, >, } @@ -18,8 +18,8 @@ impl Connection { S: 'static + Send + Unpin - + futures::Sink - + futures::Stream>, + + futures::Sink + + futures::Stream>, { let (tx, rx) = stream.split(); Self { @@ -28,7 +28,7 @@ impl Connection { } } - pub async fn send(&mut self, message: WebSocketMessage) -> Result<(), WebSocketError> { + pub async fn send(&mut self, message: WebSocketMessage) -> Result<(), anyhow::Error> { self.tx.send(message).await } @@ -54,40 +54,37 @@ impl Connection { killed: Arc, executor: Arc, ) -> ( - Box>, - Box< - dyn Send + Unpin + futures::Stream>, - >, + Box>, + Box>>, ) { + use anyhow::anyhow; use futures::channel::mpsc; use std::io::{Error, ErrorKind}; let (tx, rx) = mpsc::unbounded::(); - let tx = tx - .sink_map_err(|e| WebSocketError::from(Error::new(ErrorKind::Other, e))) - .with({ + let tx = tx.sink_map_err(|error| anyhow!(error)).with({ + let killed = killed.clone(); + let executor = Arc::downgrade(&executor); + move |msg| { let killed = killed.clone(); - let executor = Arc::downgrade(&executor); - move |msg| { - let killed = killed.clone(); - let executor = executor.clone(); - Box::pin(async move { - if let Some(executor) = executor.upgrade() { - executor.simulate_random_delay().await; - } + let executor = executor.clone(); + Box::pin(async move { + if let Some(executor) = executor.upgrade() { + executor.simulate_random_delay().await; + } - // Writes to a half-open TCP connection will error. - if killed.load(SeqCst) { - std::io::Result::Err( - Error::new(ErrorKind::Other, "connection lost").into(), - )?; - } + // Writes to a half-open TCP connection will error. 
+ if killed.load(SeqCst) { + std::io::Result::Err( + Error::new(ErrorKind::Other, "connection lost").into(), + )?; + } - Ok(msg) - }) - } - }); + Ok(msg) + }) + } + }); let rx = rx.then({ let killed = killed.clone(); diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index a9f6b80f8e8d7895d705657d023f2b95a7c073a9..2d3bf639f415334664181ada72832051360ec08c 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -1,6 +1,6 @@ use super::{ConnectionId, PeerId, TypedEnvelope}; -use anyhow::Result; -use async_tungstenite::tungstenite::{Error as WebSocketError, Message as WebSocketMessage}; +use anyhow::{anyhow, Result}; +use async_tungstenite::tungstenite::Message as WebSocketMessage; use futures::{SinkExt as _, StreamExt as _}; use prost::Message as _; use std::any::{Any, TypeId}; @@ -318,9 +318,9 @@ impl MessageStream { impl MessageStream where - S: futures::Sink + Unpin, + S: futures::Sink + Unpin, { - pub async fn write(&mut self, message: Message) -> Result<(), WebSocketError> { + pub async fn write(&mut self, message: Message) -> Result<(), anyhow::Error> { #[cfg(any(test, feature = "test-support"))] const COMPRESSION_LEVEL: i32 = -7; @@ -357,9 +357,9 @@ where impl MessageStream where - S: futures::Stream> + Unpin, + S: futures::Stream> + Unpin, { - pub async fn read(&mut self) -> Result { + pub async fn read(&mut self) -> Result { while let Some(bytes) = self.stream.next().await { match bytes? { WebSocketMessage::Binary(bytes) => { @@ -375,7 +375,7 @@ where _ => {} } } - Err(WebSocketError::ConnectionClosed) + Err(anyhow!("connection closed")) } }