Cargo.lock
@@ -7539,6 +7539,7 @@ dependencies = [
name = "gpui_tokio"
version = "0.1.0"
dependencies = [
+ "anyhow",
"gpui",
"tokio",
"util",
Created by Julia Ryan, Conrad Irwin, and Marshall Bowers.
This was causing panics due to the handles being dropped out of order.
It doesn't seem possible to guarantee the correct drop ordering given
that we're holding them over await points, so let's just spawn on the
tokio executor itself which gives us access to the state we needed those
handles for in the first place.
Fixes: ZED-1R
Release Notes:
- N/A
Co-authored-by: Conrad Irwin <conrad.irwin@gmail.com>
Co-authored-by: Marshall Bowers <git@maxdeviant.com>
Cargo.lock | 1
crates/client/src/client.rs | 26 ++++++++++--------
crates/cloud_api_client/src/cloud_api_client.rs | 8 -----
crates/gpui_tokio/Cargo.toml | 1
crates/gpui_tokio/src/gpui_tokio.rs | 22 ++++++++++++++++
5 files changed, 39 insertions(+), 19 deletions(-)
@@ -7539,6 +7539,7 @@ dependencies = [
name = "gpui_tokio"
version = "0.1.0"
dependencies = [
+ "anyhow",
"gpui",
"tokio",
"util",
@@ -1290,19 +1290,21 @@ impl Client {
"http" => Http,
_ => Err(anyhow!("invalid rpc url: {}", rpc_url))?,
};
- let rpc_host = rpc_url
- .host_str()
- .zip(rpc_url.port_or_known_default())
- .context("missing host in rpc url")?;
-
- let stream = {
- let handle = cx.update(|cx| gpui_tokio::Tokio::handle(cx)).ok().unwrap();
- let _guard = handle.enter();
- match proxy {
- Some(proxy) => connect_proxy_stream(&proxy, rpc_host).await?,
- None => Box::new(TcpStream::connect(rpc_host).await?),
+
+ let stream = gpui_tokio::Tokio::spawn_result(cx, {
+ let rpc_url = rpc_url.clone();
+ async move {
+ let rpc_host = rpc_url
+ .host_str()
+ .zip(rpc_url.port_or_known_default())
+ .context("missing host in rpc url")?;
+ Ok(match proxy {
+ Some(proxy) => connect_proxy_stream(&proxy, rpc_host).await?,
+ None => Box::new(TcpStream::connect(rpc_host).await?),
+ })
}
- };
+ })?
+ .await?;
log::info!("connected to rpc endpoint {}", rpc_url);
@@ -102,13 +102,7 @@ impl CloudApiClient {
let credentials = credentials.as_ref().context("no credentials provided")?;
let authorization_header = format!("{} {}", credentials.user_id, credentials.access_token);
- Ok(cx.spawn(async move |cx| {
- let handle = cx
- .update(|cx| Tokio::handle(cx))
- .ok()
- .context("failed to get Tokio handle")?;
- let _guard = handle.enter();
-
+ Ok(Tokio::spawn_result(cx, async move {
let ws = WebSocket::connect(connect_url)
.with_request(
request::Builder::new()
@@ -13,6 +13,7 @@ path = "src/gpui_tokio.rs"
doctest = false
[dependencies]
+anyhow.workspace = true
util.workspace = true
gpui.workspace = true
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
@@ -52,6 +52,28 @@ impl Tokio {
})
}
+ /// Spawns the given future on Tokio's thread pool, and returns it via a GPUI task
+ /// Note that the Tokio task will be cancelled if the GPUI task is dropped
+ pub fn spawn_result<C, Fut, R>(cx: &C, f: Fut) -> C::Result<Task<anyhow::Result<R>>>
+ where
+ C: AppContext,
+ Fut: Future<Output = anyhow::Result<R>> + Send + 'static,
+ R: Send + 'static,
+ {
+ cx.read_global(|tokio: &GlobalTokio, cx| {
+ let join_handle = tokio.runtime.spawn(f);
+ let abort_handle = join_handle.abort_handle();
+ let cancel = defer(move || {
+ abort_handle.abort();
+ });
+ cx.background_spawn(async move {
+ let result = join_handle.await?;
+ drop(cancel);
+ result
+ })
+ })
+ }
+
pub fn handle(cx: &App) -> tokio::runtime::Handle {
GlobalTokio::global(cx).runtime.handle().clone()
}