Detailed changes
@@ -13870,6 +13870,7 @@ dependencies = [
"toml 0.8.23",
"unindent",
"util",
+ "uuid",
"watch",
"windows 0.61.3",
"workspace",
@@ -13926,6 +13927,7 @@ dependencies = [
"nbformat",
"picker",
"project",
+ "remote",
"runtimelib",
"serde",
"serde_json",
@@ -472,7 +472,11 @@ message Envelope {
GetFoldingRangesResponse get_folding_ranges_response = 422;
GetRemoteProfilingData get_remote_profiling_data = 423;
- GetRemoteProfilingDataResponse get_remote_profiling_data_response = 424; // current max
+ GetRemoteProfilingDataResponse get_remote_profiling_data_response = 424;
+
+ SpawnKernel spawn_kernel = 426;
+ SpawnKernelResponse spawn_kernel_response = 427;
+ KillKernel kill_kernel = 428; // current max
}
reserved 87 to 88;
@@ -546,3 +550,21 @@ message FlushBufferedMessages {}
message FlushBufferedMessagesResponse {}
message RemoteStarted {}
+
+// Request to launch a Jupyter kernel process on the remote host.
+// Handled by `HeadlessProject::handle_spawn_kernel`.
+message SpawnKernel {
+ // Kernelspec name; echoed into the generated connection file.
+ string kernel_name = 1;
+ // Directory to run the kernel in; empty means the server's current dir.
+ string working_directory = 2;
+ uint64 project_id = 3;
+ // Executable to spawn. Empty means "probe python3, then python" and
+ // launch `-m ipykernel_launcher` with the generated connection file.
+ string command = 4;
+ // Arguments; the literal "{connection_file}" is replaced server-side
+ // with the path of the generated connection file.
+ repeated string args = 5;
+}
+
+// Reply carrying the id of the spawned kernel and the full JSON contents
+// of its Jupyter connection file (not a path — the file lives server-side).
+message SpawnKernelResponse {
+ string kernel_id = 1;
+ string connection_file = 2;
+}
+
+// Request to terminate a previously spawned kernel; acked with `Ack`.
+message KillKernel {
+ string kernel_id = 1;
+ uint64 project_id = 2;
+}
@@ -359,6 +359,9 @@ messages!(
(GetSharedAgentThreadResponse, Foreground),
(FindSearchCandidatesChunk, Background),
(FindSearchCandidatesCancelled, Background),
+ (SpawnKernel, Background),
+ (SpawnKernelResponse, Background),
+ (KillKernel, Background),
(GetRemoteProfilingData, Background),
(GetRemoteProfilingDataResponse, Background),
);
@@ -557,6 +560,8 @@ request_messages!(
(TrustWorktrees, Ack),
(RestrictWorktrees, Ack),
(FindSearchCandidatesChunk, Ack),
+ (SpawnKernel, SpawnKernelResponse),
+ (KillKernel, Ack),
(GetRemoteProfilingData, GetRemoteProfilingDataResponse),
);
@@ -624,6 +629,8 @@ entity_messages!(
SemanticTokens,
JoinProject,
LeaveProject,
+ SpawnKernel,
+ KillKernel,
LinkedEditingRange,
LoadCommitDiff,
LspQuery,
@@ -7,7 +7,7 @@ mod transport;
#[cfg(target_os = "windows")]
pub use remote_client::OpenWslPath;
pub use remote_client::{
- ConnectionIdentifier, ConnectionState, Interactive, RemoteArch, RemoteClient,
+ CommandTemplate, ConnectionIdentifier, ConnectionState, Interactive, RemoteArch, RemoteClient,
RemoteClientDelegate, RemoteClientEvent, RemoteConnection, RemoteConnectionOptions, RemoteOs,
RemotePlatform, connect,
};
@@ -72,6 +72,7 @@ watch.workspace = true
worktree.workspace = true
thiserror.workspace = true
rayon.workspace = true
+uuid = { workspace = true, features = ["v4"] }
[target.'cfg(not(windows))'.dependencies]
crash-handler.workspace = true
@@ -1,5 +1,6 @@
use anyhow::{Context as _, Result, anyhow};
use client::ProjectId;
+use collections::HashMap;
use collections::HashSet;
use language::File;
use lsp::LanguageServerId;
@@ -31,6 +32,7 @@ use rpc::{
AnyProtoClient, TypedEnvelope,
proto::{self, REMOTE_SERVER_PEER_ID, REMOTE_SERVER_PROJECT_ID},
};
+use smol::process::Child;
use settings::initial_server_settings_content;
use std::{
@@ -67,6 +69,7 @@ pub struct HeadlessProject {
// Used mostly to keep alive the toolchain store for RPC handlers.
// Local variant is used within LSP store, but that's a separate entity.
pub _toolchain_store: Entity<ToolchainStore>,
+ pub kernels: HashMap<String, Child>,
}
pub struct HeadlessAppState {
@@ -319,6 +322,9 @@ impl HeadlessProject {
HeadlessExtensionStore::handle_install_extension,
);
+ session.add_request_handler(cx.weak_entity(), Self::handle_spawn_kernel);
+ session.add_request_handler(cx.weak_entity(), Self::handle_kill_kernel);
+
BufferStore::init(&session);
WorktreeStore::init(&session);
SettingsObserver::init(&session);
@@ -351,6 +357,7 @@ impl HeadlessProject {
environment,
profiling_collector: gpui::ProfilingCollector::new(startup_time),
_toolchain_store: toolchain_store,
+ kernels: Default::default(),
}
}
@@ -895,6 +902,131 @@ impl HeadlessProject {
})
}
+ /// Handles the `SpawnKernel` RPC on the headless remote server.
+ ///
+ /// Allocates five free TCP ports, writes a Jupyter connection file into
+ /// the temp directory, spawns the kernel process (the requested command,
+ /// or a `python -m ipykernel_launcher` fallback), and registers the child
+ /// in `self.kernels` so `handle_kill_kernel` can terminate it later.
+ ///
+ /// Returns the kernel id plus the connection-file JSON so the client can
+ /// tunnel to the kernel's ports.
+ async fn handle_spawn_kernel(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::SpawnKernel>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::SpawnKernelResponse> {
+ // `Entity::update` on an async context is fallible; propagate the error
+ // instead of binding the raw `Result` (the `?` was previously missing,
+ // which left `fs` as a `Result` rather than the Fs handle).
+ let fs = this.update(&mut cx, |this, _| this.fs.clone())?;
+
+ // Pick five currently-free ports for shell/iopub/stdin/control/hb.
+ // NOTE(review): each listener is dropped as soon as its port is read, so
+ // another process could grab a port before the kernel binds it (TOCTOU).
+ // This mirrors common jupyter-client behavior, but worth confirming.
+ let mut ports = Vec::new();
+ for _ in 0..5 {
+ let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
+ let port = listener.local_addr()?.port();
+ ports.push(port);
+ }
+
+ // Standard Jupyter connection-file payload (fresh HMAC key per kernel).
+ let connection_info = serde_json::json!({
+ "shell_port": ports[0],
+ "iopub_port": ports[1],
+ "stdin_port": ports[2],
+ "control_port": ports[3],
+ "hb_port": ports[4],
+ "ip": "127.0.0.1",
+ "key": uuid::Uuid::new_v4().to_string(),
+ "transport": "tcp",
+ "signature_scheme": "hmac-sha256",
+ "kernel_name": envelope.payload.kernel_name,
+ });
+
+ let connection_file_content = serde_json::to_string_pretty(&connection_info)?;
+ let kernel_id = uuid::Uuid::new_v4().to_string();
+
+ let connection_file_path = std::env::temp_dir().join(format!("kernel-{}.json", kernel_id));
+ fs.save(
+ &connection_file_path,
+ &connection_file_content.as_str().into(),
+ language::LineEnding::Unix,
+ )
+ .await?;
+
+ // Fall back to the server process's cwd when the client sent none.
+ let working_directory = if envelope.payload.working_directory.is_empty() {
+ std::env::current_dir()
+ .ok()
+ .map(|p| p.to_string_lossy().into_owned())
+ } else {
+ Some(envelope.payload.working_directory)
+ };
+
+ // Builds and spawns the kernel command. A "{connection_file}" arg is
+ // substituted with the real file path; with no args we assume a Python
+ // interpreter and launch `ipykernel_launcher` directly.
+ let spawn_kernel = |binary: &str, args: &[String]| {
+ let mut command = smol::process::Command::new(binary);
+
+ if !args.is_empty() {
+ for arg in args {
+ if arg == "{connection_file}" {
+ command.arg(&connection_file_path);
+ } else {
+ command.arg(arg);
+ }
+ }
+ } else {
+ command
+ .arg("-m")
+ .arg("ipykernel_launcher")
+ .arg("-f")
+ .arg(&connection_file_path);
+ }
+
+ // Prepend the interpreter's bin dir to PATH (and set VIRTUAL_ENV) so
+ // subprocesses spawned from the kernel use the same Python environment.
+ let python_bin_dir = std::path::Path::new(binary).parent();
+ if let Some(bin_dir) = python_bin_dir {
+ if let Some(path_var) = std::env::var_os("PATH") {
+ let mut paths = std::env::split_paths(&path_var).collect::<Vec<_>>();
+ paths.insert(0, bin_dir.to_path_buf());
+ if let Ok(new_path) = std::env::join_paths(paths) {
+ command.env("PATH", new_path);
+ }
+ }
+
+ if let Some(venv_root) = bin_dir.parent() {
+ command.env("VIRTUAL_ENV", venv_root.to_string_lossy().to_string());
+ }
+ }
+
+ if let Some(wd) = &working_directory {
+ command.current_dir(wd);
+ }
+ command.spawn()
+ };
+
+ // Use the explicit command when provided; otherwise probe python3/python.
+ // `with_context` defers the format! allocation to the error path.
+ let child = if !envelope.payload.command.is_empty() {
+ spawn_kernel(&envelope.payload.command, &envelope.payload.args).with_context(|| {
+ format!(
+ "failed to spawn kernel process (command: {})",
+ envelope.payload.command
+ )
+ })?
+ } else {
+ spawn_kernel("python3", &[])
+ .or_else(|_| spawn_kernel("python", &[]))
+ .context("failed to spawn kernel process (tried python3 and python)")?
+ };
+
+ // Keep the child alive (and killable) by storing it on the project.
+ // Propagate the update error instead of silently discarding the Result.
+ this.update(&mut cx, |this, _cx| {
+ this.kernels.insert(kernel_id.clone(), child);
+ })?;
+
+ Ok(proto::SpawnKernelResponse {
+ kernel_id,
+ connection_file: connection_file_content,
+ })
+ }
+
+ /// Handles the `KillKernel` RPC: removes the child process registered by
+ /// `handle_spawn_kernel` and kills it. Acks even when the kernel id is
+ /// unknown, so repeated shutdowns are idempotent.
+ async fn handle_kill_kernel(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::KillKernel>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::Ack> {
+ let kernel_id = envelope.payload.kernel_id;
+ // `Entity::update` returns a Result; propagate it rather than matching
+ // on the un-unwrapped value as if it were an Option (missing `?` before).
+ let child = this.update(&mut cx, |this, _| this.kernels.remove(&kernel_id))?;
+ if let Some(mut child) = child {
+ // Best-effort kill: the process may already have exited.
+ child.kill().log_err();
+ }
+ Ok(proto::Ack {})
+ }
+
async fn handle_find_search_candidates(
this: Entity<Self>,
envelope: TypedEnvelope<proto::FindSearchCandidates>,
@@ -42,6 +42,7 @@ menu.workspace = true
multi_buffer.workspace = true
nbformat.workspace = true
project.workspace = true
+remote.workspace = true
runtimelib.workspace = true
serde.workspace = true
serde_json.workspace = true
@@ -75,3 +76,6 @@ tree-sitter-typescript.workspace = true
tree-sitter-python.workspace = true
workspace = { workspace = true, features = ["test-support"] }
util = { workspace = true, features = ["test-support"] }
+
+[package.metadata.cargo-machete]
+ignored = ["remote"]
@@ -22,6 +22,8 @@ pub enum KernelPickerEntry {
fn build_grouped_entries(store: &ReplStore, worktree_id: WorktreeId) -> Vec<KernelPickerEntry> {
let mut entries = Vec::new();
let mut recommended_entry: Option<KernelPickerEntry> = None;
+ let mut found_selected = false;
+ let selected_kernel = store.selected_kernel(worktree_id);
let mut python_envs = Vec::new();
let mut jupyter_kernels = Vec::new();
@@ -29,8 +31,15 @@ fn build_grouped_entries(store: &ReplStore, worktree_id: WorktreeId) -> Vec<Kern
for spec in store.kernel_specifications_for_worktree(worktree_id) {
let is_recommended = store.is_recommended_kernel(worktree_id, spec);
+ let is_selected = selected_kernel.map_or(false, |s| s == spec);
- if is_recommended {
+ if is_selected {
+ recommended_entry = Some(KernelPickerEntry::Kernel {
+ spec: spec.clone(),
+ is_recommended: true,
+ });
+ found_selected = true;
+ } else if is_recommended && !found_selected {
recommended_entry = Some(KernelPickerEntry::Kernel {
spec: spec.clone(),
is_recommended: true,
@@ -50,7 +59,9 @@ fn build_grouped_entries(store: &ReplStore, worktree_id: WorktreeId) -> Vec<Kern
is_recommended,
});
}
- KernelSpecification::Remote(_) => {
+ KernelSpecification::JupyterServer(_)
+ | KernelSpecification::SshRemote(_)
+ | KernelSpecification::WslRemote(_) => {
remote_kernels.push(KernelPickerEntry::Kernel {
spec: spec.clone(),
is_recommended,
@@ -314,7 +325,10 @@ impl PickerDelegate for KernelPickerDelegate {
let subtitle = match spec {
KernelSpecification::Jupyter(_) => None,
- KernelSpecification::PythonEnv(_) | KernelSpecification::Remote(_) => {
+ KernelSpecification::PythonEnv(_)
+ | KernelSpecification::JupyterServer(_)
+ | KernelSpecification::SshRemote(_)
+ | KernelSpecification::WslRemote(_) => {
let env_kind = spec.environment_kind_label();
let path = spec.path();
match env_kind {
@@ -4,19 +4,147 @@ use std::{fmt::Debug, future::Future, path::PathBuf};
use futures::{channel::mpsc, future::Shared};
use gpui::{App, Entity, Task, Window};
use language::LanguageName;
+use log;
pub use native_kernel::*;
mod remote_kernels;
use project::{Project, ProjectPath, Toolchains, WorktreeId};
pub use remote_kernels::*;
+mod ssh_kernel;
+pub use ssh_kernel::*;
+
+mod wsl_kernel;
+pub use wsl_kernel::*;
+
+use std::collections::HashMap;
+
use anyhow::Result;
-use gpui::Context;
-use jupyter_protocol::JupyterKernelspec;
-use runtimelib::{ExecutionState, JupyterMessage, KernelInfoReply};
+use futures::{FutureExt, StreamExt};
+use gpui::{AppContext, AsyncWindowContext, Context};
+use jupyter_protocol::{JupyterKernelspec, JupyterMessageContent};
+use runtimelib::{
+ ClientControlConnection, ClientIoPubConnection, ClientShellConnection, ClientStdinConnection,
+ ExecutionState, JupyterMessage, KernelInfoReply,
+};
use ui::{Icon, IconName, SharedString};
use util::rel_path::RelPath;
+/// Wires a kernel session to its four ZMQ-style Jupyter sockets.
+///
+/// Spawns three tasks: a receive loop that multiplexes iopub/shell/control/
+/// stdin reads into `KernelSession::route`, a routing task that sends
+/// outbound requests to the control or shell socket depending on message
+/// type, and a stdin forwarder. A detached supervisor watches all three and
+/// reports the first failure via `KernelSession::kernel_errored`.
+///
+/// Returns `(request_tx, stdin_tx)`: senders for outbound kernel requests
+/// and stdin replies, respectively.
+pub fn start_kernel_tasks<S: KernelSession + 'static>(
+ session: Entity<S>,
+ iopub_socket: ClientIoPubConnection,
+ shell_socket: ClientShellConnection,
+ control_socket: ClientControlConnection,
+ stdin_socket: ClientStdinConnection,
+ cx: &mut AsyncWindowContext,
+) -> (
+ futures::channel::mpsc::Sender<JupyterMessage>,
+ futures::channel::mpsc::Sender<JupyterMessage>,
+) {
+ // Split bidirectional sockets; receive halves feed the recv loop below,
+ // send halves are moved into the routing tasks.
+ let (mut shell_send, shell_recv) = shell_socket.split();
+ let (mut control_send, control_recv) = control_socket.split();
+ let (mut stdin_send, stdin_recv) = stdin_socket.split();
+
+ let (request_tx, mut request_rx) = futures::channel::mpsc::channel::<JupyterMessage>(100);
+ let (stdin_tx, mut stdin_rx) = futures::channel::mpsc::channel::<JupyterMessage>(100);
+
+ // Receive loop: race reads on all four channels, forward each message to
+ // the session for routing.
+ let recv_task = cx.spawn({
+ let session = session.clone();
+ let mut iopub = iopub_socket;
+ let mut shell = shell_recv;
+ let mut control = control_recv;
+ let mut stdin = stdin_recv;
+
+ async move |cx| -> anyhow::Result<()> {
+ loop {
+ let (channel, result) = futures::select! {
+ msg = iopub.read().fuse() => ("iopub", msg),
+ msg = shell.read().fuse() => ("shell", msg),
+ msg = control.read().fuse() => ("control", msg),
+ msg = stdin.read().fuse() => ("stdin", msg),
+ };
+ match result {
+ Ok(message) => {
+ session
+ .update_in(cx, |session, window, cx| {
+ session.route(&message, window, cx);
+ })
+ .ok();
+ }
+ // Parse/serde errors are recoverable: surface them to the
+ // session but keep the loop (and the kernel) running.
+ Err(
+ ref err @ (runtimelib::RuntimeError::ParseError { .. }
+ | runtimelib::RuntimeError::SerdeError(_)),
+ ) => {
+ let error_detail = format!("Kernel issue on {channel} channel\n\n{err}");
+ log::warn!("kernel: {error_detail}");
+ session
+ .update_in(cx, |session, _window, cx| {
+ session.kernel_errored(error_detail, cx);
+ cx.notify();
+ })
+ .ok();
+ }
+ // Any other read error is fatal for this connection: bail so
+ // the supervisor below reports it.
+ Err(err) => {
+ log::warn!("kernel: error reading from {channel}: {err:?}");
+ anyhow::bail!("{channel} recv: {err}");
+ }
+ }
+ }
+ }
+ });
+
+ // Outbound routing: debug/interrupt/shutdown go to the control socket,
+ // everything else to shell (per the Jupyter messaging spec).
+ let routing_task = cx.background_spawn(async move {
+ while let Some(message) = request_rx.next().await {
+ match message.content {
+ JupyterMessageContent::DebugRequest(_)
+ | JupyterMessageContent::InterruptRequest(_)
+ | JupyterMessageContent::ShutdownRequest(_) => {
+ control_send.send(message).await?;
+ }
+ _ => {
+ shell_send.send(message).await?;
+ }
+ }
+ }
+ anyhow::Ok(())
+ });
+
+ // Forward stdin replies (e.g. input_request answers) to the kernel.
+ let stdin_routing_task = cx.background_spawn(async move {
+ while let Some(message) = stdin_rx.next().await {
+ stdin_send.send(message).await?;
+ }
+ anyhow::Ok(())
+ });
+
+ // Supervisor: await all three tasks and report whichever fails first.
+ cx.spawn({
+ async move |cx| {
+ // Tags a task's result with a human-readable name for error text.
+ async fn with_name(
+ name: &'static str,
+ task: Task<Result<()>>,
+ ) -> (&'static str, Result<()>) {
+ (name, task.await)
+ }
+
+ let mut tasks = futures::stream::FuturesUnordered::new();
+ tasks.push(with_name("recv task", recv_task));
+ tasks.push(with_name("routing task", routing_task));
+ tasks.push(with_name("stdin routing task", stdin_routing_task));
+
+ while let Some((name, result)) = tasks.next().await {
+ if let Err(err) = result {
+ // NOTE(review): this `update` returns a must_use Result
+ // that is dropped here (other call sites use `.ok()`), and
+ // there is no `log::error!` fallback if the session entity
+ // is gone — confirm whether that logging was dropped
+ // intentionally.
+ session.update(cx, |session, cx| {
+ session.kernel_errored(format!("handling failed for {name}: {err}"), cx);
+ cx.notify();
+ });
+ }
+ }
+ }
+ })
+ .detach();
+
+ (request_tx, stdin_tx)
+}
+
pub trait KernelSession: Sized {
fn route(&mut self, message: &JupyterMessage, window: &mut Window, cx: &mut Context<Self>);
fn kernel_errored(&mut self, error_message: String, cx: &mut Context<Self>);
@@ -52,17 +180,65 @@ impl PythonEnvKernelSpecification {
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum KernelSpecification {
- Remote(RemoteKernelSpecification),
+ JupyterServer(RemoteKernelSpecification),
Jupyter(LocalKernelSpecification),
PythonEnv(PythonEnvKernelSpecification),
+ SshRemote(SshRemoteKernelSpecification),
+ WslRemote(WslKernelSpecification),
+}
+
+// Kernelspec for a kernel launched on an SSH remote via the SpawnKernel RPC.
+#[derive(Debug, Clone)]
+pub struct SshRemoteKernelSpecification {
+ // Display name, e.g. "Remote <toolchain name>".
+ pub name: String,
+ // Remote path to the Python interpreter (from the toolchain).
+ pub path: SharedString,
+ // argv/env template; "{connection_file}" in argv is substituted at spawn.
+ pub kernelspec: JupyterKernelspec,
+}
+
+// Kernelspec for a kernel run inside a WSL distribution (Windows hosts).
+#[derive(Debug, Clone)]
+pub struct WslKernelSpecification {
+ pub name: String,
+ pub kernelspec: JupyterKernelspec,
+ // WSL distro name, passed to `wsl -d <distro>`.
+ pub distro: String,
+}
+
+// Manual field-by-field comparison — presumably because `JupyterKernelspec`
+// does not derive `PartialEq` itself (TODO confirm); compares every
+// kernelspec field explicitly so two specs with identical launch behavior
+// compare equal.
+impl PartialEq for SshRemoteKernelSpecification {
+ fn eq(&self, other: &Self) -> bool {
+ self.name == other.name
+ && self.kernelspec.argv == other.kernelspec.argv
+ && self.path == other.path
+ && self.kernelspec.display_name == other.kernelspec.display_name
+ && self.kernelspec.language == other.kernelspec.language
+ && self.kernelspec.interrupt_mode == other.kernelspec.interrupt_mode
+ && self.kernelspec.env == other.kernelspec.env
+ && self.kernelspec.metadata == other.kernelspec.metadata
+ }
+}
+
+impl Eq for SshRemoteKernelSpecification {}
+
+impl PartialEq for WslKernelSpecification {
+ fn eq(&self, other: &Self) -> bool {
+ self.name == other.name
+ && self.kernelspec.argv == other.kernelspec.argv
+ && self.kernelspec.display_name == other.kernelspec.display_name
+ && self.kernelspec.language == other.kernelspec.language
+ && self.kernelspec.interrupt_mode == other.kernelspec.interrupt_mode
+ && self.kernelspec.env == other.kernelspec.env
+ && self.kernelspec.metadata == other.kernelspec.metadata
+ && self.distro == other.distro
+ }
}
+impl Eq for WslKernelSpecification {}
+
impl KernelSpecification {
pub fn name(&self) -> SharedString {
match self {
Self::Jupyter(spec) => spec.name.clone().into(),
Self::PythonEnv(spec) => spec.name.clone().into(),
- Self::Remote(spec) => spec.name.clone().into(),
+ Self::JupyterServer(spec) => spec.name.clone().into(),
+ Self::SshRemote(spec) => spec.name.clone().into(),
+ Self::WslRemote(spec) => spec.name.clone().into(),
}
}
@@ -74,7 +250,9 @@ impl KernelSpecification {
.clone()
.unwrap_or_else(|| "Python Environment".to_string()),
),
- Self::Remote(_) => "Remote".into(),
+ Self::JupyterServer(_) => "Jupyter Server".into(),
+ Self::SshRemote(_) => "SSH Remote".into(),
+ Self::WslRemote(_) => "WSL Remote".into(),
}
}
@@ -82,7 +260,9 @@ impl KernelSpecification {
SharedString::from(match self {
Self::Jupyter(spec) => spec.path.to_string_lossy().into_owned(),
Self::PythonEnv(spec) => spec.path.to_string_lossy().into_owned(),
- Self::Remote(spec) => spec.url.to_string(),
+ Self::JupyterServer(spec) => spec.url.to_string(),
+ Self::SshRemote(spec) => spec.path.to_string(),
+ Self::WslRemote(_) => "WSL".to_string(),
})
}
@@ -90,13 +270,17 @@ impl KernelSpecification {
SharedString::from(match self {
Self::Jupyter(spec) => spec.kernelspec.language.clone(),
Self::PythonEnv(spec) => spec.kernelspec.language.clone(),
- Self::Remote(spec) => spec.kernelspec.language.clone(),
+ Self::JupyterServer(spec) => spec.kernelspec.language.clone(),
+ Self::SshRemote(spec) => spec.kernelspec.language.clone(),
+ Self::WslRemote(spec) => spec.kernelspec.language.clone(),
})
}
pub fn has_ipykernel(&self) -> bool {
match self {
- Self::Jupyter(_) | Self::Remote(_) => true,
+ Self::Jupyter(_) | Self::JupyterServer(_) | Self::SshRemote(_) | Self::WslRemote(_) => {
+ true
+ }
Self::PythonEnv(spec) => spec.has_ipykernel,
}
}
@@ -108,7 +292,9 @@ impl KernelSpecification {
.as_ref()
.map(|kind| SharedString::from(kind.clone())),
Self::Jupyter(_) => Some("Jupyter".into()),
- Self::Remote(_) => Some("Remote".into()),
+ Self::JupyterServer(_) => Some("Jupyter Server".into()),
+ Self::SshRemote(_) => Some("SSH Remote".into()),
+ Self::WslRemote(_) => Some("WSL Remote".into()),
}
}
@@ -116,7 +302,9 @@ impl KernelSpecification {
let lang_name = match self {
Self::Jupyter(spec) => spec.kernelspec.language.clone(),
Self::PythonEnv(spec) => spec.kernelspec.language.clone(),
- Self::Remote(spec) => spec.kernelspec.language.clone(),
+ Self::JupyterServer(spec) => spec.kernelspec.language.clone(),
+ Self::SshRemote(spec) => spec.kernelspec.language.clone(),
+ Self::WslRemote(spec) => spec.kernelspec.language.clone(),
};
file_icons::FileIcons::get(cx)
@@ -159,6 +347,9 @@ pub fn python_env_kernel_specifications(
cx: &mut App,
) -> impl Future<Output = Result<Vec<KernelSpecification>>> + use<> {
let python_language = LanguageName::new_static("Python");
+ let is_remote = project.read(cx).is_remote();
+ log::info!("python_env_kernel_specifications: is_remote: {}", is_remote);
+
let toolchains = project.read(cx).available_toolchains(
ProjectPath {
worktree_id,
@@ -167,6 +358,12 @@ pub fn python_env_kernel_specifications(
python_language,
cx,
);
+ #[allow(unused)]
+ let worktree_root_path: Option<std::sync::Arc<std::path::Path>> = project
+ .read(cx)
+ .worktree_for_id(worktree_id, cx)
+ .map(|w| w.read(cx).abs_path());
+
let background_executor = cx.background_executor().clone();
async move {
@@ -187,6 +384,40 @@ pub fn python_env_kernel_specifications(
.chain(toolchains.toolchains)
.map(|toolchain| {
background_executor.spawn(async move {
+ // For remote projects, we assume python is available assuming toolchain is reported.
+ // We can skip the `ipykernel` check or run it remotely.
+ // For MVP, lets trust the toolchain existence or do the check if it's cheap.
+ // `new_smol_command` runs locally. We need to run remotely if `is_remote`.
+
+ if is_remote {
+ log::info!(
+ "python_env_kernel_specifications: returning SshRemote for toolchain {}",
+ toolchain.name
+ );
+ let default_kernelspec = JupyterKernelspec {
+ argv: vec![
+ toolchain.path.to_string(),
+ "-m".to_string(),
+ "ipykernel_launcher".to_string(),
+ "-f".to_string(),
+ "{connection_file}".to_string(),
+ ],
+ display_name: toolchain.name.to_string(),
+ language: "python".to_string(),
+ interrupt_mode: None,
+ metadata: None,
+ env: None,
+ };
+
+ return Some(KernelSpecification::SshRemote(
+ SshRemoteKernelSpecification {
+ name: format!("Remote {}", toolchain.name),
+ path: toolchain.path.clone(),
+ kernelspec: default_kernelspec,
+ },
+ ));
+ }
+
let python_path = toolchain.path.to_string();
let environment_kind = extract_environment_kind(&toolchain.as_json);
@@ -197,6 +428,32 @@ pub fn python_env_kernel_specifications(
.map(|output| output.status.success())
.unwrap_or(false);
+ let mut env = HashMap::new();
+ if let Some(python_bin_dir) = PathBuf::from(&python_path).parent() {
+ if let Some(path_var) = std::env::var_os("PATH") {
+ let mut paths = std::env::split_paths(&path_var).collect::<Vec<_>>();
+ paths.insert(0, python_bin_dir.to_path_buf());
+ if let Ok(new_path) = std::env::join_paths(paths) {
+ env.insert("PATH".to_string(), new_path.to_string_lossy().to_string());
+ }
+ }
+
+ if let Some(venv_root) = python_bin_dir.parent() {
+ env.insert("VIRTUAL_ENV".to_string(), venv_root.to_string_lossy().to_string());
+ }
+ }
+
+ log::info!("Preparing Python kernel for toolchain: {}", toolchain.name);
+ log::info!("Python path: {}", python_path);
+ if let Some(path) = env.get("PATH") {
+ log::info!("Kernel PATH: {}", path);
+ } else {
+ log::info!("Kernel PATH not set in env");
+ }
+ if let Some(venv) = env.get("VIRTUAL_ENV") {
+ log::info!("Kernel VIRTUAL_ENV: {}", venv);
+ }
+
let kernelspec = JupyterKernelspec {
argv: vec![
python_path.clone(),
@@ -209,20 +466,113 @@ pub fn python_env_kernel_specifications(
language: "python".to_string(),
interrupt_mode: None,
metadata: None,
- env: None,
+ env: Some(env),
};
- KernelSpecification::PythonEnv(PythonEnvKernelSpecification {
+ Some(KernelSpecification::PythonEnv(PythonEnvKernelSpecification {
name: toolchain.name.to_string(),
path: PathBuf::from(&python_path),
kernelspec,
has_ipykernel,
environment_kind,
- })
+ }))
})
});
- let kernel_specs = futures::future::join_all(kernelspecs).await;
+ #[allow(unused_mut)]
+ let mut kernel_specs: Vec<KernelSpecification> = futures::future::join_all(kernelspecs)
+ .await
+ .into_iter()
+ .flatten()
+ .collect();
+
+ #[cfg(target_os = "windows")]
+ if kernel_specs.is_empty() && !is_remote {
+ if let Some(root_path) = worktree_root_path {
+ let root_path_str: std::borrow::Cow<str> = root_path.to_string_lossy();
+ let (distro, internal_path) =
+ if let Some(path_without_prefix) = root_path_str.strip_prefix(r"\\wsl$\") {
+ if let Some((distro, path)) = path_without_prefix.split_once('\\') {
+ let replaced_path: String = path.replace('\\', "/");
+ (Some(distro), Some(format!("/{}", replaced_path)))
+ } else {
+ (Some(path_without_prefix), Some("/".to_string()))
+ }
+ } else if let Some(path_without_prefix) =
+ root_path_str.strip_prefix(r"\\wsl.localhost\")
+ {
+ if let Some((distro, path)) = path_without_prefix.split_once('\\') {
+ let replaced_path: String = path.replace('\\', "/");
+ (Some(distro), Some(format!("/{}", replaced_path)))
+ } else {
+ (Some(path_without_prefix), Some("/".to_string()))
+ }
+ } else {
+ (None, None)
+ };
+
+ if let (Some(distro), Some(internal_path)) = (distro, internal_path) {
+ let python_path = format!("{}/.venv/bin/python", internal_path);
+ let check = util::command::new_command("wsl")
+ .args(&["-d", distro, "test", "-f", &python_path])
+ .output()
+ .await;
+
+ if check.is_ok() && check.unwrap().status.success() {
+ let default_kernelspec = JupyterKernelspec {
+ argv: vec![
+ python_path.clone(),
+ "-m".to_string(),
+ "ipykernel_launcher".to_string(),
+ "-f".to_string(),
+ "{connection_file}".to_string(),
+ ],
+ display_name: format!("WSL: {} (.venv)", distro),
+ language: "python".to_string(),
+ interrupt_mode: None,
+ metadata: None,
+ env: None,
+ };
+
+ kernel_specs.push(KernelSpecification::WslRemote(WslKernelSpecification {
+ name: format!("WSL: {} (.venv)", distro),
+ kernelspec: default_kernelspec,
+ distro: distro.to_string(),
+ }));
+ } else {
+ let check_system = util::command::new_command("wsl")
+ .args(&["-d", distro, "command", "-v", "python3"])
+ .output()
+ .await;
+
+ if check_system.is_ok() && check_system.unwrap().status.success() {
+ let default_kernelspec = JupyterKernelspec {
+ argv: vec![
+ "python3".to_string(),
+ "-m".to_string(),
+ "ipykernel_launcher".to_string(),
+ "-f".to_string(),
+ "{connection_file}".to_string(),
+ ],
+ display_name: format!("WSL: {} (System)", distro),
+ language: "python".to_string(),
+ interrupt_mode: None,
+ metadata: None,
+ env: None,
+ };
+
+ kernel_specs.push(KernelSpecification::WslRemote(
+ WslKernelSpecification {
+ name: format!("WSL: {} (System)", distro),
+ kernelspec: default_kernelspec,
+ distro: distro.to_string(),
+ },
+ ));
+ }
+ }
+ }
+ }
+ }
anyhow::Ok(kernel_specs)
}
@@ -1,17 +1,16 @@
use anyhow::{Context as _, Result};
use futures::{
- AsyncBufReadExt as _, FutureExt as _, StreamExt as _,
+ AsyncBufReadExt as _, StreamExt as _,
channel::mpsc::{self},
io::BufReader,
- stream::FuturesUnordered,
};
-use gpui::{App, AppContext as _, ClipboardItem, Entity, EntityId, Task, Window};
+use gpui::{App, Entity, EntityId, Task, Window};
use jupyter_protocol::{
- ExecutionState, JupyterKernelspec, JupyterMessage, JupyterMessageContent, KernelInfoReply,
+ ExecutionState, JupyterKernelspec, JupyterMessage, KernelInfoReply,
connection_info::{ConnectionInfo, Transport},
};
use project::Fs;
-use runtimelib::{RuntimeError, dirs};
+use runtimelib::dirs;
use smol::net::TcpListener;
use std::{
env,
@@ -23,7 +22,7 @@ use std::{
use util::command::Command;
use uuid::Uuid;
-use super::{KernelSession, RunningKernel};
+use super::{KernelSession, RunningKernel, start_kernel_tasks};
#[derive(Debug, Clone)]
pub struct LocalKernelSpecification {
@@ -64,7 +63,13 @@ impl LocalKernelSpecification {
}
if let Some(env) = &self.kernelspec.env {
+ log::info!(
+ "LocalKernelSpecification: applying env to command: {:?}",
+ env.keys()
+ );
cmd.envs(env);
+ } else {
+ log::info!("LocalKernelSpecification: no env in kernelspec");
}
Ok(cmd)
@@ -160,13 +165,12 @@ impl NativeRunningKernel {
runtimelib::create_client_control_connection(&connection_info, &session_id).await?;
let peer_identity = runtimelib::peer_identity_for_session(&session_id)?;
- let shell_socket =
- runtimelib::create_client_shell_connection_with_identity(
- &connection_info,
- &session_id,
- peer_identity.clone(),
- )
- .await?;
+ let shell_socket = runtimelib::create_client_shell_connection_with_identity(
+ &connection_info,
+ &session_id,
+ peer_identity.clone(),
+ )
+ .await?;
let stdin_socket = runtimelib::create_client_stdin_connection_with_identity(
&connection_info,
&session_id,
@@ -174,112 +178,14 @@ impl NativeRunningKernel {
)
.await?;
- let (mut shell_send, shell_recv) = shell_socket.split();
- let (mut control_send, control_recv) = control_socket.split();
- let (mut stdin_send, stdin_recv) = stdin_socket.split();
-
- let (request_tx, mut request_rx) =
- futures::channel::mpsc::channel::<JupyterMessage>(100);
- let (stdin_tx, mut stdin_rx) =
- futures::channel::mpsc::channel::<JupyterMessage>(100);
-
- let recv_task = cx.spawn({
- let session = session.clone();
- let mut iopub = iopub_socket;
- let mut shell = shell_recv;
- let mut control = control_recv;
- let mut stdin = stdin_recv;
-
- async move |cx| -> anyhow::Result<()> {
- loop {
- let (channel, result) = futures::select! {
- msg = iopub.read().fuse() => ("iopub", msg),
- msg = shell.read().fuse() => ("shell", msg),
- msg = control.read().fuse() => ("control", msg),
- msg = stdin.read().fuse() => ("stdin", msg),
- };
- match result {
- Ok(message) => {
- session
- .update_in(cx, |session, window, cx| {
- session.route(&message, window, cx);
- })
- .ok();
- }
- Err(
- ref err @ (RuntimeError::ParseError { .. }
- | RuntimeError::SerdeError(_)),
- ) => {
- let error_detail =
- format!("Kernel issue on {channel} channel\n\n{err}");
- log::warn!("kernel: {error_detail}");
- let workspace_window = session
- .update_in(cx, |_, window, _cx| {
- window
- .window_handle()
- .downcast::<workspace::Workspace>()
- })
- .ok()
- .flatten();
- if let Some(workspace_window) = workspace_window {
- workspace_window
- .update(cx, |workspace, _window, cx| {
- struct KernelReadError;
- workspace.show_toast(
- workspace::Toast::new(
- workspace::notifications::NotificationId::unique::<KernelReadError>(),
- error_detail.clone(),
- )
- .on_click(
- "Copy Error",
- move |_window, cx| {
- cx.write_to_clipboard(
- ClipboardItem::new_string(
- error_detail.clone(),
- ),
- );
- },
- ),
- cx,
- );
- })
- .ok();
- }
- }
- Err(err) => {
- anyhow::bail!("{channel} recv: {err}");
- }
- }
- }
- }
- });
-
- let routing_task = cx.background_spawn({
- async move {
- while let Some(message) = request_rx.next().await {
- match message.content {
- JupyterMessageContent::DebugRequest(_)
- | JupyterMessageContent::InterruptRequest(_)
- | JupyterMessageContent::ShutdownRequest(_) => {
- control_send.send(message).await?;
- }
- _ => {
- shell_send.send(message).await?;
- }
- }
- }
- anyhow::Ok(())
- }
- });
-
- let stdin_routing_task = cx.background_spawn({
- async move {
- while let Some(message) = stdin_rx.next().await {
- stdin_send.send(message).await?;
- }
- anyhow::Ok(())
- }
- });
+ let (request_tx, stdin_tx) = start_kernel_tasks(
+ session.clone(),
+ iopub_socket,
+ shell_socket,
+ control_socket,
+ stdin_socket,
+ cx,
+ );
let stderr = process.stderr.take();
let stdout = process.stdout.take();
@@ -310,38 +216,6 @@ impl NativeRunningKernel {
})
.detach();
- cx.spawn({
- let session = session.clone();
- async move |cx| {
- async fn with_name(
- name: &'static str,
- task: Task<Result<()>>,
- ) -> (&'static str, Result<()>) {
- (name, task.await)
- }
-
- let mut tasks = FuturesUnordered::new();
- tasks.push(with_name("recv task", recv_task));
- tasks.push(with_name("routing task", routing_task));
- tasks.push(with_name("stdin routing task", stdin_routing_task));
-
- while let Some((name, result)) = tasks.next().await {
- if let Err(err) = result {
- log::error!("kernel: handling failed for {name}: {err:?}");
-
- session.update(cx, |session, cx| {
- session.kernel_errored(
- format!("handling failed for {name}: {err}"),
- cx,
- );
- cx.notify();
- });
- }
- }
- }
- })
- .detach();
-
let status = process.status();
let process_status_task = cx.spawn(async move |cx| {
@@ -0,0 +1,312 @@
+use super::{KernelSession, RunningKernel, SshRemoteKernelSpecification, start_kernel_tasks};
+use anyhow::{Context as _, Result};
+use client::proto;
+
+use futures::{
+ AsyncBufReadExt as _, StreamExt as _,
+ channel::mpsc::{self},
+ io::BufReader,
+};
+use gpui::{App, Entity, Task, Window};
+use project::Project;
+use runtimelib::{ExecutionState, JupyterMessage, KernelInfoReply};
+use std::path::PathBuf;
+use util::ResultExt;
+
+/// Handle for a Jupyter kernel running on a remote host, reached through an
+/// SSH port-forwarding tunnel. The kernel process itself is owned by the
+/// remote server; this struct owns only the local tunnel and channel ends.
+#[derive(Debug)]
+pub struct SshRunningKernel {
+    // Shell/control requests, routed by `start_kernel_tasks`.
+    request_tx: mpsc::Sender<JupyterMessage>,
+    // Replies for the kernel's stdin (input_request) channel.
+    stdin_tx: mpsc::Sender<JupyterMessage>,
+    execution_state: ExecutionState,
+    kernel_info: Option<KernelInfoReply>,
+    working_directory: PathBuf,
+    // Keeps the ssh port-forward process alive for the kernel's lifetime.
+    _ssh_tunnel_process: util::command::Child,
+    // Local copy of the connection file, with ports rewritten to tunnel ends.
+    _local_connection_file: PathBuf,
+    // Identifier assigned by the remote server; used for `proto::KillKernel`.
+    kernel_id: String,
+    project: Entity<Project>,
+    project_id: u64,
+}
+
+impl SshRunningKernel {
+    /// Launch a kernel on the remote host and connect to it locally.
+    ///
+    /// Steps: ask the remote server to spawn the kernel (`proto::SpawnKernel`),
+    /// forward its five ZMQ ports over an SSH tunnel, poll until the tunnel
+    /// accepts connections, rewrite the connection file to point at the local
+    /// tunnel ends, then hand the sockets to `start_kernel_tasks`.
+    pub fn new<S: KernelSession + 'static>(
+        kernel_spec: SshRemoteKernelSpecification,
+        working_directory: PathBuf,
+        project: Entity<Project>,
+        session: Entity<S>,
+        window: &mut Window,
+        cx: &mut App,
+    ) -> Task<Result<Box<dyn RunningKernel>>> {
+        let client = project.read(cx).client();
+        let remote_client = project.read(cx).remote_client();
+        let project_id = project
+            .read(cx)
+            .remote_id()
+            .unwrap_or(proto::REMOTE_SERVER_PROJECT_ID);
+
+        window.spawn(cx, async move |cx| {
+            // First argv entry is the program; the rest are its arguments.
+            let command = kernel_spec
+                .kernelspec
+                .argv
+                .first()
+                .cloned()
+                .unwrap_or_default();
+            let args = kernel_spec
+                .kernelspec
+                .argv
+                .iter()
+                .skip(1)
+                .cloned()
+                .collect();
+
+            let request = proto::SpawnKernel {
+                kernel_name: kernel_spec.name.clone(),
+                working_directory: working_directory.to_string_lossy().to_string(),
+                project_id,
+                command,
+                args,
+            };
+            // Prefer the direct remote (SSH) proto channel; fall back to collab.
+            let response = if let Some(remote_client) = remote_client.as_ref() {
+                remote_client
+                    .read_with(cx, |client, _| client.proto_client())
+                    .request(request)
+                    .await?
+            } else {
+                client.request(request).await?
+            };
+
+            let kernel_id = response.kernel_id.clone();
+            let connection_info: serde_json::Value =
+                serde_json::from_str(&response.connection_file)?;
+
+            // Setup SSH Tunneling - allocate local ports
+            // NOTE(review): each listener is dropped before the next bind, so the
+            // OS could in principle hand the same port out twice — confirm.
+            let mut local_ports = Vec::new();
+            for _ in 0..5 {
+                let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
+                let port = listener.local_addr()?.port();
+                drop(listener);
+                local_ports.push(port);
+            }
+
+            let remote_shell_port = connection_info["shell_port"]
+                .as_u64()
+                .context("missing shell_port")? as u16;
+            let remote_iopub_port = connection_info["iopub_port"]
+                .as_u64()
+                .context("missing iopub_port")? as u16;
+            let remote_stdin_port = connection_info["stdin_port"]
+                .as_u64()
+                .context("missing stdin_port")? as u16;
+            let remote_control_port = connection_info["control_port"]
+                .as_u64()
+                .context("missing control_port")? as u16;
+            let remote_hb_port = connection_info["hb_port"]
+                .as_u64()
+                .context("missing hb_port")? as u16;
+
+            // (local port, remote bind address, remote port) triples for -L style forwards.
+            let forwards = vec![
+                (local_ports[0], "127.0.0.1".to_string(), remote_shell_port),
+                (local_ports[1], "127.0.0.1".to_string(), remote_iopub_port),
+                (local_ports[2], "127.0.0.1".to_string(), remote_stdin_port),
+                (local_ports[3], "127.0.0.1".to_string(), remote_control_port),
+                (local_ports[4], "127.0.0.1".to_string(), remote_hb_port),
+            ];
+
+            let remote_client = remote_client.ok_or_else(|| anyhow::anyhow!("no remote client"))?;
+            let command_template = cx.update(|_window, cx| {
+                remote_client.read(cx).build_forward_ports_command(forwards)
+            })??;
+
+            let mut command = util::command::new_command(&command_template.program);
+            command.args(&command_template.args);
+            command.envs(&command_template.env);
+
+            let mut ssh_tunnel_process = command.spawn().context("failed to spawn ssh tunnel")?;
+
+            // Drain tunnel stderr/stdout into the log so the pipes never fill up.
+            let stderr = ssh_tunnel_process.stderr.take();
+            cx.spawn(async move |_cx| {
+                if let Some(stderr) = stderr {
+                    let reader = BufReader::new(stderr);
+                    let mut lines = reader.lines();
+                    while let Some(Ok(line)) = lines.next().await {
+                        log::warn!("ssh tunnel stderr: {}", line);
+                    }
+                }
+            })
+            .detach();
+
+            let stdout = ssh_tunnel_process.stdout.take();
+            cx.spawn(async move |_cx| {
+                if let Some(stdout) = stdout {
+                    let reader = BufReader::new(stdout);
+                    let mut lines = reader.lines();
+                    while let Some(Ok(line)) = lines.next().await {
+                        log::debug!("ssh tunnel stdout: {}", line);
+                    }
+                }
+            })
+            .detach();
+
+            // We might or might not need this, perhaps we can just wait for a second or test it this way
+            // Readiness probe: up to 100 attempts * 100ms = ~10s for the tunnel
+            // to start accepting connections on the shell port.
+            let shell_port = local_ports[0];
+            let max_attempts = 100;
+            let mut connected = false;
+            for attempt in 0..max_attempts {
+                match smol::net::TcpStream::connect(format!("127.0.0.1:{}", shell_port)).await {
+                    Ok(_) => {
+                        connected = true;
+                        log::info!(
+                            "SSH tunnel established for kernel {} on attempt {}",
+                            kernel_id,
+                            attempt + 1
+                        );
+                        // giving the tunnel a moment to fully establish forwarding
+                        cx.background_executor()
+                            .timer(std::time::Duration::from_millis(500))
+                            .await;
+                        break;
+                    }
+                    Err(err) => {
+                        if attempt % 10 == 0 {
+                            log::debug!(
+                                "Waiting for SSH tunnel (attempt {}/{}): {}",
+                                attempt + 1,
+                                max_attempts,
+                                err
+                            );
+                        }
+                        if attempt < max_attempts - 1 {
+                            cx.background_executor()
+                                .timer(std::time::Duration::from_millis(100))
+                                .await;
+                        }
+                    }
+                }
+            }
+            if !connected {
+                anyhow::bail!(
+                    "SSH tunnel failed to establish after {} attempts",
+                    max_attempts
+                );
+            }
+
+            // Rewrite the connection file so local clients dial the tunnel ends.
+            let mut local_connection_info = connection_info.clone();
+            local_connection_info["shell_port"] = serde_json::json!(local_ports[0]);
+            local_connection_info["iopub_port"] = serde_json::json!(local_ports[1]);
+            local_connection_info["stdin_port"] = serde_json::json!(local_ports[2]);
+            local_connection_info["control_port"] = serde_json::json!(local_ports[3]);
+            local_connection_info["hb_port"] = serde_json::json!(local_ports[4]);
+            local_connection_info["ip"] = serde_json::json!("127.0.0.1");
+
+            let local_connection_file =
+                std::env::temp_dir().join(format!("zed_ssh_kernel_{}.json", kernel_id));
+            std::fs::write(
+                &local_connection_file,
+                serde_json::to_string_pretty(&local_connection_info)?,
+            )?;
+
+            // Parse connection info and create ZMQ connections
+            let connection_info_struct: runtimelib::ConnectionInfo =
+                serde_json::from_value(local_connection_info)?;
+            let session_id = uuid::Uuid::new_v4().to_string();
+
+            let output_socket = runtimelib::create_client_iopub_connection(
+                &connection_info_struct,
+                "",
+                &session_id,
+            )
+            .await
+            .context("failed to create iopub connection")?;
+
+            let peer_identity = runtimelib::peer_identity_for_session(&session_id)?;
+            let shell_socket = runtimelib::create_client_shell_connection_with_identity(
+                &connection_info_struct,
+                &session_id,
+                peer_identity.clone(),
+            )
+            .await
+            .context("failed to create shell connection")?;
+            let control_socket =
+                runtimelib::create_client_control_connection(&connection_info_struct, &session_id)
+                    .await
+                    .context("failed to create control connection")?;
+            let stdin_socket = runtimelib::create_client_stdin_connection_with_identity(
+                &connection_info_struct,
+                &session_id,
+                peer_identity,
+            )
+            .await
+            .context("failed to create stdin connection")?;
+
+            let (request_tx, stdin_tx) = start_kernel_tasks(
+                session.clone(),
+                output_socket,
+                shell_socket,
+                control_socket,
+                stdin_socket,
+                cx,
+            );
+
+            Ok(Box::new(SshRunningKernel {
+                request_tx,
+                stdin_tx,
+                execution_state: ExecutionState::Idle,
+                kernel_info: None,
+                working_directory,
+                _ssh_tunnel_process: ssh_tunnel_process,
+                _local_connection_file: local_connection_file,
+                kernel_id,
+                project,
+                project_id,
+            }) as Box<dyn RunningKernel>)
+        })
+    }
+}
+
+impl RunningKernel for SshRunningKernel {
+    fn request_tx(&self) -> mpsc::Sender<JupyterMessage> {
+        self.request_tx.clone()
+    }
+
+    fn stdin_tx(&self) -> mpsc::Sender<JupyterMessage> {
+        self.stdin_tx.clone()
+    }
+
+    fn working_directory(&self) -> &PathBuf {
+        &self.working_directory
+    }
+
+    fn execution_state(&self) -> &ExecutionState {
+        &self.execution_state
+    }
+
+    fn set_execution_state(&mut self, state: ExecutionState) {
+        self.execution_state = state;
+    }
+
+    fn kernel_info(&self) -> Option<&KernelInfoReply> {
+        self.kernel_info.as_ref()
+    }
+
+    fn set_kernel_info(&mut self, info: KernelInfoReply) {
+        self.kernel_info = Some(info);
+    }
+
+    /// Ask the remote side to kill the kernel process.
+    ///
+    /// Mirrors the transport selection used in `new`: prefer the remote (SSH)
+    /// proto client when one exists and fall back to the collab client
+    /// otherwise. Previously this always used the collab client, so the
+    /// request never reached a kernel spawned over a direct SSH connection.
+    fn force_shutdown(&mut self, _window: &mut Window, cx: &mut App) -> Task<Result<()>> {
+        let request = proto::KillKernel {
+            kernel_id: self.kernel_id.clone(),
+            project_id: self.project_id,
+        };
+
+        if let Some(remote_client) = self.project.read(cx).remote_client() {
+            let client = remote_client.read(cx).proto_client();
+            cx.background_executor().spawn(async move {
+                client.request(request).await?;
+                Ok(())
+            })
+        } else {
+            let client = self.project.read(cx).client();
+            cx.background_executor().spawn(async move {
+                client.request(request).await?;
+                Ok(())
+            })
+        }
+    }
+
+    /// Tear down the local end only (the SSH tunnel); the remote kernel
+    /// process is terminated via `force_shutdown`.
+    fn kill(&mut self) {
+        self._ssh_tunnel_process.kill().log_err();
+    }
+}
@@ -0,0 +1,589 @@
+use super::{
+ KernelSession, KernelSpecification, RunningKernel, WslKernelSpecification, start_kernel_tasks,
+};
+use anyhow::{Context as _, Result};
+use futures::{
+ AsyncBufReadExt as _, StreamExt as _,
+ channel::mpsc::{self},
+ io::BufReader,
+};
+use gpui::{App, BackgroundExecutor, Entity, EntityId, Task, Window};
+use jupyter_protocol::{
+ ExecutionState, JupyterMessage, KernelInfoReply,
+ connection_info::{ConnectionInfo, Transport},
+};
+use project::Fs;
+use runtimelib::dirs;
+use smol::net::TcpListener;
+use std::{
+ fmt::Debug,
+ net::{IpAddr, Ipv4Addr, SocketAddr},
+ path::PathBuf,
+ sync::Arc,
+};
+use uuid::Uuid;
+
+// Find a set of distinct open ports by binding listeners to port 0 and letting
+// the OS choose. All listeners are held open until every port has been picked;
+// dropping each listener before the next bind (as the previous version did)
+// allows the OS to hand out the same ephemeral port twice. There is still an
+// inherent race between closing the ports and the kernel binding them, but
+// that race is baked into the Jupyter connection-file protocol.
+async fn peek_ports(ip: IpAddr) -> Result<[u16; 5]> {
+    let addr_zeroport: SocketAddr = SocketAddr::new(ip, 0);
+    let mut ports: [u16; 5] = [0; 5];
+    let mut listeners = Vec::with_capacity(ports.len());
+    for port in ports.iter_mut() {
+        let listener = TcpListener::bind(addr_zeroport).await?;
+        *port = listener.local_addr()?.port();
+        // Keep the listener alive so this port cannot be re-assigned below.
+        listeners.push(listener);
+    }
+    Ok(ports)
+}
+
+/// A Jupyter kernel launched inside a WSL distribution via `wsl.exe`, reached
+/// through WSL 2's localhost port forwarding.
+pub struct WslRunningKernel {
+    pub process: util::command::Child,
+    // Connection file on the Windows side; removed again in `Drop`.
+    connection_path: PathBuf,
+    // Watches the child process and reports abnormal exits to the session.
+    _process_status_task: Option<Task<()>>,
+    pub working_directory: PathBuf,
+    // Shell/control requests, routed by `start_kernel_tasks`.
+    pub request_tx: mpsc::Sender<JupyterMessage>,
+    // Replies for the kernel's stdin (input_request) channel.
+    pub stdin_tx: mpsc::Sender<JupyterMessage>,
+    pub execution_state: ExecutionState,
+    pub kernel_info: Option<KernelInfoReply>,
+}
+
+impl Debug for WslRunningKernel {
+    // Manual impl: only the child process is shown; the remaining fields
+    // (channels, tasks, sockets) have no useful `Debug` output.
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("WslRunningKernel")
+            .field("process", &self.process)
+            .finish()
+    }
+}
+
+impl WslRunningKernel {
+    /// Spawn a Jupyter kernel inside the given WSL distribution and connect
+    /// to it from the Windows side.
+    ///
+    /// The connection file is written on the Windows side, translated to a
+    /// WSL path with `wslpath`, and the kernel is launched through
+    /// `wsl -d <distro> bash -lc …` so the user's login environment (PATH,
+    /// venvs) applies. Sockets connect via 127.0.0.1, relying on WSL 2
+    /// localhost forwarding.
+    pub fn new<S: KernelSession + 'static>(
+        kernel_specification: WslKernelSpecification,
+        entity_id: EntityId,
+        working_directory: PathBuf,
+        fs: Arc<dyn Fs>,
+        session: Entity<S>,
+        window: &mut Window,
+        cx: &mut App,
+    ) -> Task<Result<Box<dyn RunningKernel>>> {
+        window.spawn(cx, async move |cx| {
+            // For WSL2, we need to get the WSL VM's IP address to connect to it
+            // because WSL2 runs in a lightweight VM with its own network namespace.
+            // The kernel will bind to 127.0.0.1 inside WSL, and we connect to localhost.
+            // WSL2 localhost forwarding handles the rest.
+            let bind_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
+
+            // Use 127.0.0.1 and rely on WSL 2 localhost forwarding.
+            // This avoids issues where the VM IP is unreachable or binding fails on Windows.
+            let connect_ip = "127.0.0.1".to_string();
+
+            let ports = peek_ports(bind_ip).await?;
+
+            let connection_info = ConnectionInfo {
+                transport: Transport::TCP,
+                ip: bind_ip.to_string(),
+                stdin_port: ports[0],
+                control_port: ports[1],
+                hb_port: ports[2],
+                shell_port: ports[3],
+                iopub_port: ports[4],
+                signature_scheme: "hmac-sha256".to_string(),
+                key: uuid::Uuid::new_v4().to_string(),
+                kernel_name: Some(format!("zed-wsl-{}", kernel_specification.name)),
+            };
+
+            let runtime_dir = dirs::runtime_dir();
+            fs.create_dir(&runtime_dir)
+                .await
+                .with_context(|| format!("Failed to create jupyter runtime dir {runtime_dir:?}"))?;
+            let connection_path = runtime_dir.join(format!("kernel-zed-wsl-{entity_id}.json"));
+            let content = serde_json::to_string(&connection_info)?;
+            fs.atomic_write(connection_path.clone(), content).await?;
+
+            // Convert connection_path to WSL path
+            // yeah we can't assume this is available on WSL.
+            // running `wsl -d <distro> wslpath -u <windows_path>`
+            let mut wslpath_cmd = util::command::new_command("wsl");
+
+            // On Windows, passing paths with backslashes to wsl.exe can sometimes cause
+            // escaping issues or be misinterpreted. Converting to forward slashes is safer
+            // and often accepted by wslpath.
+            let connection_path_str = connection_path.to_string_lossy().replace('\\', "/");
+
+            wslpath_cmd
+                .arg("-d")
+                .arg(&kernel_specification.distro)
+                .arg("wslpath")
+                .arg("-u")
+                .arg(&connection_path_str);
+
+            let output = wslpath_cmd.output().await?;
+            if !output.status.success() {
+                anyhow::bail!("Failed to convert path to WSL path: {:?}", output);
+            }
+            let wsl_connection_path = String::from_utf8_lossy(&output.stdout).trim().to_string();
+
+            // Construct the kernel command
+            // The kernel spec argv might have absolute paths valid INSIDE WSL.
+            // We need to run inside WSL.
+            // `wsl -d <distro> --exec <argv0> <argv1> ...`
+            // But we need to replace {connection_file} with wsl_connection_path.
+
+            let argv = kernel_specification.kernelspec.argv;
+            anyhow::ensure!(
+                !argv.is_empty(),
+                "Empty argv in kernelspec {}",
+                kernel_specification.name
+            );
+
+            let working_directory_str = working_directory.to_string_lossy().replace('\\', "/");
+
+            let wsl_working_directory = if working_directory_str.starts_with('/') {
+                // If path starts with /, assume it is already a WSL path (e.g. /home/user)
+                Some(working_directory_str)
+            } else {
+                let mut wslpath_wd_cmd = util::command::new_command("wsl");
+                wslpath_wd_cmd
+                    .arg("-d")
+                    .arg(&kernel_specification.distro)
+                    .arg("wslpath")
+                    .arg("-u")
+                    .arg(&working_directory_str);
+
+                let wd_output = wslpath_wd_cmd.output().await;
+                if let Ok(output) = wd_output {
+                    if output.status.success() {
+                        Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            };
+
+            // If we couldn't convert the working directory or it's a temp directory,
+            // and the kernel spec uses a relative path (like .venv/bin/python),
+            // we need to handle this better. For now, let's use the converted path
+            // if available, otherwise we'll rely on WSL's default home directory.
+
+            let mut cmd = util::command::new_command("wsl");
+            cmd.arg("-d").arg(&kernel_specification.distro);
+
+            // Set CWD for the host process to a safe location to avoid "Directory name is invalid"
+            // if the project root is a path not supported by Windows CWD (e.g. UNC path for some tools).
+            cmd.current_dir(std::env::temp_dir());
+
+            if let Some(wd) = wsl_working_directory.as_ref() {
+                cmd.arg("--cd").arg(wd);
+            }
+
+            // Build the command to run inside WSL
+            // We use bash -lc to run in a login shell for proper environment setup
+            let mut kernel_args: Vec<String> = Vec::new();
+
+            // Env vars from the kernelspec are prepended as `env K=V ...`.
+            if let Some(env) = &kernel_specification.kernelspec.env {
+                if !env.is_empty() {
+                    kernel_args.push("env".to_string());
+                    for (k, v) in env {
+                        kernel_args.push(format!("{}={}", k, v));
+                    }
+                }
+            }
+
+            for arg in argv {
+                if arg == "{connection_file}" {
+                    kernel_args.push(wsl_connection_path.clone());
+                } else {
+                    kernel_args.push(arg.clone());
+                }
+            }
+
+            // because first command is python/python3 we need make sure it's present in the env
+            let first_cmd = kernel_args.first().map(|arg| {
+                arg.split_whitespace().next().unwrap_or(arg)
+            });
+
+            let needs_python_resolution = first_cmd.map_or(false, |cmd| {
+                cmd == "python" || cmd == "python3" || !cmd.starts_with('/')
+            });
+
+            let shell_command = if needs_python_resolution {
+                // 1. Check for .venv/bin/python or .venv/bin/python3 in working directory
+                // 2. Fall back to system python3 or python
+                let rest_args: Vec<String> = kernel_args.iter().skip(1).cloned().collect();
+                let rest_string = rest_args
+                    .iter()
+                    .map(|arg| {
+                        if arg.contains(' ') || arg.contains('\'') || arg.contains('"') {
+                            format!("'{}'", arg.replace('\'', "'\\''"))
+                        } else {
+                            arg.clone()
+                        }
+                    })
+                    .collect::<Vec<_>>()
+                    .join(" ");
+
+                let cd_command = if let Some(wd) = wsl_working_directory.as_ref() {
+                    format!("cd '{}' && ", wd.replace('\'', "'\\''"))
+                } else {
+                    String::new()
+                };
+                // TODO: find a better way to debug missing python issues in WSL
+
+                format!(
+                    "set -e; \
+                    {} \
+                    echo \"Working directory: $(pwd)\" >&2; \
+                    if [ -x .venv/bin/python ]; then \
+                        echo \"Found .venv/bin/python\" >&2; \
+                        exec .venv/bin/python {}; \
+                    elif [ -x .venv/bin/python3 ]; then \
+                        echo \"Found .venv/bin/python3\" >&2; \
+                        exec .venv/bin/python3 {}; \
+                    elif command -v python3 >/dev/null 2>&1; then \
+                        echo \"Found system python3\" >&2; \
+                        exec python3 {}; \
+                    elif command -v python >/dev/null 2>&1; then \
+                        echo \"Found system python\" >&2; \
+                        exec python {}; \
+                    else \
+                        echo 'Error: Python not found in .venv or PATH' >&2; \
+                        echo 'Contents of current directory:' >&2; \
+                        ls -la >&2; \
+                        echo 'PATH:' \"$PATH\" >&2; \
+                        exit 127; \
+                    fi",
+                    cd_command, rest_string, rest_string, rest_string, rest_string
+                )
+            } else {
+                kernel_args
+                    .iter()
+                    .map(|arg| {
+                        if arg.contains(' ') || arg.contains('\'') || arg.contains('"') {
+                            format!("'{}'", arg.replace('\'', "'\\''"))
+                        } else {
+                            arg.clone()
+                        }
+                    })
+                    .collect::<Vec<_>>()
+                    .join(" ")
+            };
+
+            cmd.arg("bash")
+                .arg("-l")
+                .arg("-c")
+                .arg(&shell_command);
+
+            let mut process = cmd
+                .stdout(util::command::Stdio::piped())
+                .stderr(util::command::Stdio::piped())
+                .stdin(util::command::Stdio::piped())
+                .kill_on_drop(true)
+                .spawn()
+                .context("failed to start the kernel process")?;
+
+            let session_id = Uuid::new_v4().to_string();
+
+            // Clients dial 127.0.0.1 on the Windows side, not the bind address.
+            let mut client_connection_info = connection_info.clone();
+            client_connection_info.ip = connect_ip.clone();
+
+            // Give the kernel a moment to start and bind to ports.
+            // WSL kernel startup can be slow, I am not sure if this is because of my testing environment
+            // or inherent to WSL. We can improve this later with better readiness checks.
+            cx.background_executor()
+                .timer(std::time::Duration::from_secs(2))
+                .await;
+
+            // If the process already exited, surface its output in the error.
+            match process.try_status() {
+                Ok(Some(status)) => {
+                    let mut stderr_content = String::new();
+                    if let Some(mut stderr) = process.stderr.take() {
+                        use futures::AsyncReadExt;
+                        let mut buf = Vec::new();
+                        if stderr.read_to_end(&mut buf).await.is_ok() {
+                            stderr_content = String::from_utf8_lossy(&buf).to_string();
+                        }
+                    }
+
+                    let mut stdout_content = String::new();
+                    if let Some(mut stdout) = process.stdout.take() {
+                        use futures::AsyncReadExt;
+                        let mut buf = Vec::new();
+                        if stdout.read_to_end(&mut buf).await.is_ok() {
+                            stdout_content = String::from_utf8_lossy(&buf).to_string();
+                        }
+                    }
+
+                    anyhow::bail!(
+                        "WSL kernel process exited prematurely with status: {:?}\nstderr: {}\nstdout: {}",
+                        status,
+                        stderr_content,
+                        stdout_content
+                    );
+                }
+                Ok(None) => {}
+                Err(_) => {}
+            }
+
+            let output_socket = runtimelib::create_client_iopub_connection(
+                &client_connection_info,
+                "",
+                &session_id,
+            )
+            .await?;
+
+            let peer_identity = runtimelib::peer_identity_for_session(&session_id)?;
+            let shell_socket = runtimelib::create_client_shell_connection_with_identity(
+                &client_connection_info,
+                &session_id,
+                peer_identity.clone(),
+            )
+            .await?;
+
+            let control_socket =
+                runtimelib::create_client_control_connection(&client_connection_info, &session_id)
+                    .await?;
+
+            let stdin_socket = runtimelib::create_client_stdin_connection_with_identity(
+                &client_connection_info,
+                &session_id,
+                peer_identity,
+            )
+            .await?;
+
+            let (request_tx, stdin_tx) = start_kernel_tasks(
+                session.clone(),
+                output_socket,
+                shell_socket,
+                control_socket,
+                stdin_socket,
+                cx,
+            );
+
+            // Drain stderr into the log; drain stdout so the pipe never fills.
+            let stderr = process.stderr.take();
+            cx.spawn(async move |_cx| {
+                if let Some(stderr) = stderr {
+                    let reader = BufReader::new(stderr);
+                    let mut lines = reader.lines();
+                    while let Some(Ok(line)) = lines.next().await {
+                        log::warn!("wsl kernel stderr: {}", line);
+                    }
+                }
+            })
+            .detach();
+
+            let stdout = process.stdout.take();
+            cx.spawn(async move |_cx| {
+                if let Some(stdout) = stdout {
+                    let reader = BufReader::new(stdout);
+                    let mut lines = reader.lines();
+                    while let Some(Ok(_line)) = lines.next().await {}
+                }
+            })
+            .detach();
+
+            let status = process.status();
+
+            // Report abnormal exits to the session; a successful exit is silent.
+            let process_status_task = cx.spawn(async move |cx| {
+                let error_message = match status.await {
+                    Ok(status) => {
+                        if status.success() {
+                            return;
+                        }
+
+                        format!("WSL kernel: kernel process exited with status: {:?}", status)
+                    }
+                    Err(err) => {
+                        format!("WSL kernel: kernel process exited with error: {:?}", err)
+                    }
+                };
+
+                session.update(cx, |session, cx| {
+                    session.kernel_errored(error_message, cx);
+
+                    cx.notify();
+                });
+            });
+
+            anyhow::Ok(Box::new(Self {
+                process,
+                request_tx,
+                stdin_tx,
+                working_directory,
+                _process_status_task: Some(process_status_task),
+                connection_path,
+                execution_state: ExecutionState::Idle,
+                kernel_info: None,
+            }) as Box<dyn RunningKernel>)
+        })
+    }
+}
+
+impl RunningKernel for WslRunningKernel {
+    /// Sender for shell/control requests (routed by `start_kernel_tasks`).
+    fn request_tx(&self) -> mpsc::Sender<JupyterMessage> {
+        self.request_tx.clone()
+    }
+
+    /// Sender for replies on the kernel's stdin (input_request) channel.
+    fn stdin_tx(&self) -> mpsc::Sender<JupyterMessage> {
+        self.stdin_tx.clone()
+    }
+
+    fn working_directory(&self) -> &PathBuf {
+        &self.working_directory
+    }
+
+    fn execution_state(&self) -> &ExecutionState {
+        &self.execution_state
+    }
+
+    fn set_execution_state(&mut self, new_state: ExecutionState) {
+        self.execution_state = new_state;
+    }
+
+    fn kernel_info(&self) -> Option<&KernelInfoReply> {
+        self.kernel_info.as_ref()
+    }
+
+    fn set_kernel_info(&mut self, reply: KernelInfoReply) {
+        self.kernel_info = Some(reply);
+    }
+
+    /// Shutdown and kill share the same synchronous teardown, so this just
+    /// delegates and reports completion as an already-finished task.
+    fn force_shutdown(&mut self, _window: &mut Window, _cx: &mut App) -> Task<anyhow::Result<()>> {
+        self.kill();
+        Task::ready(Ok(()))
+    }
+
+    fn kill(&mut self) {
+        // Stop watching the process, stop accepting requests, then terminate.
+        self._process_status_task.take();
+        self.request_tx.close_channel();
+        self.process.kill().ok();
+    }
+}
+
+impl Drop for WslRunningKernel {
+    fn drop(&mut self) {
+        // Best-effort cleanup: remove the connection file and make sure the
+        // WSL process does not outlive the editor-side kernel handle.
+        std::fs::remove_file(&self.connection_path).ok();
+        self.request_tx.close_channel();
+        self.process.kill().ok();
+    }
+}
+
+// Permissive mirror of `jupyter kernelspec list --json` output. The strict
+// response types in jupyter-protocol reject specs with unexpected or missing
+// fields, so these local structs require only what we actually read.
+#[derive(serde::Deserialize)]
+struct LocalKernelSpecsResponse {
+    // Keyed by kernelspec name (e.g. "python3").
+    kernelspecs: std::collections::HashMap<String, LocalKernelSpec>,
+}
+
+#[derive(serde::Deserialize)]
+struct LocalKernelSpec {
+    spec: LocalKernelSpecContent,
+}
+
+// Subset of the kernel.json schema needed to build a JupyterKernelspec.
+#[derive(serde::Deserialize)]
+struct LocalKernelSpecContent {
+    argv: Vec<String>,
+    display_name: String,
+    language: String,
+    interrupt_mode: Option<String>,
+    env: Option<std::collections::HashMap<String, String>>,
+    metadata: Option<std::collections::HashMap<String, serde_json::Value>>,
+}
+
+/// Discover Jupyter kernelspecs installed inside each WSL distribution.
+///
+/// Best-effort: any failure (wsl.exe missing, a distro without jupyter,
+/// unparseable output) contributes an empty list rather than an error, so the
+/// overall kernel picker still populates.
+pub async fn wsl_kernel_specifications(
+    background_executor: BackgroundExecutor,
+) -> Result<Vec<KernelSpecification>> {
+    // `wsl -l -q` prints one distro name per line.
+    let Ok(output) = util::command::new_command("wsl")
+        .arg("-l")
+        .arg("-q")
+        .output()
+        .await
+    else {
+        return Ok(Vec::new());
+    };
+    if !output.status.success() {
+        return Ok(Vec::new());
+    }
+
+    // wsl.exe frequently emits UTF-16LE on Windows. Heuristic: ASCII text
+    // encoded as UTF-16LE has a NUL in every second byte, so check byte 1.
+    let stdout = output.stdout;
+    let distros_str = if stdout.len() >= 2 && stdout[1] == 0 {
+        let u16s: Vec<u16> = stdout
+            .chunks_exact(2)
+            .map(|c| u16::from_le_bytes([c[0], c[1]]))
+            .collect();
+        String::from_utf16_lossy(&u16s)
+    } else {
+        String::from_utf8_lossy(&stdout).to_string()
+    };
+
+    let distros: Vec<String> = distros_str
+        .lines()
+        .map(|line| line.trim().to_string())
+        .filter(|line| !line.is_empty())
+        .collect();
+
+    // Query every distro concurrently. A login shell (`bash -l`) is used so
+    // jupyter installed via the user's profile (pyenv, venvs) is found.
+    let tasks = distros.into_iter().map(|distro| {
+        background_executor.spawn(async move {
+            let output = util::command::new_command("wsl")
+                .arg("-d")
+                .arg(&distro)
+                .arg("bash")
+                .arg("-l")
+                .arg("-c")
+                .arg("jupyter kernelspec list --json")
+                .output()
+                .await;
+
+            let Ok(output) = output else {
+                return Vec::new();
+            };
+            if !output.status.success() {
+                return Vec::new();
+            }
+
+            let json_str = String::from_utf8_lossy(&output.stdout);
+            // Use local permissive struct instead of strict KernelSpecsResponse from jupyter-protocol
+            let Ok(specs_response) =
+                serde_json::from_str::<LocalKernelSpecsResponse>(&json_str)
+            else {
+                return Vec::new();
+            };
+
+            specs_response
+                .kernelspecs
+                .into_iter()
+                .map(|(name, spec)| {
+                    KernelSpecification::WslRemote(WslKernelSpecification {
+                        name,
+                        kernelspec: jupyter_protocol::JupyterKernelspec {
+                            argv: spec.spec.argv,
+                            display_name: spec.spec.display_name,
+                            language: spec.spec.language,
+                            interrupt_mode: spec.spec.interrupt_mode,
+                            env: spec.spec.env,
+                            metadata: spec.spec.metadata,
+                        },
+                        distro: distro.clone(),
+                    })
+                })
+                .collect::<Vec<_>>()
+        })
+    });
+
+    let specs: Vec<_> = futures::future::join_all(tasks)
+        .await
+        .into_iter()
+        .flatten()
+        .collect();
+
+    Ok(specs)
+}
@@ -14,6 +14,7 @@ use gpui::{
};
use jupyter_protocol::JupyterKernelspec;
use language::{Language, LanguageRegistry};
+use log;
use project::{Project, ProjectEntryId, ProjectPath};
use settings::Settings as _;
use ui::{CommonAnimationExt, Tooltip, prelude::*};
@@ -31,7 +32,7 @@ use uuid::Uuid;
use crate::components::{KernelPickerDelegate, KernelSelector};
use crate::kernels::{
Kernel, KernelSession, KernelSpecification, KernelStatus, LocalKernelSpecification,
- NativeRunningKernel, RemoteRunningKernel,
+ NativeRunningKernel, RemoteRunningKernel, SshRunningKernel, WslRunningKernel,
};
use crate::repl_store::ReplStore;
@@ -398,7 +399,9 @@ impl NotebookEditor {
let display_name = match &spec {
KernelSpecification::Jupyter(s) => s.kernelspec.display_name.clone(),
KernelSpecification::PythonEnv(s) => s.kernelspec.display_name.clone(),
- KernelSpecification::Remote(s) => s.kernelspec.display_name.clone(),
+ KernelSpecification::JupyterServer(s) => s.kernelspec.display_name.clone(),
+ KernelSpecification::SshRemote(s) => s.kernelspec.display_name.clone(),
+ KernelSpecification::WslRemote(s) => s.kernelspec.display_name.clone(),
};
let kernelspec_json = serde_json::json!({
@@ -432,9 +435,17 @@ impl NotebookEditor {
window,
cx,
),
- KernelSpecification::Remote(remote_spec) => {
+ KernelSpecification::JupyterServer(remote_spec) => {
RemoteRunningKernel::new(remote_spec, working_directory, view, window, cx)
}
+
+ KernelSpecification::SshRemote(spec) => {
+ let project = self.project.clone();
+ SshRunningKernel::new(spec, working_directory, project, view, window, cx)
+ }
+ KernelSpecification::WslRemote(spec) => {
+ WslRunningKernel::new(spec, entity_id, working_directory, fs, view, window, cx)
+ }
};
let pending_kernel = cx
@@ -450,6 +461,7 @@ impl NotebookEditor {
.ok();
}
Err(err) => {
+ log::error!("Kernel failed to start: {:?}", err);
this.update(cx, |editor, cx| {
editor.kernel = Kernel::ErroredLaunch(err.to_string());
cx.notify();
@@ -1205,25 +1217,27 @@ impl Render for NotebookEditor {
.size_full()
.key_context("NotebookEditor")
.track_focus(&self.focus_handle)
- .on_action(cx.listener(|this, &OpenNotebook, window, cx| {
+ .on_action(cx.listener(|this, _: &OpenNotebook, window, cx| {
this.open_notebook(&OpenNotebook, window, cx)
}))
.on_action(
- cx.listener(|this, &ClearOutputs, window, cx| this.clear_outputs(window, cx)),
+ cx.listener(|this, _: &ClearOutputs, window, cx| this.clear_outputs(window, cx)),
)
.on_action(
- cx.listener(|this, &Run, window, cx| this.run_current_cell(&Run, window, cx)),
+ cx.listener(|this, _: &Run, window, cx| this.run_current_cell(&Run, window, cx)),
)
- .on_action(cx.listener(|this, &RunAll, window, cx| this.run_cells(window, cx)))
- .on_action(cx.listener(|this, &MoveCellUp, window, cx| this.move_cell_up(window, cx)))
+ .on_action(cx.listener(|this, _: &RunAll, window, cx| this.run_cells(window, cx)))
.on_action(
- cx.listener(|this, &MoveCellDown, window, cx| this.move_cell_down(window, cx)),
+ cx.listener(|this, _: &MoveCellUp, window, cx| this.move_cell_up(window, cx)),
)
- .on_action(cx.listener(|this, &AddMarkdownBlock, window, cx| {
+ .on_action(
+ cx.listener(|this, _: &MoveCellDown, window, cx| this.move_cell_down(window, cx)),
+ )
+ .on_action(cx.listener(|this, _: &AddMarkdownBlock, window, cx| {
this.add_markdown_block(window, cx)
}))
.on_action(
- cx.listener(|this, &AddCodeBlock, window, cx| this.add_code_block(window, cx)),
+ cx.listener(|this, _: &AddCodeBlock, window, cx| this.add_code_block(window, cx)),
)
.on_action(cx.listener(|this, _: &MoveUp, window, cx| {
this.select_previous(&menu::SelectPrevious, window, cx);
@@ -1335,7 +1349,10 @@ impl project::ProjectItem for NotebookItem {
.with_context(|| format!("finding the absolute path of {path:?}"))?;
// todo: watch for changes to the file
- let file_content = fs.load(abs_path.as_path()).await?;
+ let buffer = project
+ .update(cx, |project, cx| project.open_buffer(path.clone(), cx))
+ .await?;
+ let file_content = buffer.read_with(cx, |buffer, _| buffer.text());
let notebook = if file_content.trim().is_empty() {
nbformat::v4::Notebook {
@@ -1621,13 +1638,19 @@ impl Item for NotebookEditor {
window: &mut Window,
cx: &mut Context<Self>,
) -> Task<Result<()>> {
- let path = self.notebook_item.read(cx).path.clone();
- let fs = project.read(cx).fs().clone();
+ let project_path = self.notebook_item.read(cx).project_path.clone();
let languages = self.languages.clone();
let notebook_language = self.notebook_language.clone();
cx.spawn_in(window, async move |this, cx| {
- let file_content = fs.load(&path).await?;
+ let buffer = this
+ .update(cx, |this, cx| {
+ this.project
+ .update(cx, |project, cx| project.open_buffer(project_path, cx))
+ })?
+ .await?;
+
+ let file_content = buffer.read_with(cx, |buffer, _| buffer.text());
let mut json: serde_json::Value = serde_json::from_str(&file_content)?;
if let Some(cells) = json.get_mut("cells").and_then(|c| c.as_array_mut()) {
@@ -83,12 +83,15 @@ pub fn init(cx: &mut App) {
cx.defer_in(window, |editor, _window, cx| {
let project = editor.project().cloned();
- let is_local_project = project
+ let is_valid_project = project
.as_ref()
- .map(|project| project.read(cx).is_local())
+ .map(|project| {
+ let p = project.read(cx);
+ !p.is_via_collab()
+ })
.unwrap_or(false);
- if !is_local_project {
+ if !is_valid_project {
return;
}
@@ -2,7 +2,7 @@ use std::future::Future;
use std::sync::Arc;
use anyhow::{Context as _, Result};
-use collections::HashMap;
+use collections::{HashMap, HashSet};
use command_palette_hooks::CommandPaletteFilter;
use gpui::{App, Context, Entity, EntityId, Global, SharedString, Subscription, Task, prelude::*};
use jupyter_websocket_client::RemoteServer;
@@ -13,6 +13,7 @@ use util::rel_path::RelPath;
use crate::kernels::{
Kernel, list_remote_kernelspecs, local_kernel_specifications, python_env_kernel_specifications,
+ wsl_kernel_specifications,
};
use crate::{JupyterSettings, KernelSpecification, Session};
@@ -28,6 +29,7 @@ pub struct ReplStore {
selected_kernel_for_worktree: HashMap<WorktreeId, KernelSpecification>,
kernel_specifications_for_worktree: HashMap<WorktreeId, Vec<KernelSpecification>>,
active_python_toolchain_for_worktree: HashMap<WorktreeId, SharedString>,
+ remote_worktrees: HashSet<WorktreeId>,
_subscriptions: Vec<Subscription>,
}
@@ -66,6 +68,7 @@ impl ReplStore {
kernel_specifications_for_worktree: HashMap::default(),
selected_kernel_for_worktree: HashMap::default(),
active_python_toolchain_for_worktree: HashMap::default(),
+ remote_worktrees: HashSet::default(),
};
this.on_enabled_changed(cx);
this
@@ -88,11 +91,17 @@ impl ReplStore {
&self,
worktree_id: WorktreeId,
) -> impl Iterator<Item = &KernelSpecification> {
+ let global_specs = if self.remote_worktrees.contains(&worktree_id) {
+ None
+ } else {
+ Some(self.kernel_specifications.iter())
+ };
+
self.kernel_specifications_for_worktree
.get(&worktree_id)
.into_iter()
.flat_map(|specs| specs.iter())
- .chain(self.kernel_specifications.iter())
+ .chain(global_specs.into_iter().flatten())
}
pub fn pure_jupyter_kernel_specifications(&self) -> impl Iterator<Item = &KernelSpecification> {
@@ -134,6 +143,7 @@ impl ReplStore {
project: &Entity<Project>,
cx: &mut Context<Self>,
) -> Task<Result<()>> {
+ let is_remote = project.read(cx).is_remote();
let kernel_specifications = python_env_kernel_specifications(project, worktree_id, cx);
let active_toolchain = project.read(cx).active_toolchain(
ProjectPath {
@@ -158,6 +168,11 @@ impl ReplStore {
this.active_python_toolchain_for_worktree
.insert(worktree_id, path);
}
+ if is_remote {
+ this.remote_worktrees.insert(worktree_id);
+ } else {
+ this.remote_worktrees.remove(&worktree_id);
+ }
cx.notify();
})
})
@@ -180,7 +195,12 @@ impl ReplStore {
Some(cx.spawn(async move |_, _| {
list_remote_kernelspecs(remote_server, http_client)
.await
- .map(|specs| specs.into_iter().map(KernelSpecification::Remote).collect())
+ .map(|specs| {
+ specs
+ .into_iter()
+ .map(KernelSpecification::JupyterServer)
+ .collect()
+ })
}))
}
_ => None,
@@ -189,6 +209,7 @@ impl ReplStore {
pub fn refresh_kernelspecs(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
let local_kernel_specifications = local_kernel_specifications(self.fs.clone());
+ let wsl_kernel_specifications = wsl_kernel_specifications(cx.background_executor().clone());
let remote_kernel_specifications = self.get_remote_kernel_specifications(cx);
@@ -199,6 +220,10 @@ impl ReplStore {
.map(KernelSpecification::Jupyter)
.collect::<Vec<_>>();
+ if let Ok(wsl_specs) = wsl_kernel_specifications.await {
+ all_specs.extend(wsl_specs);
+ }
+
if let Some(remote_task) = remote_kernel_specifications
&& let Ok(remote_specs) = remote_task.await
{
@@ -237,6 +262,10 @@ impl ReplStore {
self.active_python_toolchain_for_worktree.get(&worktree_id)
}
+ pub fn selected_kernel(&self, worktree_id: WorktreeId) -> Option<&KernelSpecification> {
+ self.selected_kernel_for_worktree.get(&worktree_id)
+ }
+
pub fn is_recommended_kernel(
&self,
worktree_id: WorktreeId,
@@ -1,9 +1,11 @@
use crate::components::KernelListItem;
-use crate::kernels::RemoteRunningKernel;
use crate::setup_editor_session_actions;
use crate::{
KernelStatus,
- kernels::{Kernel, KernelSession, KernelSpecification, NativeRunningKernel},
+ kernels::{
+ Kernel, KernelSession, KernelSpecification, NativeRunningKernel, RemoteRunningKernel,
+ SshRunningKernel, WslRunningKernel,
+ },
outputs::{
ExecutionStatus, ExecutionView, ExecutionViewFinishedEmpty, ExecutionViewFinishedSmall,
InputReplyEvent,
@@ -34,7 +36,7 @@ use language::Point;
use project::Fs;
use runtimelib::{
ExecuteRequest, ExecutionState, InputReply, InterruptRequest, JupyterMessage,
- JupyterMessageContent, ReplyStatus, ShutdownRequest,
+ JupyterMessageContent, KernelInfoRequest, ReplyStatus, ShutdownRequest,
};
use settings::Settings as _;
use std::{env::temp_dir, ops::Range, sync::Arc, time::Duration};
@@ -266,11 +268,34 @@ impl Session {
fn start_kernel(&mut self, window: &mut Window, cx: &mut Context<Self>) {
let kernel_language = self.kernel_specification.language();
let entity_id = self.editor.entity_id();
- let working_directory = self
- .editor
- .upgrade()
- .and_then(|editor| editor.read(cx).working_directory(cx))
- .unwrap_or_else(temp_dir);
+
+        // For remote kernels (both WSL and SSH), use the project root instead of the
+        // editor's potentially temporary working directory, which causes .venv/bin/python checks to fail
+ let is_remote_execution = matches!(
+ self.kernel_specification,
+ crate::KernelSpecification::WslRemote(_) | crate::KernelSpecification::SshRemote(_)
+ );
+
+ let working_directory = if is_remote_execution {
+            // Resolve the first worktree's absolute root path so relative lookups
+            // (e.g. .venv/bin/python) work inside the remote project
+ self.editor
+ .upgrade()
+ .and_then(|editor| editor.read(cx).project().cloned())
+ .and_then(|project| {
+ project
+ .read(cx)
+ .worktrees(cx)
+ .next()
+ .map(|worktree| worktree.read(cx).abs_path().to_path_buf())
+ })
+ .unwrap_or_else(temp_dir)
+ } else {
+ self.editor
+ .upgrade()
+ .and_then(|editor| editor.read(cx).working_directory(cx))
+ .unwrap_or_else(temp_dir)
+ };
telemetry::event!(
"Kernel Status Changed",
@@ -300,9 +325,38 @@ impl Session {
window,
cx,
),
- KernelSpecification::Remote(remote_kernel_specification) => RemoteRunningKernel::new(
- remote_kernel_specification,
+ KernelSpecification::JupyterServer(remote_kernel_specification) => {
+ RemoteRunningKernel::new(
+ remote_kernel_specification,
+ working_directory,
+ session_view,
+ window,
+ cx,
+ )
+ }
+ KernelSpecification::SshRemote(spec) => {
+ let project = self
+ .editor
+ .upgrade()
+ .and_then(|editor| editor.read(cx).project().cloned());
+ if let Some(project) = project {
+ SshRunningKernel::new(
+ spec,
+ working_directory,
+ project,
+ session_view,
+ window,
+ cx,
+ )
+ } else {
+ Task::ready(Err(anyhow::anyhow!("No project associated with editor")))
+ }
+ }
+ KernelSpecification::WslRemote(spec) => WslRunningKernel::new(
+ spec,
+ entity_id,
working_directory,
+ self.fs.clone(),
session_view,
window,
cx,
@@ -311,12 +365,15 @@ impl Session {
let pending_kernel = cx
.spawn(async move |this, cx| {
- let kernel = kernel.await;
+ let kernel: anyhow::Result<Box<dyn crate::kernels::RunningKernel>> = kernel.await;
match kernel {
Ok(kernel) => {
this.update(cx, |session, cx| {
session.kernel(Kernel::RunningKernel(kernel), cx);
+ let request =
+ JupyterMessageContent::KernelInfoRequest(KernelInfoRequest {});
+ session.send(request.into(), cx).log_err();
})
.ok();
}
@@ -38,13 +38,16 @@ impl QuickActionBar {
let editor = self.active_editor()?;
- let is_local_project = editor
+ let is_valid_project = editor
.read(cx)
.workspace()
- .map(|workspace| workspace.read(cx).project().read(cx).is_local())
+ .map(|workspace| {
+ let project = workspace.read(cx).project().read(cx);
+ !project.is_via_collab()
+ })
.unwrap_or(false);
- if !is_local_project {
+ if !is_valid_project {
return None;
}
@@ -15,6 +15,9 @@
(zed-editor.overrideAttrs (attrs: {
passthru.env = attrs.env;
})).env; # exfil `env`; it's not in drvAttrs
+
+  # Musl cross-compilation package set (provides the x86_64 musl gcc toolchain) for building remote_server
+ muslCross = pkgs.pkgsCross.musl64;
in
{
devShells.default = (pkgs.mkShell.override { inherit (zed-editor) stdenv; }) {
@@ -54,6 +57,8 @@
};
PROTOC = "${pkgs.protobuf}/bin/protoc";
ZED_ZSTD_MUSL_LIB = "${pkgs.pkgsCross.musl64.pkgsStatic.zstd.out}/lib";
+ # For aws-lc-sys musl cross-compilation
+ CC_x86_64_unknown_linux_musl = "${muslCross.stdenv.cc}/bin/x86_64-unknown-linux-musl-gcc";
};
};
};