use crate::HeadlessProject;
use crate::headless_project::HeadlessAppState;
use anyhow::{Context as _, Result, anyhow};
use client::ProxySettings;
use collections::HashMap;
use project::trusted_worktrees;
use util::ResultExt;

use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::{mpsc, oneshot};
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, select, select_biased};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{Url, read_proxy_from_env};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;
use util::command::new_smol_command;

use proto::CrashReport;
use release_channel::{AppCommitSha, AppVersion, RELEASE_CHANNEL, ReleaseChannel};
use remote::RemoteClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
    proxy::ProxyLaunchError,
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, REMOTE_SERVER_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{Settings, SettingsStore, watch_config_file};

use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::{
    env,
    ffi::OsStr,
    fs::File,
    io::Write,
    mem,
    ops::ControlFlow,
    path::{Path, PathBuf},
    process::ExitStatus,
    str::FromStr,
    sync::{Arc, LazyLock},
};
use thiserror::Error;

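/// Version string reported by this remote server: the packaged Zed version on Stable and
/// Preview, or the build id and/or commit SHA on Nightly and Dev builds.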
pub static VERSION: LazyLock<String> = LazyLock::new(|| match *RELEASE_CHANNEL {
    ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION").to_owned(),
    ReleaseChannel::Nightly | ReleaseChannel::Dev => {
        let commit_sha = option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha");
        let build_identifier = option_env!("ZED_BUILD_ID");
        if let Some(build_id) = build_identifier {
            format!("{build_id}+{commit_sha}")
        } else {
            commit_sha.to_owned()
        }
    }
});

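/// Initializes logging for the proxy process: records are serialized as JSON `LogRecord`s and
/// prefixed with "(remote proxy)" so they can be distinguished from server logs.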
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message =
                std::borrow::Cow::Owned(format!("(remote proxy) {}", log_record.message));
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

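/// Initializes logging for the server process. Records are written both to the given log file
/// and to the returned channel (from which they are forwarded over the stderr socket), and a
/// panic hook is installed that logs panics before invoking the previous hook.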
fn init_logging_server(log_file_path: &Path) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(std::io::Error::other)?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    let old_hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |info| {
        let backtrace = std::backtrace::Backtrace::force_capture();
        let message = info.payload_as_str().unwrap_or("Box<Any>").to_owned();
        let location = info
            .location()
            .map_or_else(|| "<unknown>".to_owned(), |location| location.to_string());
        let current_thread = std::thread::current();
        let thread_name = current_thread.name().unwrap_or("<unnamed>");

        let msg = format!("thread '{thread_name}' panicked at {location}:\n{message}\n{backtrace}");
        // NOTE: This log never reaches the client, as the communication is handled on a main thread task
        // which will never run once we panic.
        log::error!("{msg}");
        old_hook(info);
    }));
    env_logger::Builder::new()
        .filter_level(log::LevelFilter::Info)
        .parse_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message =
                std::borrow::Cow::Owned(format!("(remote server) {}", log_record.message));
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

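/// Registers a handler for `GetCrashFiles` requests: it scans the logs directory for legacy
/// `.panic` files and minidump (`.dmp`) files plus their JSON metadata, deletes them from
/// disk, and returns the collected crash reports to the client.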
fn handle_crash_files_requests(project: &Entity<HeadlessProject>, client: &AnyProtoClient) {
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetCrashFiles>, _cx| async move {
            let mut legacy_panics = Vec::new();
            let mut crashes = Vec::new();
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                let extension = child_path.extension();
                if extension == Some(OsStr::new("panic")) {
                    let filename = if let Some(filename) = child_path.file_name() {
                        filename.to_string_lossy()
                    } else {
                        continue;
                    };

                    if !filename.starts_with("zed") {
                        continue;
                    }

                    let file_contents = smol::fs::read_to_string(&child_path)
                        .await
                        .context("error reading panic file")?;

                    legacy_panics.push(file_contents);
                    smol::fs::remove_file(&child_path)
                        .await
                        .context("error removing panic")
                        .log_err();
                } else if extension == Some(OsStr::new("dmp")) {
                    let mut json_path = child_path.clone();
                    json_path.set_extension("json");
                    if let Ok(json_content) = smol::fs::read_to_string(&json_path).await {
                        crashes.push(CrashReport {
                            metadata: json_content,
                            minidump_contents: smol::fs::read(&child_path).await?,
                        });
                        smol::fs::remove_file(&child_path).await.log_err();
                        smol::fs::remove_file(&json_path).await.log_err();
                    } else {
                        log::error!("Couldn't find json metadata for crash: {child_path:?}");
                    }
                }
            }

            anyhow::Ok(proto::GetCrashFilesResponse { crashes })
        },
    );
}

struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

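/// Accepts connections from the proxy on the stdin/stdout/stderr Unix sockets and pumps
/// messages between those streams and the returned proto client: stdin carries incoming
/// envelopes, stdout carries outgoing envelopes, and stderr carries forwarded log records.
/// If no connection arrives within the idle timeout, the app is shut down.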
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
    is_wsl_interop: bool,
) -> AnyProtoClient {
    // This is the server idle timeout. If no connection is accepted within this timeout, the
    // server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(6);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(async move |cx| {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        log::error!("failed to accept new connections");
                        break;
                    };
                    log::info!("accepted new connections");
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(cx.background_executor().timer(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    });
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    log::info!("app quit requested");
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if (stdin_msg_tx.send(msg).await).is_err() {
                        break;
                    }
                }
            }).detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin, dropping connection.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {error:?}. exiting.");
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler: outgoing message channel closed");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    RemoteClient::proto_client_from_channels(incoming_rx, outgoing_tx, cx, "server", is_wsl_interop)
}

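/// Creates all directories the server relies on (config, extensions, logs, traces, etc.).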
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::hang_traces_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path).with_context(|| format!("creating directory {path:?}"))?;
    }
    Ok(())
}

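/// Entry point for the `run` subcommand: daemonizes, sets up crash handling, logging, and the
/// Unix socket listeners, then runs a headless GPUI app hosting the `HeadlessProject` that
/// serves the connected Zed client.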
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    let app = gpui::Application::headless();
    let pid = std::process::id();
    let id = pid.to_string();
    app.background_executor()
        .spawn(crashes::init(crashes::InitCrashHandler {
            session_id: id,
            zed_version: VERSION.to_owned(),
            binary: "zed-remote-server".to_string(),
            release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
            commit_sha: option_env!("ZED_COMMIT_SHA").unwrap_or("no_sha").to_owned(),
        }))
        .detach();
    let log_rx = init_logging_server(&log_file)?;
    log::info!(
        "starting up with PID {}:\npid_file: {:?}, log_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid,
        pid_file,
        log_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file, pid)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    rayon::ThreadPoolBuilder::new()
        .num_threads(std::thread::available_parallelism().map_or(1, |n| n.get().div_ceil(2)))
        .stack_size(10 * 1024 * 1024)
        .thread_name(|ix| format!("RayonWorker{}", ix))
        .build_global()
        .unwrap();

    let (shell_env_loaded_tx, shell_env_loaded_rx) = oneshot::channel();
    app.background_executor()
        .spawn(async {
            util::load_login_shell_environment().await.log_err();
            shell_env_loaded_tx.send(()).ok();
        })
        .detach();

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    let run = move |cx: &mut _| {
        settings::init(cx);
        let app_commit_sha =
            option_env!("ZED_COMMIT_SHA").map(|s| AppCommitSha::new(s.to_owned()));
        let app_version = AppVersion::load(
            env!("ZED_PKG_VERSION"),
            option_env!("ZED_BUILD_ID"),
            app_commit_sha,
        );
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        let is_wsl_interop = if cfg!(target_os = "linux") {
            // See: https://learn.microsoft.com/en-us/windows/wsl/filesystems#disable-interoperability
            matches!(std::fs::read_to_string("/proc/sys/fs/binfmt_misc/WSLInterop"), Ok(s) if s.contains("enabled"))
        } else {
            false
        };

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx, is_wsl_interop);
        trusted_worktrees::init(HashMap::default(), cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);
        dap_adapters::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        json_schema_store::init(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(None, cx.background_executor().clone()));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(
                http_client.clone(),
                Some(shell_env_loaded_rx),
                node_settings_rx,
            );

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    extension_host_proxy,
                },
                true,
                cx,
            )
        });

        handle_crash_files_requests(&project, &session);

        cx.background_spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    };
    // We do not reuse any of the state after unwinding, so we don't run the risk of observing
    // broken invariants.
    let app = std::panic::AssertUnwindSafe(app);
    let run = std::panic::AssertUnwindSafe(run);
    let res = std::panic::catch_unwind(move || { app }.0.run({ run }.0));
    if res.is_err() {
        log::error!("app panicked. quitting.");
        Err(anyhow::anyhow!("panicked"))
    } else {
        log::info!("gpui app is shut down. quitting.");
        Ok(())
    }
}

#[derive(Debug, Error)]
pub enum ServerPathError {
    #[error("Failed to create server_dir `{path}`")]
    CreateServerDir {
        #[source]
        source: std::io::Error,
        path: PathBuf,
    },
    #[error("Failed to create logs_dir `{path}`")]
    CreateLogsDir {
        #[source]
        source: std::io::Error,
        path: PathBuf,
    },
}

#[derive(Clone, Debug)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self, ServerPathError> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir).map_err(|source| {
            ServerPathError::CreateServerDir {
                source,
                path: server_dir.clone(),
            }
        })?;
        let log_dir = logs_dir();
        std::fs::create_dir_all(log_dir).map_err(|source| ServerPathError::CreateLogsDir {
            source,
            path: log_dir.clone(),
        })?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

#[derive(Debug, Error)]
pub enum ExecuteProxyError {
    #[error("Failed to init server paths: {0:#}")]
    ServerPath(#[from] ServerPathError),

    #[error(transparent)]
    ServerNotRunning(#[from] ProxyLaunchError),

    #[error("Failed to check PidFile '{path}': {source:#}")]
    CheckPidFile {
        #[source]
        source: CheckPidError,
        path: PathBuf,
    },

    #[error("Failed to kill existing server with pid '{pid}': {source:#}")]
    KillRunningServer {
        #[source]
        source: std::io::Error,
        pid: u32,
    },

    #[error("failed to spawn server: {0:#}")]
    SpawnServer(#[source] SpawnServerError),

    #[error("stdin_task failed: {0:#}")]
    StdinTask(#[source] anyhow::Error),
    #[error("stdout_task failed: {0:#}")]
    StdoutTask(#[source] anyhow::Error),
    #[error("stderr_task failed: {0:#}")]
    StderrTask(#[source] anyhow::Error),
}

impl ExecuteProxyError {
    pub fn to_exit_code(&self) -> i32 {
        match self {
            ExecuteProxyError::ServerNotRunning(proxy_launch_error) => {
                proxy_launch_error.to_exit_code()
            }
            _ => 1,
        }
    }
}

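/// Entry point for the proxy subcommand, which runs inside the SSH session: it ensures a
/// server process is running (spawning one, or killing and respawning it unless we are
/// reconnecting), then forwards its own stdin/stdout/stderr to the server's Unix sockets.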
pub(crate) fn execute_proxy(
    identifier: String,
    is_reconnecting: bool,
) -> Result<(), ExecuteProxyError> {
    init_logging_proxy();

    let server_paths = ServerPaths::new(&identifier)?;

    let id = std::process::id().to_string();
    smol::spawn(crashes::init(crashes::InitCrashHandler {
        session_id: id,
        zed_version: VERSION.to_owned(),
        binary: "zed-remote-server".to_string(),
        release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
        commit_sha: option_env!("ZED_COMMIT_SHA").unwrap_or("no_sha").to_owned(),
    }))
    .detach();

    log::info!("starting proxy process. PID: {}", std::process::id());
    let server_pid = smol::block_on(async {
        let server_pid = check_pid_file(&server_paths.pid_file)
            .await
            .map_err(|source| ExecuteProxyError::CheckPidFile {
                source,
                path: server_paths.pid_file.clone(),
            })?;
        if is_reconnecting {
            match server_pid {
                None => {
                    log::error!("attempted to reconnect, but no server is running");
                    Err(ExecuteProxyError::ServerNotRunning(
                        ProxyLaunchError::ServerNotRunning,
                    ))
                }
                Some(server_pid) => Ok(server_pid),
            }
        } else {
            if let Some(pid) = server_pid {
                log::info!(
                    "proxy found server already running with PID {}. Killing process and cleaning up files...",
                    pid
                );
                kill_running_server(pid, &server_paths).await?;
            }
            spawn_server(&server_paths)
                .await
                .map_err(ExecuteProxyError::SpawnServer)?;
            std::fs::read_to_string(&server_paths.pid_file)
                .and_then(|contents| {
                    contents.parse::<u32>().map_err(|_| {
                        std::io::Error::new(
                            std::io::ErrorKind::InvalidData,
                            "Invalid PID file contents",
                        )
                    })
                })
                .map_err(SpawnServerError::ProcessStatus)
                .map_err(ExecuteProxyError::SpawnServer)
        }
    })?;

    // Each forwarding task owns its own copy of the server paths.
    let stdin_task = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdin = smol::Unblock::new(std::io::stdin());
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket)
                .await
                .with_context(|| {
                    format!(
                        "Failed to connect to stdin socket {}",
                        server_paths.stdin_socket.display()
                    )
                })?;
            handle_io(stdin, stream, "stdin").await
        }
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdout = smol::Unblock::new(std::io::stdout());
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket)
                .await
                .with_context(|| {
                    format!(
                        "Failed to connect to stdout socket {}",
                        server_paths.stdout_socket.display()
                    )
                })?;
            handle_io(stream, stdout, "stdout").await
        }
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = smol::Unblock::new(std::io::stderr());
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket)
            .await
            .with_context(|| {
                format!(
                    "Failed to connect to stderr socket {}",
                    server_paths.stderr_socket.display()
                )
            })?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream
                .read(&mut stderr_buffer)
                .await
                .context("reading stderr")?
            {
                0 => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                n => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.map_err(ExecuteProxyError::StdinTask),
            result = stdout_task.fuse() => result.map_err(ExecuteProxyError::StdoutTask),
            result = stderr_task.fuse() => result.map_err(ExecuteProxyError::StderrTask),
        }
    }) {
        log::error!("encountered error while forwarding messages: {forwarding_result:#}");
        if !matches!(smol::block_on(check_server_running(server_pid)), Ok(true)) {
            log::error!("server exited unexpectedly");
            return Err(ExecuteProxyError::ServerNotRunning(
                ProxyLaunchError::ServerNotRunning,
            ));
        }
        return Err(forwarding_result);
    }

    Ok(())
}

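/// Kills an already-running server process and removes its PID file and sockets so that a
/// fresh server can be spawned.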
async fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<(), ExecuteProxyError> {
    log::info!("killing existing server with PID {}", pid);
    new_smol_command("kill")
        .arg(pid.to_string())
        .output()
        .await
        .map_err(|source| ExecuteProxyError::KillRunningServer { source, pid })?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

#[derive(Debug, Error)]
pub enum SpawnServerError {
    #[error("failed to remove stdin socket")]
    RemoveStdinSocket(#[source] std::io::Error),

    #[error("failed to remove stdout socket")]
    RemoveStdoutSocket(#[source] std::io::Error),

    #[error("failed to remove stderr socket")]
    RemoveStderrSocket(#[source] std::io::Error),

    #[error("failed to get current_exe")]
    CurrentExe(#[source] std::io::Error),

    #[error("failed to launch server process")]
    ProcessStatus(#[source] std::io::Error),

    #[error("failed to launch and detach server process: {status}\n{paths}")]
    LaunchStatus { status: ExitStatus, paths: String },

    #[error("failed to wait for server to be ready to accept connections")]
    Timeout,
}

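/// Spawns the server by re-invoking the current binary with the `run` subcommand, then waits
/// up to ten seconds for all three sockets to appear, which signals that the daemonized
/// server is ready to accept connections.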
async fn spawn_server(paths: &ServerPaths) -> Result<(), SpawnServerError> {
    log::info!("spawning server process");
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket).map_err(SpawnServerError::RemoveStdinSocket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket).map_err(SpawnServerError::RemoveStdoutSocket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket).map_err(SpawnServerError::RemoveStderrSocket)?;
    }

    let binary_name = std::env::current_exe().map_err(SpawnServerError::CurrentExe)?;
    let mut server_process = new_smol_command(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .await
        .map_err(SpawnServerError::ProcessStatus)?;

    if !status.success() {
        return Err(SpawnServerError::LaunchStatus {
            status,
            paths: format!(
                "log file: {:?}, pid file: {:?}",
                paths.log_file, paths.pid_file,
            ),
        });
    }

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
        if total_time_waited > std::time::Duration::from_secs(10) {
            return Err(SpawnServerError::Timeout);
        }
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

#[derive(Debug, Error)]
#[error("Failed to remove PID file for missing process (pid `{pid}`)")]
pub struct CheckPidError {
    #[source]
    source: std::io::Error,
    pid: u32,
}

async fn check_server_running(pid: u32) -> std::io::Result<bool> {
    new_smol_command("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
        .await
        .map(|output| output.status.success())
}

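/// Reads the PID file and checks whether a server with that PID is still alive. Returns the
/// PID if so; otherwise removes the stale PID file and returns `None`.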
async fn check_pid_file(path: &Path) -> Result<Option<u32>, CheckPidError> {
    let Some(pid) = std::fs::read_to_string(&path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match check_server_running(pid).await {
        Ok(true) => {
            log::debug!(
                "Process with PID {} exists. NOT spawning new server, but attaching to existing one.",
                pid
            );
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(&path).map_err(|source| CheckPidError { source, pid })?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path, pid: u32) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid.to_string()).context("Failed to write PID file")
}

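/// Forwards length-prefixed protocol messages from `reader` to `writer` until reading fails,
/// flushing after every message; the proxy uses this to shuttle data between its standard
/// streams and the server's Unix sockets.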
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::{read_message_raw, write_size_prefixed_buffer};

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;
        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;
        writer.flush().await?;
        buffer.clear();
    }
}

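/// Starts watching the settings file on the remote host, reporting parse errors back to the
/// client as toasts, and returns a watch channel that yields updated `NodeBinaryOptions`
/// whenever the node settings change.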
fn initialize_settings(
    session: AnyProtoClient,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> watch::Receiver<Option<NodeBinaryOptions>> {
    let (user_settings_file_rx, watcher_task) =
        watch_config_file(cx.background_executor(), fs, paths::settings_file().clone());

    handle_settings_file_changes(user_settings_file_rx, watcher_task, cx, {
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Server settings failed to change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (mut tx, rx) = watch::channel(None);
    let mut node_settings = None;
    cx.observe_global::<SettingsStore>(move |cx| {
        let new_node_settings = &ProjectSettings::get_global(cx).node;
        if Some(new_node_settings) != node_settings.as_ref() {
            log::info!("Got new node settings: {new_node_settings:?}");
            let options = NodeBinaryOptions {
                allow_path_lookup: !new_node_settings.ignore_system_version,
                // TODO: Implement this setting
                allow_binary_download: true,
                use_paths: new_node_settings.path.as_ref().map(|node_path| {
                    let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                    let npm_path = new_node_settings
                        .npm_path
                        .as_ref()
                        .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                    (
                        node_path.clone(),
                        npm_path.unwrap_or_else(|| {
                            let base_path = PathBuf::new();
                            node_path.parent().unwrap_or(&base_path).join("npm")
                        }),
                    )
                }),
            };
            node_settings = Some(new_node_settings.clone());
            tx.send(Some(options)).ok();
        }
    })
    .detach();

    rx
}

pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    watcher_task: gpui::Task<()>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .foreground_executor()
        .block_on(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(async move |cx| {
        let _watcher_task = watcher_task;
        while let Some(server_settings_content) = server_settings_file.next().await {
            cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<HeadlessProject>) -> Option<Url> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();

    proxy_str
        .as_deref()
        .map(str::trim)
        .filter(|input| !input.is_empty())
        .and_then(|input| {
            input
                .parse::<Url>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env)
}

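/// Forks the process: the parent returns `ControlFlow::Break` so it can exit, while the child
/// redirects its standard streams to /dev/null (so the outer SSH process is no longer
/// attached to it) and continues as the server.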
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow!("failed to call fork with error code {e}"))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = unsafe { libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR) };
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = unsafe { libc::dup2(devnull_fd, fd) };
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        unsafe { libc::close(devnull_fd) != -1 },
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

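/// Removes remote server binaries left over from older versions on the current release
/// channel, skipping any binary that is currently running or at least as new as this build.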
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir.as_std_path())? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name()
            && let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix)
            && !is_new_version(version)
            && !is_file_in_use(file_name)
        {
            log::info!("removing old remote server binary: {:?}", path);
            std::fs::remove_file(&path)?;
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    semver::Version::from_str(version)
        .ok()
        .zip(semver::Version::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info = sysinfo::System::new_with_specifics(sysinfo::RefreshKind::nothing().with_processes(
        sysinfo::ProcessRefreshKind::nothing().with_exe(sysinfo::UpdateKind::Always),
    ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}