use crate::HeadlessProject;
use crate::headless_project::HeadlessAppState;
use anyhow::{Context as _, Result, anyhow};
use chrono::Utc;
use client::{ProxySettings, telemetry};

use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, select, select_biased};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{Url, read_proxy_from_env};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;

use proto::CrashReport;
use release_channel::{AppVersion, RELEASE_CHANNEL, ReleaseChannel};
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{Settings, SettingsStore, watch_config_file};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;

use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::collections::HashMap;
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

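/// Initializes logging for the proxy process, emitting JSON log records on stderr
/// with a "(remote proxy)" prefix so the SSH client can tell where they came from.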
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

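/// Initializes logging for the server process. Log records are written both to the
/// given log file and to a channel, whose receiver is returned so the records can be
/// forwarded to the proxy over the stderr socket.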
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: std::fs::File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

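/// Installs a panic hook that logs the panic, serializes it as a
/// `telemetry_events::Panic` into a `zed-*.panic` file under the logs directory
/// (picked up later by `handle_crash_files_requests`), and then aborts the process.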
fn init_panic_hook(session_id: String) {
    std::panic::set_hook(Box::new(move |info| {
        crashes::handle_panic();
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|frame| Some(format!("{:#}", frame.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            (&backtrace).join("\n")
        );

        let release_channel = *RELEASE_CHANNEL;
        let version = match release_channel {
            ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
            ReleaseChannel::Nightly | ReleaseChannel::Dev => {
                option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
            }
        };

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!("remote-server-{version}"),
            app_commit_sha: option_env!("ZED_COMMIT_SHA").map(|sha| sha.into()),
            release_channel: release_channel.dev_name().into(),
            target: env!("TARGET").to_owned().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,       // Set on SSH client
            installation_id: None, // Set on SSH client

            // Used on this end to associate panics with minidumps, but will be replaced on the SSH client.
            session_id: session_id.clone(),
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

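/// Registers a handler for `GetCrashFiles` requests: it collects `.panic` and `.dmp`
/// files from the logs directory, pairs minidumps with panics via the session id,
/// deletes the files it has consumed, and returns the crash reports to the client.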
fn handle_crash_files_requests(project: &Entity<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetCrashFiles>, _cx| async move {
            let mut crashes = Vec::new();
            let mut minidumps_by_session_id = HashMap::new();
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                let extension = child_path.extension();
                if extension == Some(OsStr::new("panic")) {
                    let filename = if let Some(filename) = child_path.file_name() {
                        filename.to_string_lossy()
                    } else {
                        continue;
                    };

                    if !filename.starts_with("zed") {
                        continue;
                    }

                    let file_contents = smol::fs::read_to_string(&child_path)
                        .await
                        .context("error reading panic file")?;

                    crashes.push(proto::CrashReport {
                        panic_contents: Some(file_contents),
                        minidump_contents: None,
                    });
                } else if extension == Some(OsStr::new("dmp")) {
                    let session_id = child_path.file_stem().unwrap().to_string_lossy();
                    minidumps_by_session_id
                        .insert(session_id.to_string(), smol::fs::read(&child_path).await?);
                } else {
                    // Skip files we don't collect (e.g. the server log) so they aren't deleted below.
                    continue;
                }

                // We've done what we can, delete the file
                smol::fs::remove_file(&child_path)
                    .await
                    .context("error removing panic")
                    .log_err();
            }

            for crash in &mut crashes {
                let panic: telemetry_events::Panic =
                    serde_json::from_str(crash.panic_contents.as_ref().unwrap())?;
                if let dump @ Some(_) = minidumps_by_session_id.remove(&panic.session_id) {
                    crash.minidump_contents = dump;
                }
            }

            crashes.extend(
                minidumps_by_session_id
                    .into_values()
                    .map(|dmp| CrashReport {
                        panic_contents: None,
                        minidump_contents: Some(dmp),
                    }),
            );

            anyhow::Ok(proto::GetCrashFilesResponse { crashes })
        },
    );
}

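/// Unix domain socket listeners over which the proxy process connects to the server
/// to forward its stdin, stdout, and stderr.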
struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

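/// Starts the server's main loop: accepts proxy connections on the stdin/stdout/stderr
/// sockets, forwards protobuf envelopes between the sockets and the returned
/// `ChannelClient`, streams log records to stderr, and shuts the app down if no
/// connection arrives within the idle timeout.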
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection comes in within this timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(async move |cx| {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if stdin_msg_tx.send(msg).await.is_err() {
                        break;
                    }
                }
            })
            .detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {error:?}. exiting.");
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

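/// Creates the directories the headless server relies on (config, extensions,
/// languages, logs, temp, and remote-extension directories).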
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path).with_context(|| format!("creating directory {path:?}"))?;
    }
    Ok(())
}

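/// Entry point for the `run` subcommand: daemonizes, sets up crash handling and
/// logging, binds the stdin/stdout/stderr sockets, and runs the headless GPUI app
/// that hosts the `HeadlessProject`.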
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    let app = gpui::Application::headless();
    let id = std::process::id().to_string();
    app.background_executor()
        .spawn(crashes::init(id.clone()))
        .detach();
    init_panic_hook(id);
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    app.run(move |cx| {
        settings::init(cx);
        let app_version = AppVersion::load(env!("ZED_PKG_VERSION"));
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);
        dap_adapters::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(None, cx.background_executor().clone()));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(http_client.clone(), None, node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_crash_files_requests(&project, &session);

        cx.background_spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

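/// File-system locations shared between the proxy and the server for a given
/// connection identifier: the log file, the PID file, and the three Unix sockets.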
#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(&logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

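/// Entry point for the `proxy` subcommand, run on the remote host for each SSH
/// connection: it ensures a server is running (spawning one, or killing and
/// restarting a stale one as needed), then forwards its own stdin, stdout, and
/// stderr to the server's Unix sockets.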
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();

    let server_paths = ServerPaths::new(&identifier)?;

    let id = std::process::id().to_string();
    smol::spawn(crashes::init(id.clone())).detach();
    init_panic_hook(id);

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server running");
            anyhow::bail!(ProxyLaunchError::ServerNotRunning);
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!(
                "proxy found server already running with PID {}. Killing process and cleaning up files...",
                pid
            );
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    };

    let stdin_task = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdin = Async::new(std::io::stdin())?;
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
            handle_io(stdin, stream, "stdin").await
        }
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdout = Async::new(std::io::stdout())?;
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
            handle_io(stream, stdout, "stdout").await
        }
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream
                .read(&mut stderr_buffer)
                .await
                .context("reading stderr")?
            {
                0 => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                n => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

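/// Kills an already-running server process and removes its PID file and sockets so a
/// fresh server can be spawned.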
fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

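/// Spawns the server process via this binary's `run` subcommand (which daemonizes
/// itself) and waits until all three sockets exist before returning.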
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

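/// Reads the PID file and checks whether the recorded process is still alive
/// (via `kill -0`). Returns the PID if so; otherwise removes the stale PID file.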
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(&path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!(
                "Process with PID {} exists. NOT spawning new server, but attaching to existing one.",
                pid
            );
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(&path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

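/// Writes this process's PID to the given file, replacing any existing file.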
fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

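/// Copies length-prefixed protocol messages from `reader` to `writer` until the
/// stream ends, re-emitting each message with its size prefix so framing is preserved.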
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

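/// Writes `buffer` to `stream`, prefixed with its length as a little-endian `u32`.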
async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

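/// Watches the remote host's settings file, reports parse errors back to the client
/// as toasts, and returns a watch channel with the node binary options derived from
/// the current project settings.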
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        &cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Server settings failed to change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (mut tx, rx) = watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version,
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

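/// Loads the initial server settings synchronously, then applies every subsequent
/// change from the settings file watcher to the global `SettingsStore`, invoking
/// `settings_changed` with any error produced.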
pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(async move |cx| {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

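/// Resolves the HTTP proxy URL from Zed's proxy settings, falling back to the
/// standard proxy environment variables if the setting is absent or unparsable.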
fn read_proxy_settings(cx: &mut Context<HeadlessProject>) -> Option<Url> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    let proxy_url = proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Url>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env);
    proxy_url
}

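/// Forks the process so the server can outlive the SSH session that launched it.
/// The parent returns `ControlFlow::Break` (and exits); the child redirects its
/// standard streams to /dev/null and continues.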
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow!("failed to call fork with error code {e}"))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

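/// Redirects stdin, stdout, and stderr to /dev/null using `dup2` so the daemonized
/// server holds no file descriptors tied to the SSH session's terminal.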
unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = unsafe { libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR) };
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = unsafe { libc::dup2(devnull_fd, fd) };
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        unsafe { libc::close(devnull_fd) != -1 },
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

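/// Removes remote-server binaries of older versions for the current release channel,
/// skipping any binary that is still in use by a running process.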
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

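/// Returns true if `version` parses as a semantic version that is the same as or
/// newer than this binary's `ZED_PKG_VERSION`.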
fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

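/// Returns true if any running process's executable has the given file name,
/// based on a sysinfo snapshot of process executables.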
fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}