use crate::HeadlessProject;
use crate::headless_project::HeadlessAppState;
use anyhow::{Context as _, Result, anyhow};
use chrono::Utc;
use client::{ProxySettings, telemetry};
use dap::DapRegistry;
use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, select, select_biased};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{Uri, read_proxy_from_env};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;

use release_channel::{AppVersion, RELEASE_CHANNEL, ReleaseChannel};
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{Settings, SettingsStore, watch_config_file};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;

use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

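/// Configures `env_logger` for the proxy process: each record is serialized as a JSON
/// `LogRecord` whose message is prefixed with "(remote proxy)".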
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

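/// Configures logging for the server process: records are serialized as JSON `LogRecord`s
/// (prefixed with "(remote server)") and written both to the given log file and, on each
/// flush, to the returned channel so they can be forwarded over the stderr socket.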
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: std::fs::File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

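/// Installs a panic hook that logs the panic with a trimmed backtrace, writes the panic
/// data as JSON to a `zed-<timestamp>.panic` file in the logs directory (later collected
/// by the client via `GetPanicFiles`), and then aborts the process.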
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|symbol| Some(format!("{:#}", symbol.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for Rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let release_channel = *RELEASE_CHANNEL;
        let version = match release_channel {
            ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
            ReleaseChannel::Nightly | ReleaseChannel::Dev => {
                option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
            }
        };

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!("remote-server-{version}"),
            app_commit_sha: option_env!("ZED_COMMIT_SHA").map(|sha| sha.into()),
            release_channel: release_channel.display_name().into(),
            target: env!("TARGET").to_owned().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None, // Set on SSH client
            installation_id: None, // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

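/// Registers a request handler that returns the contents of all `zed-*.panic` files in
/// the logs directory to the client and deletes each file once it has been read.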
fn handle_panic_requests(project: &Entity<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can, delete the file
                std::fs::remove_file(child_path)
                    .context("error removing panic file")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

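/// Runs the server's main loop on the GPUI executor: accepts one connection per Unix
/// socket, forwards messages read from the stdin socket to the returned `ChannelClient`,
/// writes outgoing messages to the stdout socket, mirrors log output to the stderr
/// socket, and shuts the app down if no connection arrives within `IDLE_TIMEOUT`.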
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection comes in within this timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(async move |cx| {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if stdin_msg_tx.send(msg).await.is_err() {
                        break;
                    }
                }
            }).detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

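/// Entry point for the `run` subcommand: daemonizes, initializes the panic hook and
/// logging, writes the PID file, binds the three Unix sockets, and then runs the
/// headless GPUI application hosting the `HeadlessProject`.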
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::Application::headless().run(move |cx| {
        settings::init(cx);
        let app_version = AppVersion::init(env!("ZED_PKG_VERSION"));
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(None, cx.background_executor().clone()));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);
            let debug_adapters = DapRegistry::default().into();

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    debug_adapters,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

        cx.background_spawn(async move { cleanup_old_binaries() })
            .detach();

        // Keep the project entity alive for the rest of the process's lifetime.
        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

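/// Entry point for the proxy process launched over SSH: ensures a server is running for
/// `identifier` (spawning or replacing one unless we are reconnecting), then forwards its
/// own stdin, stdout, and stderr to the server's Unix sockets until a forwarding task fails.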
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!(
                "proxy found server already running with PID {}. Killing process and cleaning up files...",
                pid
            );
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    }

    // Give each forwarding task its own copy of the socket path, so the async blocks
    // don't have to capture `server_paths` itself.
    let stdin_task = smol::spawn({
        let stdin_socket = server_paths.stdin_socket.clone();
        async move {
            let stdin = Async::new(std::io::stdin())?;
            let stream = smol::net::unix::UnixStream::connect(&stdin_socket).await?;
            handle_io(stdin, stream, "stdin").await
        }
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn({
        let stdout_socket = server_paths.stdout_socket.clone();
        async move {
            let stdout = Async::new(std::io::stdout())?;
            let stream = smol::net::unix::UnixStream::connect(&stdout_socket).await?;
            handle_io(stream, stdout, "stdout").await
        }
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn({
        let stderr_socket = server_paths.stderr_socket.clone();
        async move {
            let mut stderr = Async::new(std::io::stderr())?;
            let mut stream = smol::net::unix::UnixStream::connect(&stderr_socket).await?;
            let mut stderr_buffer = vec![0; 2048];
            loop {
                match stream.read(&mut stderr_buffer).await {
                    Ok(0) => {
                        let error =
                            std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                        Err(anyhow!(error))?;
                    }
                    Ok(n) => {
                        stderr.write_all(&stderr_buffer[..n]).await?;
                        stderr.flush().await?;
                    }
                    Err(error) => {
                        Err(anyhow!("error reading stderr: {error:?}"))?;
                    }
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

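/// Launches the server binary with the `run` subcommand (which daemonizes itself and then
/// returns) and waits until all three sockets exist, i.e. until the server is ready to
/// accept connections.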
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

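/// Reads the PID stored at `path` and uses `kill -0` to check whether that process is
/// still alive. Returns the PID if it is; otherwise removes the stale PID file and
/// returns `None`.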
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!(
                "Process with PID {} exists. NOT spawning new server, but attaching to existing one.",
                pid
            );
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

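/// Forwards length-prefixed messages from `reader` to `writer`, reading one complete
/// message at a time and writing it back out with its size prefix, until either side
/// returns an error.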
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

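/// Watches the remote settings file, reporting parse errors back to the client as a
/// toast, and returns a watch channel that yields `NodeBinaryOptions` derived from the
/// current `ProjectSettings` whenever the settings change.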
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Failed to update server settings: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(async move |cx| {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    let proxy_url = proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env);
    proxy_url
}

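/// Forks the process so the server can outlive the SSH session that launched it: the
/// parent returns `ControlFlow::Break` so the caller can exit, while the child redirects
/// its standard streams to /dev/null and continues as the server.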
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = unsafe { libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR) };
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = unsafe { libc::dup2(devnull_fd, fd) };
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        unsafe { libc::close(devnull_fd) != -1 },
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

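/// Removes remote server binaries for the current release channel whose version is not at
/// least the running version, skipping any binary still in use by a running process.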
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}