use crate::headless_project::HeadlessAppState;
use crate::HeadlessProject;
use anyhow::{anyhow, Context as _, Result};
use chrono::Utc;
use client::{telemetry, ProxySettings};
use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{select, select_biased, AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{read_proxy_from_env, Uri};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;
use release_channel::AppVersion;
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{watch_config_file, Settings, SettingsStore};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;
use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

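/// Sets up logging for the proxy process: every record is serialized as a JSON
/// `LogRecord`, prefixed with "(remote proxy)", and written to stderr via `env_logger`.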
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

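/// Sets up logging for the server process. Records are serialized as JSON `LogRecord`s,
/// prefixed with "(remote server)", and written both to the given log file and to the
/// returned channel, which `start_server` drains and forwards over the stderr socket.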
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: Box<dyn std::io::Write + Send + 'static>,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = Box::new(if log_file_path.exists() {
        std::fs::OpenOptions::new()
            .append(true)
            .open(&log_file_path)
            .context("Failed to open log file in append mode")?
    } else {
        std::fs::File::create(&log_file_path).context("Failed to create log file")?
    });

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

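/// Installs a panic hook that logs the panic message and backtrace, writes a
/// `telemetry_events::Panic` record as JSON to a `zed-<timestamp>.panic` file in the logs
/// directory (so the SSH client can collect it later), and then aborts the process.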
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|symbol| Some(format!("{:#}", symbol.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!(
                "remote-server-{}",
                option_env!("ZED_COMMIT_SHA").unwrap_or(&env!("ZED_PKG_VERSION"))
            ),
            release_channel: release_channel::RELEASE_CHANNEL.display_name().into(),
            target: env!("TARGET").to_owned().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,            // Set on SSH client
            installation_id: None,      // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

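/// Registers a handler for `proto::GetPanicFiles` that returns the contents of all
/// `zed-*.panic` files in the logs directory and deletes them afterwards.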
fn handle_panic_requests(project: &Entity<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can, delete the file.
                std::fs::remove_file(child_path)
                    .context("error removing panic file")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

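/// The Unix domain sockets on which the server accepts connections from the proxy:
/// one each for stdin (incoming messages), stdout (outgoing messages), and stderr (logs).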
struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

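/// Accepts proxy connections on the stdin/stdout/stderr sockets and shuttles protobuf
/// `Envelope`s between those streams and the returned `ChannelClient`. Log output from
/// `log_rx` is forwarded over the stderr connection. If no connection arrives within
/// `IDLE_TIMEOUT`, the app is shut down.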
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection comes in within this timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(|cx| async move {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_executor()
                .spawn(async move {
                    while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                        if stdin_msg_tx.send(msg).await.is_err() {
                            break;
                        }
                    }
                })
                .detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

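/// Creates the directories the server needs (config, extensions, languages, logs, etc.).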
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

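/// Entry point for the `run` subcommand: daemonizes, sets up logging, the panic hook, the
/// PID file, and the Unix socket listeners, then runs a headless gpui application hosting
/// a `HeadlessProject` that serves requests arriving over the stdin/stdout sockets.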
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::Application::headless().run(move |cx| {
        settings::init(cx);
        let app_version = AppVersion::init(env!("ZED_PKG_VERSION"));
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(Default::default(), None));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

        cx.background_executor()
            .spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

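/// Filesystem locations used by a server instance, derived from its identifier: the PID
/// file and the three Unix sockets live in the per-identifier state directory, while the
/// log file lives in the logs directory.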
#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(&logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

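/// Entry point for the proxy subcommand, which runs on the remote host inside the SSH
/// connection: it makes sure a server is running for `identifier` (spawning one unless we
/// are reconnecting to an existing one), then forwards its own stdin/stdout/stderr to the
/// server's Unix sockets.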
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!("proxy found server already running with PID {}. Killing process and cleaning up files...", pid);
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    }

    // Each forwarding task takes ownership of the socket path it connects to, so clone the
    // paths out of `server_paths` up front.
    let stdin_socket = server_paths.stdin_socket.clone();
    let stdout_socket = server_paths.stdout_socket.clone();
    let stderr_socket = server_paths.stderr_socket.clone();

    let stdin_task = smol::spawn(async move {
        let stdin = Async::new(std::io::stdin())?;
        let stream = smol::net::unix::UnixStream::connect(&stdin_socket).await?;
        handle_io(stdin, stream, "stdin").await
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn(async move {
        let stdout = Async::new(std::io::stdout())?;
        let stream = smol::net::unix::UnixStream::connect(&stdout_socket).await?;
        handle_io(stream, stdout, "stdout").await
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream.read(&mut stderr_buffer).await {
                Ok(0) => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                Ok(n) => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
                Err(error) => {
                    Err(anyhow!("error reading stderr: {error:?}"))?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

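/// Spawns a new server process by re-invoking the current executable with the `run`
/// subcommand, removing any stale socket files first, and waits until all three sockets
/// exist before returning.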
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

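/// Reads the PID file and checks (via `kill -0`) whether that process is still alive.
/// Returns the PID if it is; otherwise removes the stale PID file and returns `None`.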
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(&path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!("Process with PID {} exists. NOT spawning new server, but attaching to existing one.", pid);
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(&path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

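/// Forwards length-prefixed messages from `reader` to `writer`, flushing after each one,
/// until reading fails (for example because the other side closed the connection).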
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

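/// Starts watching the settings file, reporting parse errors to the SSH client as toasts,
/// and returns a watch channel carrying the `NodeBinaryOptions` derived from the current
/// project settings.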
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        &cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Server settings failed to change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(move |cx| async move {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<'_, HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    let proxy_url = proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env);
    proxy_url
}

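/// Forks the process. The parent gets `ControlFlow::Break` so it can exit immediately;
/// the child detaches from the outer SSH process by redirecting its standard streams to
/// /dev/null and continues as the server.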
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR);
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = libc::dup2(devnull_fd, fd);
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        libc::close(devnull_fd) != -1,
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

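/// Deletes remote server binaries for this release channel that are older than the
/// current version and not used by any running process.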
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}