use crate::headless_project::HeadlessAppState;
use crate::HeadlessProject;
use anyhow::{anyhow, Context as _, Result};
use chrono::Utc;
use client::{telemetry, ProxySettings};
use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{select, select_biased, AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{read_proxy_from_env, Uri};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;

use release_channel::{AppVersion, ReleaseChannel, RELEASE_CHANNEL};
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{watch_config_file, Settings, SettingsStore};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;

use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

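/// Configures logging for the proxy process: each record is serialized as a
/// JSON `LogRecord`, tagged with "(remote proxy)", and written to env_logger's
/// default target (stderr) so the SSH client can parse and display it.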
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

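/// Configures logging for the headless server: every record is appended to
/// `log_file_path` and also forwarded over a channel so it can be streamed to
/// the client through the stderr socket. Returns the receiving end of that
/// channel.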
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: std::fs::File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

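/// Installs a panic hook that logs the panic with a trimmed backtrace and
/// writes a `telemetry_events::Panic` record as a JSON line to a
/// `zed-<timestamp>.panic` file in the logs directory, so the SSH client can
/// collect it later via `handle_panic_requests`. The process aborts afterwards.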
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|frame| Some(format!("{:#}", frame.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let release_channel = *RELEASE_CHANNEL;
        let version = match release_channel {
            ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
            ReleaseChannel::Nightly | ReleaseChannel::Dev => {
                option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
            }
        };

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!("remote-server-{version}"),
            app_commit_sha: option_env!("ZED_COMMIT_SHA").map(|sha| sha.into()),
            release_channel: release_channel.display_name().into(),
            target: env!("TARGET").to_owned().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,            // Set on SSH client
            installation_id: None,      // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

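/// Registers a handler for `GetPanicFiles` requests: it collects the contents
/// of any `zed-*.panic` files previously written by the panic hook, returns
/// them to the client, and deletes the files once they have been read.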
fn handle_panic_requests(project: &Entity<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can, delete the file
                std::fs::remove_file(child_path)
                    .context("error removing panic")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

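/// Starts the server's main loop: accepts one proxy connection at a time on
/// the stdin/stdout/stderr Unix sockets, forwards incoming envelopes to the
/// application, writes outgoing envelopes back to the proxy, and streams log
/// output over the stderr socket. Returns a `ChannelClient` wired to the
/// incoming/outgoing channels. If no connection arrives within the idle
/// timeout, the app is shut down.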
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection comes in within this timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(async move |cx| {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if stdin_msg_tx.send(msg).await.is_err() {
                        break;
                    }
                }
            })
            .detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

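/// Entry point for the `run` subcommand: daemonizes, sets up logging and the
/// panic hook, writes the pid file, binds the stdin/stdout/stderr sockets, and
/// then runs a headless gpui application hosting a `HeadlessProject` that
/// serves requests from the SSH client.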
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::Application::headless().run(move |cx| {
        settings::init(cx);
        let app_version = AppVersion::init(env!("ZED_PKG_VERSION"));
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(None));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

        cx.background_spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

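/// Entry point for the `proxy` subcommand, which is what the SSH client
/// invokes. It makes sure a server process is running (spawning a new one, or
/// killing and replacing a stale one, unless we are reconnecting), then
/// forwards its own stdin/stdout/stderr to the server's Unix sockets until one
/// of the forwarding tasks fails.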
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!("proxy found server already running with PID {}. Killing process and cleaning up files...", pid);
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    };

    // The `async move` blocks below take ownership of everything they capture,
    // so give the first two forwarding tasks their own clone of the paths and
    // move the original into the last one.
    let stdin_task = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdin = Async::new(std::io::stdin())?;
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
            handle_io(stdin, stream, "stdin").await
        }
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdout = Async::new(std::io::stdout())?;
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
            handle_io(stream, stdout, "stdout").await
        }
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream.read(&mut stderr_buffer).await {
                Ok(0) => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                Ok(n) => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
                Err(error) => {
                    Err(anyhow!("error reading stderr: {error:?}"))?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

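/// Launches the server via this binary's `run` subcommand. Because the spawned
/// process daemonizes itself, `status()` returns as soon as the parent half of
/// the fork exits; we then wait until all three Unix sockets exist, i.e. until
/// the server is ready to accept connections.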
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

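/// Reads the pid file and returns `Some(pid)` if a process with that pid is
/// still alive (checked with `kill -0`). A stale pid file is removed and
/// `None` is returned.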
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!("Process with PID {} exists. NOT spawning new server, but attaching to existing one.", pid);
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

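/// Forwards messages from `reader` to `writer` until the reader side closes:
/// each message is read with `read_message_raw` and re-written with a 4-byte
/// little-endian length prefix via `write_size_prefixed_buffer`.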
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

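/// Watches the remote host's settings file and applies its contents as server
/// settings, surfacing errors to the client as a toast. Also returns a watch
/// channel of `NodeBinaryOptions` derived from the current project settings;
/// the receiver is handed to `NodeRuntime::new` in `execute_run`.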
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Server settings failed to change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

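/// Applies the initial contents of the watched settings file as server
/// settings, then keeps applying subsequent changes as they arrive, invoking
/// `settings_changed` with the result of each update.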
pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(async move |cx| {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<'_, HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env)
}

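/// Forks the process so the server can keep running after the invoking SSH
/// session goes away. The parent half returns `ControlFlow::Break` so the
/// caller can exit; the child redirects its standard streams to /dev/null and
/// continues as the daemon.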
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR);
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = libc::dup2(devnull_fd, fd);
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        libc::close(devnull_fd) != -1,
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

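/// Removes remote-server binaries for the current release channel that do not
/// report a version at least as new as the running one, skipping any binary
/// that is still in use by a running process.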
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}