use crate::headless_project::HeadlessAppState;
use crate::HeadlessProject;
use anyhow::{anyhow, Context as _, Result};
use chrono::Utc;
use client::{telemetry, ProxySettings};
use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{select, select_biased, AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{read_proxy_from_env, Uri};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;
use release_channel::{AppVersion, ReleaseChannel, RELEASE_CHANNEL};
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{watch_config_file, Settings, SettingsStore};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;
use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

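/// Initializes logging for the proxy process: log records are serialized as JSON,
/// one record per line, with messages prefixed "(remote proxy)".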
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

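/// Initializes logging for the server process. Each log record is serialized as JSON and
/// written both to `log_file_path` and to the returned channel, which `start_server`
/// forwards to the proxy over the stderr socket.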
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: std::fs::File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

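/// Installs a panic hook that logs the panic, writes a `zed-<timestamp>.panic` file
/// containing `telemetry_events::Panic` JSON into the logs directory, and then aborts.
/// The panic files are later returned to the SSH client via `GetPanicFiles`.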
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|symbol| Some(format!("{:#}", symbol.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let release_channel = *RELEASE_CHANNEL;
        let version = match release_channel {
            ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
            ReleaseChannel::Nightly | ReleaseChannel::Dev => {
                option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
            }
        };

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!("remote-server-{version}"),
            app_commit_sha: option_env!("ZED_COMMIT_SHA").map(|sha| sha.into()),
            release_channel: release_channel.display_name().into(),
            target: env!("TARGET").to_owned().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,            // Set on SSH client
            installation_id: None,      // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

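/// Registers a handler for `proto::GetPanicFiles` that returns the contents of all
/// `zed-*.panic` files in the logs directory and deletes them afterwards.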
fn handle_panic_requests(project: &Entity<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can; delete the panic file.
                std::fs::remove_file(child_path)
                    .context("error removing panic file")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

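/// Unix domain socket listeners standing in for the server's stdin, stdout, and stderr.
/// The proxy process connects to these to forward data to and from the SSH connection.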
struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

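/// Starts the server's main loop: accepts proxy connections on the three sockets,
/// forwards envelopes between the sockets and the returned `ChannelClient`, streams
/// log output to the stderr socket, and shuts the app down after `IDLE_TIMEOUT`
/// passes without a new connection.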
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> Arc<ChannelClient> {
    // Server idle timeout: if no new connection is accepted within this duration, the server shuts down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(|cx| async move {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_executor()
                .spawn(async move {
                    while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                        if stdin_msg_tx.send(msg).await.is_err() {
                            break;
                        }
                    }
                })
                .detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler received no message. exiting.");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

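/// Creates the directories the server needs (config, extensions, languages, logs, etc.).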
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

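/// Entry point for the `run` command: daemonizes, sets up logging and the panic hook,
/// binds the stdio sockets, and runs a headless GPUI app hosting the `HeadlessProject`.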
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::Application::headless().run(move |cx| {
        settings::init(cx);
        let app_version = AppVersion::init(env!("ZED_PKG_VERSION"));
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(Default::default(), None));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

        cx.background_executor()
            .spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

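/// Locations of the state files for a given server identifier: the PID file, the three
/// stdio sockets, and the log file.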
#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

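/// Entry point for the `proxy` command: ensures a server is running for `identifier`
/// (spawning one unless we are reconnecting), then forwards this process's stdin,
/// stdout, and stderr to the server's Unix sockets until one of the streams fails.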
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server is running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!("proxy found server already running with PID {}. Killing process and cleaning up files...", pid);
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    }

    // Each forwarding task gets its own copy of the paths, since the `async move`
    // blocks capture `server_paths` as a whole.
    let stdin_task = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdin = Async::new(std::io::stdin())?;
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
            handle_io(stdin, stream, "stdin").await
        }
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn({
        let server_paths = server_paths.clone();
        async move {
            let stdout = Async::new(std::io::stdout())?;
            let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
            handle_io(stream, stdout, "stdout").await
        }
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream.read(&mut stderr_buffer).await {
                Ok(0) => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                Ok(n) => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
                Err(error) => {
                    Err(anyhow!("error reading stderr: {error:?}"))?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

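/// Launches the server by re-invoking the current executable with the `run` command,
/// then waits until all three stdio sockets exist before returning.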
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

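/// Reads the PID file and checks with `kill -0` whether that process is still alive.
/// Returns the PID if so; otherwise removes the stale PID file and returns `None`.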
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!("Process with PID {} exists. NOT spawning new server, but attaching to existing one.", pid);
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

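/// Copies length-prefixed messages from `reader` to `writer`, flushing after each one,
/// until reading fails (for example, because the other side closed the socket).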
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

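/// Writes `buffer` to `stream` preceded by its length as a little-endian `u32`:
///
/// ```text
/// [len: u32 LE][len bytes of payload]
/// ```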
async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

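/// Watches the remote host's settings file, reporting parse errors back to the client
/// as toasts, and returns a watch channel carrying the node binary options derived
/// from the current `ProjectSettings`.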
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        &cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Failed to apply server settings change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(move |cx| async move {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<'_, HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env)
}

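/// Forks the process. The parent returns `ControlFlow::Break` so it can exit; the child
/// redirects its standard streams to /dev/null (so the outer SSH process no longer holds
/// them) and returns `ControlFlow::Continue`.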
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR);
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = libc::dup2(devnull_fd, fd);
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        libc::close(devnull_fd) != -1,
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

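/// Deletes remote server binaries from older versions of the current release channel,
/// skipping any binary that is still in use by a running process.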
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}