use crate::headless_project::HeadlessAppState;
use crate::HeadlessProject;
use anyhow::{anyhow, Context as _, Result};
use chrono::Utc;
use client::{telemetry, ProxySettings};
use dap::DapRegistry;
use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{select, select_biased, AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{read_proxy_from_env, Uri};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;

use release_channel::{AppVersion, ReleaseChannel, RELEASE_CHANNEL};
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{watch_config_file, Settings, SettingsStore};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;

use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

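/// Sets up logging for the proxy process: each record is serialized as a JSON
/// `LogRecord` (one per line) on stdout, with its message prefixed with
/// "(remote proxy)" so readers of the combined log stream can tell proxy output
/// apart from server output.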
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

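/// Sets up logging for the server process. Each record is appended to the given
/// log file and, on flush, also forwarded through the returned channel so that
/// `start_server` can stream it to the proxy over the stderr socket.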
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: std::fs::File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

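/// Installs a panic hook that logs the panic with a trimmed backtrace, writes a
/// JSON `telemetry_events::Panic` record to a `zed-<timestamp>.panic` file in the
/// logs directory (system, installation, and session ids are filled in on the SSH
/// client side), and then aborts the process.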
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|frame| Some(format!("{:#}", frame.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let release_channel = *RELEASE_CHANNEL;
        let version = match release_channel {
            ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
            ReleaseChannel::Nightly | ReleaseChannel::Dev => {
                option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
            }
        };

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!("remote-server-{version}"),
            app_commit_sha: option_env!("ZED_COMMIT_SHA").map(|sha| sha.into()),
            release_channel: release_channel.display_name().into(),
            target: env!("TARGET").to_owned().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,            // Set on SSH client
            installation_id: None,      // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

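/// Registers a `GetPanicFiles` request handler on the channel client: it collects
/// the contents of `zed-*.panic` files from the logs directory, returns them to the
/// requester, and deletes each file it managed to read.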
fn handle_panic_requests(project: &Entity<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can, delete the file
                std::fs::remove_file(child_path)
                    .context("error removing panic")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

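/// Unix domain socket listeners for the three streams the proxy connects to. In
/// `start_server` the stdin socket carries incoming protocol messages, the stdout
/// socket carries outgoing ones, and the stderr socket carries the server's log
/// output.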
struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

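/// Spawns the server's main loop: it accepts one stdin/stdout/stderr connection
/// triple at a time, forwards protocol messages between the Unix sockets and the
/// in-process channels, streams log output to the stderr socket, and shuts the app
/// down if no connection arrives within `IDLE_TIMEOUT` or when the app quits.
/// Returns the `ChannelClient` wired to those channels.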
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection arrives within this timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(async move |cx| {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if stdin_msg_tx.send(msg).await.is_err() {
                        break;
                    }
                }
            })
            .detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

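/// Entry point for the `run` subcommand: creates the required directories,
/// daemonizes, installs the panic hook and logging, writes the PID file, binds the
/// three Unix sockets, and then runs a headless GPUI app hosting the
/// `HeadlessProject` until it is shut down.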
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::Application::headless().run(move |cx| {
        settings::init(cx);
        let app_version = AppVersion::init(env!("ZED_PKG_VERSION"));
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(None, cx.background_executor().clone()));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);
            let debug_adapters = DapRegistry::default().into();

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    debug_adapters,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

        cx.background_spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

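/// On-disk locations used by a single server instance, keyed by the connection
/// identifier: the PID file and the three socket paths live under
/// `remote_server_state_dir()/<identifier>`, while the log file lives in the logs
/// directory.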
#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

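/// Entry point for the proxy process: it checks the PID file to decide whether to
/// attach to an existing server or (re)spawn one, then forwards its own
/// stdin/stdout/stderr to the server's Unix sockets until one of the forwarding
/// tasks fails.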
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!("proxy found server already running with PID {}. Killing process and cleaning up files...", pid);
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    }

    let stdin_task = smol::spawn(async move {
        let stdin = Async::new(std::io::stdin())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
        handle_io(stdin, stream, "stdin").await
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn(async move {
        let stdout = Async::new(std::io::stdout())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
        handle_io(stream, stdout, "stdout").await
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream.read(&mut stderr_buffer).await {
                Ok(0) => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                Ok(n) => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
                Err(error) => {
                    Err(anyhow!("error reading stderr: {error:?}"))?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

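/// Spawns a fresh server by re-invoking the current executable with the `run`
/// subcommand, then waits (in 20ms steps) until all three sockets exist. Note that
/// this wait currently has no upper bound.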
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

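/// Reads the PID file and probes the process with `kill -0`. Returns `Some(pid)` if
/// a server with that PID is still running; otherwise removes the stale PID file and
/// returns `None`.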
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(&path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!("Process with PID {} exists. NOT spawning new server, but attaching to existing one.", pid);
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(&path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

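/// Copies messages from `reader` to `writer` until the reader side fails or closes:
/// each message is read with `read_message_raw` and re-written with a 4-byte
/// little-endian length prefix via `write_size_prefixed_buffer`, flushing after
/// every message.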
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

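/// Writes `buffer` to `stream` as one length-prefixed frame: a `u32` little-endian
/// length followed by the payload bytes.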
async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

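/// Starts watching the settings file on the remote host and returns a watch channel
/// carrying the node binary options derived from it. Settings errors are surfaced to
/// the client as a toast, and the toast is hidden again once the settings parse.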
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        &cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Failed to update server settings: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

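/// Applies the initial server settings synchronously, then keeps applying new
/// contents from the watched settings file for as long as the app is alive,
/// invoking `settings_changed` with the parse result for each subsequent update.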
pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(async move |cx| {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    let proxy_url = proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env);
    proxy_url
}

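/// Forks the process so the server is no longer attached to the process that
/// launched it: the parent returns `ControlFlow::Break` (and the caller exits),
/// while the child redirects its standard streams to /dev/null and continues.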
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR);
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = libc::dup2(devnull_fd, fd);
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        libc::close(devnull_fd) != -1,
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

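/// Deletes remote-server binaries left over from previous versions of the current
/// release channel, skipping any binary that is at least as new as this one or that
/// is still the executable of a running process.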
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}