use crate::headless_project::HeadlessAppState;
use crate::HeadlessProject;
use anyhow::{anyhow, Context, Result};
use chrono::Utc;
use client::{telemetry, ProxySettings};
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{select, select_biased, AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt};
use git::GitHostingProviderRegistry;
use gpui::{AppContext, Context as _, Model, ModelContext, SemanticVersion, UpdateGlobal as _};
use http_client::{read_proxy_from_env, Uri};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;
use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{watch_config_file, Settings, SettingsStore};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;
use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

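/// Initializes logging for the proxy process: every record is serialized as a
/// JSON `LogRecord`, with a `(remote proxy)` prefix on the message.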
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

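/// Initializes logging for the server process. Records are appended to the
/// given log file and, on flush, also sent over the returned channel so they
/// can be forwarded over the stderr socket.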
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: Box<dyn std::io::Write + Send + 'static>,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = Box::new(if log_file_path.exists() {
        std::fs::OpenOptions::new()
            .append(true)
            .open(&log_file_path)
            .context("Failed to open log file in append mode")?
    } else {
        std::fs::File::create(&log_file_path).context("Failed to create log file")?
    });

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

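/// Installs a panic hook that logs the panic with a backtrace, appends a
/// JSON-serialized `telemetry_events::Panic` record to a `zed-*.panic` file in
/// the logs directory, and then aborts the process.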
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|frame| Some(format!("{:#}", frame.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out leading stack frames for rust panic-handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!(
                "remote-server-{}",
                option_env!("ZED_COMMIT_SHA").unwrap_or(&env!("ZED_PKG_VERSION"))
            ),
            release_channel: release_channel::RELEASE_CHANNEL.display_name().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,            // Set on SSH client
            installation_id: None,      // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

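/// Registers a handler for `proto::GetPanicFiles`: it collects the contents of
/// all `zed-*.panic` files in the logs directory, returns them to the client,
/// and deletes the files afterwards.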
fn handle_panic_requests(project: &Model<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can, delete the file
                std::fs::remove_file(child_path)
                    .context("error removing panic")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

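/// Unix domain socket listeners the server accepts the proxy's connections on:
/// one socket each for forwarded stdin, stdout, and log output (stderr).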
struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

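/// Spawns the server's main loop: it accepts one connection per socket,
/// forwards protobuf envelopes between the sockets and the returned
/// `ChannelClient`, streams log output over the stderr socket, and shuts the
/// app down when no proxy connects within the idle timeout.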
fn start_server(
    listeners: ServerListeners,
    mut log_rx: Receiver<Vec<u8>>,
    cx: &mut AppContext,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection is accepted within this
    // timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(|cx| async move {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_executor()
                .spawn(async move {
                    while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                        if stdin_msg_tx.send(msg).await.is_err() {
                            break;
                        }
                    }
                })
                .detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.next().fuse() => {
                        if let Some(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

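/// Ensures the directories the server relies on (config, extensions, languages,
/// logs, temp) exist before anything else runs.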
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

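/// Entry point for the `run` subcommand: daemonizes, sets up logging, panic
/// handling, and the PID file, binds the Unix sockets, and then runs the
/// headless GPUI app hosting the `HeadlessProject`.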
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::App::headless().run(move |cx| {
        settings::init(cx);
        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        let project = cx.new_model(|cx| {
            let fs = Arc::new(RealFs::new(Default::default(), None));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = Arc::new(
                ReqwestClient::proxy_and_user_agent(
                    proxy_url,
                    &format!(
                        "Zed-Server/{} ({}; {})",
                        env!("CARGO_PKG_VERSION"),
                        std::env::consts::OS,
                        std::env::consts::ARCH
                    ),
                )
                .expect("Could not start HTTP client"),
            );

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

        cx.background_executor()
            .spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

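/// Filesystem locations (log file, PID file, and the three Unix sockets) used
/// by one server instance, namespaced by the connection identifier.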
#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

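/// Entry point for the `proxy` subcommand: starts (or re-attaches to) the
/// server as needed, then forwards this process's stdin/stdout/stderr to the
/// server's Unix sockets until one of the forwarding tasks fails.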
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!("proxy found server already running with PID {}. Killing process and cleaning up files...", pid);
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    }

    let stdin_task = smol::spawn(async move {
        let stdin = Async::new(std::io::stdin())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
        handle_io(stdin, stream, "stdin").await
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn(async move {
        let stdout = Async::new(std::io::stdout())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
        handle_io(stream, stdout, "stdout").await
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream.read(&mut stderr_buffer).await {
                Ok(0) => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                Ok(n) => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
                Err(error) => {
                    Err(anyhow!("error reading stderr: {error:?}"))?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

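/// Kills an already-running server by PID and removes its stale PID and socket
/// files so a fresh instance can be spawned.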
fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

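/// Launches the server binary with the `run` subcommand (which daemonizes
/// itself) and waits until all three sockets exist, i.e. until the server is
/// ready to accept connections.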
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

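/// Reads the PID file and checks (via `kill -0`) whether that process is still
/// alive. Returns the PID if it is; otherwise removes the stale file and
/// returns `None`.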
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!("Process with PID {} exists. NOT spawning new server, but attaching to existing one.", pid);
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

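/// Writes the current process ID to the given PID file, replacing any existing
/// file.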
fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

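/// Reads framed messages from `reader` and re-writes them, size-prefixed, to
/// `writer` until a read or write on the named socket fails.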
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

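/// Writes `buffer` to `stream`, preceded by its length as a little-endian u32.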
async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

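/// Watches the user settings file on the remote host, applies changes to the
/// global `SettingsStore` (notifying the client of parse errors via toasts),
/// and returns a watch channel carrying the Node binary options derived from
/// the project settings.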
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut AppContext,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Server settings failed to change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

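/// Applies the initial contents of the watched settings file to the global
/// `SettingsStore`, then keeps applying subsequent changes, invoking
/// `settings_changed` with any parse error (or `None` on success).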
pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut AppContext,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut AppContext) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(move |cx| async move {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

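/// Resolves the proxy URL for outgoing HTTP requests: the value from the
/// user's proxy settings if it parses, otherwise whatever the environment
/// variables provide.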
fn read_proxy_settings(cx: &mut ModelContext<'_, HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    let proxy_url = proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env);
    proxy_url
}

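/// Forks the process so the server can outlive the SSH connection. The parent
/// returns `ControlFlow::Break` (and exits), while the child detaches its
/// standard streams and continues.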
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we redirect stdin/stdout/stderr to /dev/null
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

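/// Redirects stdin, stdout, and stderr to `/dev/null` using `dup2`, so the
/// daemonized server holds no references to the SSH session's streams.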
unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR);
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = libc::dup2(devnull_fd, fd);
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        libc::close(devnull_fd) != -1,
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

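/// Deletes remote-server binaries from previous versions for the current
/// release channel, skipping any that are at least as new as the running
/// version or still in use by a live process.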
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir)? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name() {
            if let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix) {
                if !is_new_version(version) && !is_file_in_use(file_name) {
                    log::info!("removing old remote server binary: {:?}", path);
                    std::fs::remove_file(&path)?;
                }
            }
        }
    }

    Ok(())
}

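/// Returns true if `version` parses as a semantic version that is at least the
/// running server's `ZED_PKG_VERSION`.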
fn is_new_version(version: &str) -> bool {
    SemanticVersion::from_str(version)
        .ok()
        .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

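/// Returns true if any running process's executable has the given file name,
/// which indicates the binary is still in use and must not be deleted.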
fn is_file_in_use(file_name: &OsStr) -> bool {
    let info =
        sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes(
            sysinfo::ProcessRefreshKind::new().with_exe(sysinfo::UpdateKind::Always),
        ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}