unix.rs

use crate::headless_project::HeadlessAppState;
use crate::HeadlessProject;
use anyhow::{anyhow, Context, Result};
use chrono::Utc;
use client::{telemetry, ProxySettings};
use fs::{Fs, RealFs};
use futures::channel::mpsc;
use futures::{select, select_biased, AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt};
use git::GitHostingProviderRegistry;
use gpui::{AppContext, Context as _, Model, ModelContext, UpdateGlobal as _};
use http_client::{read_proxy_from_env, Uri};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;

use remote::proxy::ProxyLaunchError;
use remote::ssh_session::ChannelClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, SSH_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{watch_config_file, Settings, SettingsStore};
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;

use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::{env, thread};
use std::{
    io::Write,
    mem,
    path::{Path, PathBuf},
    sync::Arc,
};
use telemetry_events::LocationData;
use util::ResultExt;

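/// Configures logging for the proxy process: each record is serialized as a
/// JSON `LogRecord` with a "(remote proxy)" prefix and written to the default
/// `env_logger` target so the SSH client can parse and display it.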
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

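/// Configures logging for the server process: records are appended to
/// `log_file_path` and, on flush, also sent over the returned channel so they
/// can be streamed back to the client through the stderr socket.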
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: Box<dyn std::io::Write + Send + 'static>,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl std::io::Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, error))?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = Box::new(if log_file_path.exists() {
        std::fs::OpenOptions::new()
            .append(true)
            .open(&log_file_path)
            .context("Failed to open log file in append mode")?
    } else {
        std::fs::File::create(&log_file_path).context("Failed to create log file")?
    });

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::from_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

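/// Installs a panic hook that logs the panic message and backtrace, writes a
/// `telemetry_events::Panic` record to a `zed-*.panic` file in the logs
/// directory (so the SSH client can collect it later), and then aborts.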
fn init_panic_hook() {
    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&str>()
            .map(|s| s.to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Box<Any>".to_string());

        let backtrace = backtrace::Backtrace::new();
        let mut backtrace = backtrace
            .frames()
            .iter()
            .flat_map(|frame| {
                frame
                    .symbols()
                    .iter()
                    .filter_map(|frame| Some(format!("{:#}", frame.name()?)))
            })
            .collect::<Vec<_>>();

        // Strip out the leading stack frames belonging to Rust's panic handling.
        if let Some(ix) = backtrace
            .iter()
            .position(|name| name == "rust_begin_unwind")
        {
            backtrace.drain(0..=ix);
        }

        let thread = thread::current();
        let thread_name = thread.name().unwrap_or("<unnamed>");

        log::error!(
            "panic occurred: {}\nBacktrace:\n{}",
            &payload,
            backtrace.join("\n")
        );

        let panic_data = telemetry_events::Panic {
            thread: thread_name.into(),
            payload: payload.clone(),
            location_data: info.location().map(|location| LocationData {
                file: location.file().into(),
                line: location.line(),
            }),
            app_version: format!(
                "remote-server-{}",
                option_env!("ZED_COMMIT_SHA").unwrap_or(&env!("ZED_PKG_VERSION"))
            ),
            release_channel: release_channel::RELEASE_CHANNEL.display_name().into(),
            os_name: telemetry::os_name(),
            os_version: Some(telemetry::os_version()),
            architecture: env::consts::ARCH.into(),
            panicked_on: Utc::now().timestamp_millis(),
            backtrace,
            system_id: None,            // Set on SSH client
            installation_id: None,      // Set on SSH client
            session_id: "".to_string(), // Set on SSH client
        };

        if let Some(panic_data_json) = serde_json::to_string(&panic_data).log_err() {
            let timestamp = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string();
            let panic_file_path = paths::logs_dir().join(format!("zed-{timestamp}.panic"));
            let panic_file = std::fs::OpenOptions::new()
                .append(true)
                .create(true)
                .open(&panic_file_path)
                .log_err();
            if let Some(mut panic_file) = panic_file {
                writeln!(&mut panic_file, "{panic_data_json}").log_err();
                panic_file.flush().log_err();
            }
        }

        std::process::abort();
    }));
}

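/// Registers a handler for `proto::GetPanicFiles`: it returns the contents of
/// all `zed-*.panic` files in the logs directory to the SSH client and deletes
/// each file once it has been read.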
fn handle_panic_requests(project: &Model<HeadlessProject>, client: &Arc<ChannelClient>) {
    let client: AnyProtoClient = client.clone().into();
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetPanicFiles>, _cx| async move {
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            let mut panic_files = Vec::new();
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                if child_path.extension() != Some(OsStr::new("panic")) {
                    continue;
                }
                let filename = if let Some(filename) = child_path.file_name() {
                    filename.to_string_lossy()
                } else {
                    continue;
                };

                if !filename.starts_with("zed") {
                    continue;
                }

                let file_contents = smol::fs::read_to_string(&child_path)
                    .await
                    .context("error reading panic file")?;

                panic_files.push(file_contents);

                // We've done what we can, delete the file.
                std::fs::remove_file(child_path)
                    .context("error removing panic file")
                    .log_err();
            }
            anyhow::Ok(proto::GetPanicFilesResponse {
                file_contents: panic_files,
            })
        },
    );
}

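/// The Unix domain sockets on which the server accepts the proxy's stdin,
/// stdout, and stderr connections.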
struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

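/// Runs the server's main accept loop: waits for the proxy to connect on all
/// three sockets, then shuttles protobuf envelopes between the sockets and the
/// returned `ChannelClient`, and shuts the app down if no connection arrives
/// within `IDLE_TIMEOUT`.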
fn start_server(
    listeners: ServerListeners,
    mut log_rx: Receiver<Vec<u8>>,
    cx: &mut AppContext,
) -> Arc<ChannelClient> {
    // This is the server idle timeout. If no connection comes in within this timeout, the server will shut down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);

    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(|cx| async move {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(
                stdin_incoming.next(),
                stdout_incoming.next(),
                stderr_incoming.next(),
            );

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_executor().spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if stdin_msg_tx.send(msg).await.is_err() {
                        break;
                    }
                }
            }).detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {:?}. exiting.", error);
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler: no more outgoing messages");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.next().fuse() => {
                        if let Some(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    ChannelClient::new(incoming_rx, outgoing_tx, cx, "server")
}

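/// Creates the directories the server relies on (config, extensions,
/// languages, logs, temp) if they don't already exist.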
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path)
            .map_err(|e| anyhow!("Could not create directory {:?}: {}", path, e))?;
    }
    Ok(())
}

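/// Entry point for the `run` command: creates the required directories,
/// daemonizes, initializes logging and the panic hook, binds the Unix sockets,
/// and then runs a headless GPUI app hosting the `HeadlessProject`.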
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    init_panic_hook();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    gpui::App::headless().run(move |cx| {
        settings::init(cx);
        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        client::init_settings(cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);

        let project = cx.new_model(|cx| {
            let fs = Arc::new(RealFs::new(Default::default(), None));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = Arc::new(
                ReqwestClient::proxy_and_user_agent(
                    proxy_url,
                    &format!(
                        "Zed-Server/{} ({}; {})",
                        env!("CARGO_PKG_VERSION"),
                        std::env::consts::OS,
                        std::env::consts::ARCH
                    ),
                )
                .expect("Could not start HTTP client"),
            );

            let node_runtime = NodeRuntime::new(http_client.clone(), node_settings_rx);

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                },
                cx,
            )
        });

        handle_panic_requests(&project, &session);

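        // Intentionally leak the model handle so the headless project stays
        // alive for as long as the app runs.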
        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

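/// Filesystem locations used by a single server instance, namespaced under the
/// connection identifier.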
#[derive(Clone)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir)?;
        std::fs::create_dir_all(logs_dir())?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

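/// Entry point for the `proxy` command: makes sure a server is running for
/// `identifier` (spawning one, or killing and replacing a stale one, unless we
/// are reconnecting), then forwards its own stdin/stdout/stderr over the
/// server's Unix sockets for the lifetime of the SSH connection.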
pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
    init_logging_proxy();
    init_panic_hook();

    log::info!("starting proxy process. PID: {}", std::process::id());

    let server_paths = ServerPaths::new(&identifier)?;

    let server_pid = check_pid_file(&server_paths.pid_file)?;
    let server_running = server_pid.is_some();
    if is_reconnecting {
        if !server_running {
            log::error!("attempted to reconnect, but no server is running");
            return Err(anyhow!(ProxyLaunchError::ServerNotRunning));
        }
    } else {
        if let Some(pid) = server_pid {
            log::info!("proxy found server already running with PID {}. Killing process and cleaning up files...", pid);
            kill_running_server(pid, &server_paths)?;
        }

        spawn_server(&server_paths)?;
    }

    let stdin_task = smol::spawn(async move {
        let stdin = Async::new(std::io::stdin())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
        handle_io(stdin, stream, "stdin").await
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn(async move {
        let stdout = Async::new(std::io::stdout())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
        handle_io(stream, stdout, "stdout").await
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream.read(&mut stderr_buffer).await {
                Ok(0) => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                Ok(n) => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
                Err(error) => {
                    Err(anyhow!("error reading stderr: {error:?}"))?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.context("stdin_task failed"),
            result = stdout_task.fuse() => result.context("stdout_task failed"),
            result = stderr_task.fuse() => result.context("stderr_task failed"),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

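/// Kills an already-running server process and removes its PID file and
/// sockets so a fresh server can be spawned.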
fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<()> {
    log::info!("killing existing server with PID {}", pid);
    std::process::Command::new("kill")
        .arg(pid.to_string())
        .output()
        .context("failed to kill existing server")?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

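/// Removes any stale sockets, launches the server binary with the `run`
/// command (which daemonizes itself), and waits until all three sockets exist,
/// i.e. until the server is ready to accept connections.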
fn spawn_server(paths: &ServerPaths) -> Result<()> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket)?;
    }

    let binary_name = std::env::current_exe()?;
    let mut server_process = std::process::Command::new(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .context("failed to launch server process")?;
    anyhow::ensure!(
        status.success(),
        "failed to launch and detach server process"
    );

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

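/// Reads the PID file and checks (via `kill -0`) whether that process is still
/// alive. Returns the PID if it is; otherwise removes the stale PID file and
/// returns `None`.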
fn check_pid_file(path: &Path) -> Result<Option<u32>> {
    let Some(pid) = std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match std::process::Command::new("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
    {
        Ok(output) if output.status.success() => {
            log::debug!("Process with PID {} exists. NOT spawning new server, but attaching to existing one.", pid);
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(path).context("Failed to remove PID file")?;
            Ok(None)
        }
    }
}

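/// Writes the current process id to the PID file, replacing any existing file.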
fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

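/// Forwards protocol messages from `reader` to `writer` one at a time,
/// flushing after each message, until reading fails or the stream ends.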
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::read_message_raw;

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;

        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;

        writer.flush().await?;

        buffer.clear();
    }
}

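/// Writes `buffer` to the stream, prefixed with its length as a little-endian
/// `u32`.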
async fn write_size_prefixed_buffer<S: AsyncWrite + Unpin>(
    stream: &mut S,
    buffer: &mut Vec<u8>,
) -> Result<()> {
    let len = buffer.len() as u32;
    stream.write_all(len.to_le_bytes().as_slice()).await?;
    stream.write_all(buffer).await?;
    Ok(())
}

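/// Starts watching the remote settings file, reports settings errors to the
/// client as toasts, and returns a watch channel carrying the node binary
/// options derived from the current project settings.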
fn initialize_settings(
    session: Arc<ChannelClient>,
    fs: Arc<dyn Fs>,
    cx: &mut AppContext,
) -> async_watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx = watch_config_file(
        cx.background_executor(),
        fs,
        paths::settings_file().clone(),
    );

    handle_settings_file_changes(user_settings_file_rx, cx, {
        let session = session.clone();
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Failed to update server settings: {}", e);

                session
                    .send(proto::Toast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: SSH_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (tx, rx) = async_watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        log::info!("Got new node settings: {:?}", settings);
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version.unwrap_or_default(),
            // TODO: Implement this setting
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();

    rx
}

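/// Loads the initial contents of the settings file into the global
/// `SettingsStore`, then applies every subsequent change, invoking
/// `settings_changed` with the result each time.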
pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut AppContext,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut AppContext) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(move |cx| async move {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

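/// Resolves the proxy URL from the global proxy settings, falling back to the
/// standard proxy environment variables when none is configured or parsing
/// fails.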
fn read_proxy_settings(cx: &mut ModelContext<'_, HeadlessProject>) -> Option<Uri> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Uri>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env)
}

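/// Forks the process: the parent gets `ControlFlow::Break` so the caller can
/// exit immediately, while the child keeps running detached from the launching
/// SSH process, with its standard streams redirected to /dev/null.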
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow::anyhow!("failed to call fork with error code {}", e))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

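/// Redirects stdin, stdout, and stderr to /dev/null via `dup2` so the
/// daemonized server no longer holds the SSH session's file descriptors.
///
/// # Safety
///
/// Calls raw libc functions and assumes the standard file descriptors are
/// valid.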
unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR);
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = libc::dup2(devnull_fd, fd);
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        libc::close(devnull_fd) != -1,
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}