use crate::HeadlessProject;
use crate::headless_project::HeadlessAppState;
use anyhow::{Context as _, Result, anyhow};
use client::ProxySettings;
use util::ResultExt;

use extension::ExtensionHostProxy;
use fs::{Fs, RealFs};
use futures::channel::{mpsc, oneshot};
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, select, select_biased};
use git::GitHostingProviderRegistry;
use gpui::{App, AppContext as _, Context, Entity, UpdateGlobal as _};
use gpui_tokio::Tokio;
use http_client::{Url, read_proxy_from_env};
use language::LanguageRegistry;
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use paths::logs_dir;
use project::project_settings::ProjectSettings;
use util::command::new_smol_command;

use proto::CrashReport;
use release_channel::{AppCommitSha, AppVersion, RELEASE_CHANNEL, ReleaseChannel};
use remote::RemoteClient;
use remote::{
    json_log::LogRecord,
    protocol::{read_message, write_message},
    proxy::ProxyLaunchError,
};
use reqwest_client::ReqwestClient;
use rpc::proto::{self, Envelope, REMOTE_SERVER_PROJECT_ID};
use rpc::{AnyProtoClient, TypedEnvelope};
use settings::{Settings, SettingsStore, watch_config_file};
use smol::Async;
use smol::channel::{Receiver, Sender};
use smol::io::AsyncReadExt;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
use std::{
    env,
    ffi::OsStr,
    fs::File,
    io::Write,
    mem,
    ops::ControlFlow,
    path::{Path, PathBuf},
    process::ExitStatus,
    str::FromStr,
    sync::{Arc, LazyLock},
};
use thiserror::Error;

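/// Version string reported by the remote server: the packaged version on
/// Stable/Preview, or a build id and/or commit SHA on Nightly/Dev builds.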
pub static VERSION: LazyLock<String> = LazyLock::new(|| match *RELEASE_CHANNEL {
    ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION").to_owned(),
    ReleaseChannel::Nightly | ReleaseChannel::Dev => {
        let commit_sha = option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha");
        let build_identifier = option_env!("ZED_BUILD_ID");
        if let Some(build_id) = build_identifier {
            format!("{build_id}+{commit_sha}")
        } else {
            commit_sha.to_owned()
        }
    }
});

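/// Initializes logging for the proxy process: records are serialized as JSON
/// lines and prefixed with "(remote proxy)".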
fn init_logging_proxy() {
    env_logger::builder()
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote proxy) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();
}

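/// Initializes logging for the headless server. Log records are written as
/// JSON lines to `log_file_path` and, on flush, mirrored to the returned
/// channel so they can be forwarded to the client over the stderr socket.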
fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
    struct MultiWrite {
        file: File,
        channel: Sender<Vec<u8>>,
        buffer: Vec<u8>,
    }

    impl Write for MultiWrite {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let written = self.file.write(buf)?;
            self.buffer.extend_from_slice(&buf[..written]);
            Ok(written)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.channel
                .send_blocking(self.buffer.clone())
                .map_err(std::io::Error::other)?;
            self.buffer.clear();
            self.file.flush()
        }
    }

    let log_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&log_file_path)
        .context("Failed to open log file in append mode")?;

    let (tx, rx) = smol::channel::unbounded();

    let target = Box::new(MultiWrite {
        file: log_file,
        channel: tx,
        buffer: Vec::new(),
    });

    env_logger::Builder::new()
        .filter_level(log::LevelFilter::Info)
        .parse_default_env()
        .target(env_logger::Target::Pipe(target))
        .format(|buf, record| {
            let mut log_record = LogRecord::new(record);
            log_record.message = format!("(remote server) {}", log_record.message);
            serde_json::to_writer(&mut *buf, &log_record)?;
            buf.write_all(b"\n")?;
            Ok(())
        })
        .init();

    Ok(rx)
}

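/// Registers a handler for `GetCrashFiles` requests: it collects legacy
/// `.panic` files and minidump (`.dmp` + `.json`) pairs from the logs
/// directory, removes them from disk, and responds with the crash reports.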
fn handle_crash_files_requests(project: &Entity<HeadlessProject>, client: &AnyProtoClient) {
    client.add_request_handler(
        project.downgrade(),
        |_, _: TypedEnvelope<proto::GetCrashFiles>, _cx| async move {
            let mut legacy_panics = Vec::new();
            let mut crashes = Vec::new();
            let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
            while let Some(child) = children.next().await {
                let child = child?;
                let child_path = child.path();

                let extension = child_path.extension();
                if extension == Some(OsStr::new("panic")) {
                    let filename = if let Some(filename) = child_path.file_name() {
                        filename.to_string_lossy()
                    } else {
                        continue;
                    };

                    if !filename.starts_with("zed") {
                        continue;
                    }

                    let file_contents = smol::fs::read_to_string(&child_path)
                        .await
                        .context("error reading panic file")?;

                    legacy_panics.push(file_contents);
                    smol::fs::remove_file(&child_path)
                        .await
                        .context("error removing panic")
                        .log_err();
                } else if extension == Some(OsStr::new("dmp")) {
                    let mut json_path = child_path.clone();
                    json_path.set_extension("json");
                    if let Ok(json_content) = smol::fs::read_to_string(&json_path).await {
                        crashes.push(CrashReport {
                            metadata: json_content,
                            minidump_contents: smol::fs::read(&child_path).await?,
                        });
                        smol::fs::remove_file(&child_path).await.log_err();
                        smol::fs::remove_file(&json_path).await.log_err();
                    } else {
                        log::error!("Couldn't find json metadata for crash: {child_path:?}");
                    }
                }
            }

            anyhow::Ok(proto::GetCrashFilesResponse { crashes })
        },
    );
}

struct ServerListeners {
    stdin: UnixListener,
    stdout: UnixListener,
    stderr: UnixListener,
}

impl ServerListeners {
    pub fn new(stdin_path: PathBuf, stdout_path: PathBuf, stderr_path: PathBuf) -> Result<Self> {
        Ok(Self {
            stdin: UnixListener::bind(stdin_path).context("failed to bind stdin socket")?,
            stdout: UnixListener::bind(stdout_path).context("failed to bind stdout socket")?,
            stderr: UnixListener::bind(stderr_path).context("failed to bind stderr socket")?,
        })
    }
}

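/// Starts the headless server's message loop. Accepts connections on the
/// stdin/stdout/stderr Unix sockets, forwards proto messages between them and
/// the returned client, streams log output to the stderr socket, and shuts
/// the app down if no connection arrives within `IDLE_TIMEOUT`.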
fn start_server(
    listeners: ServerListeners,
    log_rx: Receiver<Vec<u8>>,
    cx: &mut App,
) -> AnyProtoClient {
    // This is the server idle timeout. If no connection arrives within this timeout, the server shuts down.
    const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);
    let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
    let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded::<Envelope>();
    let (app_quit_tx, mut app_quit_rx) = mpsc::unbounded::<()>();

    cx.on_app_quit(move |_| {
        let mut app_quit_tx = app_quit_tx.clone();
        async move {
            log::info!("app quitting. sending signal to server main loop");
            app_quit_tx.send(()).await.ok();
        }
    })
    .detach();

    cx.spawn(async move |cx| {
        let mut stdin_incoming = listeners.stdin.incoming();
        let mut stdout_incoming = listeners.stdout.incoming();
        let mut stderr_incoming = listeners.stderr.incoming();

        loop {
            let streams = futures::future::join3(stdin_incoming.next(), stdout_incoming.next(), stderr_incoming.next());

            log::info!("accepting new connections");
            let result = select! {
                streams = streams.fuse() => {
                    let (Some(Ok(stdin_stream)), Some(Ok(stdout_stream)), Some(Ok(stderr_stream))) = streams else {
                        break;
                    };
                    anyhow::Ok((stdin_stream, stdout_stream, stderr_stream))
                }
                _ = futures::FutureExt::fuse(smol::Timer::after(IDLE_TIMEOUT)) => {
                    log::warn!("timed out waiting for new connections after {:?}. exiting.", IDLE_TIMEOUT);
                    cx.update(|cx| {
                        // TODO: This is a hack, because in a headless project, shutdown isn't executed
                        // when calling quit, but it should be.
                        cx.shutdown();
                        cx.quit();
                    })?;
                    break;
                }
                _ = app_quit_rx.next().fuse() => {
                    break;
                }
            };

            let Ok((mut stdin_stream, mut stdout_stream, mut stderr_stream)) = result else {
                break;
            };

            let mut input_buffer = Vec::new();
            let mut output_buffer = Vec::new();

            let (mut stdin_msg_tx, mut stdin_msg_rx) = mpsc::unbounded::<Envelope>();
            cx.background_spawn(async move {
                while let Ok(msg) = read_message(&mut stdin_stream, &mut input_buffer).await {
                    if (stdin_msg_tx.send(msg).await).is_err() {
                        break;
                    }
                }
            }).detach();

            loop {
                select_biased! {
                    _ = app_quit_rx.next().fuse() => {
                        return anyhow::Ok(());
                    }

                    stdin_message = stdin_msg_rx.next().fuse() => {
                        let Some(message) = stdin_message else {
                            log::warn!("error reading message on stdin. exiting.");
                            break;
                        };
                        if let Err(error) = incoming_tx.unbounded_send(message) {
                            log::error!("failed to send message to application: {error:?}. exiting.");
                            return Err(anyhow!(error));
                        }
                    }

                    outgoing_message = outgoing_rx.next().fuse() => {
                        let Some(message) = outgoing_message else {
                            log::error!("stdout handler, no message");
                            break;
                        };

                        if let Err(error) =
                            write_message(&mut stdout_stream, &mut output_buffer, message).await
                        {
                            log::error!("failed to write stdout message: {:?}", error);
                            break;
                        }
                        if let Err(error) = stdout_stream.flush().await {
                            log::error!("failed to flush stdout message: {:?}", error);
                            break;
                        }
                    }

                    log_message = log_rx.recv().fuse() => {
                        if let Ok(log_message) = log_message {
                            if let Err(error) = stderr_stream.write_all(&log_message).await {
                                log::error!("failed to write log message to stderr: {:?}", error);
                                break;
                            }
                            if let Err(error) = stderr_stream.flush().await {
                                log::error!("failed to flush stderr stream: {:?}", error);
                                break;
                            }
                        }
                    }
                }
            }
        }
        anyhow::Ok(())
    })
    .detach();

    RemoteClient::proto_client_from_channels(incoming_rx, outgoing_tx, cx, "server")
}

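/// Creates the directories the headless server relies on (config, extensions,
/// languages, logs, etc.), failing with context if any cannot be created.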
fn init_paths() -> anyhow::Result<()> {
    for path in [
        paths::config_dir(),
        paths::extensions_dir(),
        paths::languages_dir(),
        paths::logs_dir(),
        paths::temp_dir(),
        paths::hang_traces_dir(),
        paths::remote_extensions_dir(),
        paths::remote_extensions_uploads_dir(),
    ]
    .iter()
    {
        std::fs::create_dir_all(path).with_context(|| format!("creating directory {path:?}"))?;
    }
    Ok(())
}

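/// Entry point of the `run` subcommand: daemonizes, sets up crash handling,
/// logging, the PID file, and the Unix socket listeners, then runs the
/// headless GPUI app hosting the `HeadlessProject`.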
pub fn execute_run(
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
) -> Result<()> {
    init_paths()?;

    match daemonize()? {
        ControlFlow::Break(_) => return Ok(()),
        ControlFlow::Continue(_) => {}
    }

    let app = gpui::Application::headless();
    let id = std::process::id().to_string();
    app.background_executor()
        .spawn(crashes::init(crashes::InitCrashHandler {
            session_id: id,
            zed_version: VERSION.to_owned(),
            binary: "zed-remote-server".to_string(),
            release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
            commit_sha: option_env!("ZED_COMMIT_SHA").unwrap_or("no_sha").to_owned(),
        }))
        .detach();
    let log_rx = init_logging_server(log_file)?;
    log::info!(
        "starting up. pid_file: {:?}, stdin_socket: {:?}, stdout_socket: {:?}, stderr_socket: {:?}",
        pid_file,
        stdin_socket,
        stdout_socket,
        stderr_socket
    );

    write_pid_file(&pid_file)
        .with_context(|| format!("failed to write pid file: {:?}", &pid_file))?;

    let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?;

    rayon::ThreadPoolBuilder::new()
        .num_threads(std::thread::available_parallelism().map_or(1, |n| n.get().div_ceil(2)))
        .stack_size(10 * 1024 * 1024)
        .thread_name(|ix| format!("RayonWorker{}", ix))
        .build_global()
        .unwrap();

    let (shell_env_loaded_tx, shell_env_loaded_rx) = oneshot::channel();
    app.background_executor()
        .spawn(async {
            util::load_login_shell_environment().await.log_err();
            shell_env_loaded_tx.send(()).ok();
        })
        .detach();

    let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new());
    app.run(move |cx| {
        settings::init(cx);
        let app_commit_sha = option_env!("ZED_COMMIT_SHA").map(|s| AppCommitSha::new(s.to_owned()));
        let app_version = AppVersion::load(
            env!("ZED_PKG_VERSION"),
            option_env!("ZED_BUILD_ID"),
            app_commit_sha,
        );
        release_channel::init(app_version, cx);
        gpui_tokio::init(cx);

        HeadlessProject::init(cx);

        log::info!("gpui app started, initializing server");
        let session = start_server(listeners, log_rx, cx);

        GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
        git_hosting_providers::init(cx);
        dap_adapters::init(cx);

        extension::init(cx);
        let extension_host_proxy = ExtensionHostProxy::global(cx);

        json_schema_store::init(cx);

        let project = cx.new(|cx| {
            let fs = Arc::new(RealFs::new(None, cx.background_executor().clone()));
            let node_settings_rx = initialize_settings(session.clone(), fs.clone(), cx);

            let proxy_url = read_proxy_settings(cx);

            let http_client = {
                let _guard = Tokio::handle(cx).enter();
                Arc::new(
                    ReqwestClient::proxy_and_user_agent(
                        proxy_url,
                        &format!(
                            "Zed-Server/{} ({}; {})",
                            env!("CARGO_PKG_VERSION"),
                            std::env::consts::OS,
                            std::env::consts::ARCH
                        ),
                    )
                    .expect("Could not start HTTP client"),
                )
            };

            let node_runtime = NodeRuntime::new(
                http_client.clone(),
                Some(shell_env_loaded_rx),
                node_settings_rx,
            );

            let mut languages = LanguageRegistry::new(cx.background_executor().clone());
            languages.set_language_server_download_dir(paths::languages_dir().clone());
            let languages = Arc::new(languages);

            HeadlessProject::new(
                HeadlessAppState {
                    session: session.clone(),
                    fs,
                    http_client,
                    node_runtime,
                    languages,
                    extension_host_proxy,
                },
                cx,
            )
        });

        handle_crash_files_requests(&project, &session);

        cx.background_spawn(async move { cleanup_old_binaries() })
            .detach();

        mem::forget(project);
    });
    log::info!("gpui app is shut down. quitting.");
    Ok(())
}

#[derive(Debug, Error)]
pub(crate) enum ServerPathError {
    #[error("Failed to create server_dir `{path}`")]
    CreateServerDir {
        #[source]
        source: std::io::Error,
        path: PathBuf,
    },
    #[error("Failed to create logs_dir `{path}`")]
    CreateLogsDir {
        #[source]
        source: std::io::Error,
        path: PathBuf,
    },
}

#[derive(Clone, Debug)]
struct ServerPaths {
    log_file: PathBuf,
    pid_file: PathBuf,
    stdin_socket: PathBuf,
    stdout_socket: PathBuf,
    stderr_socket: PathBuf,
}

impl ServerPaths {
    fn new(identifier: &str) -> Result<Self, ServerPathError> {
        let server_dir = paths::remote_server_state_dir().join(identifier);
        std::fs::create_dir_all(&server_dir).map_err(|source| {
            ServerPathError::CreateServerDir {
                source,
                path: server_dir.clone(),
            }
        })?;
        let log_dir = logs_dir();
        std::fs::create_dir_all(log_dir).map_err(|source| ServerPathError::CreateLogsDir {
            source,
            path: log_dir.clone(),
        })?;

        let pid_file = server_dir.join("server.pid");
        let stdin_socket = server_dir.join("stdin.sock");
        let stdout_socket = server_dir.join("stdout.sock");
        let stderr_socket = server_dir.join("stderr.sock");
        let log_file = logs_dir().join(format!("server-{}.log", identifier));

        Ok(Self {
            pid_file,
            stdin_socket,
            stdout_socket,
            stderr_socket,
            log_file,
        })
    }
}

#[derive(Debug, Error)]
pub(crate) enum ExecuteProxyError {
    #[error("Failed to init server paths")]
    ServerPath(#[from] ServerPathError),

    #[error(transparent)]
    ServerNotRunning(#[from] ProxyLaunchError),

    #[error("Failed to check PidFile '{path}'")]
    CheckPidFile {
        #[source]
        source: CheckPidError,
        path: PathBuf,
    },

    #[error("Failed to kill existing server with pid '{pid}'")]
    KillRunningServer {
        #[source]
        source: std::io::Error,
        pid: u32,
    },

    #[error("failed to spawn server")]
    SpawnServer(#[source] SpawnServerError),

    #[error("stdin_task failed")]
    StdinTask(#[source] anyhow::Error),
    #[error("stdout_task failed")]
    StdoutTask(#[source] anyhow::Error),
    #[error("stderr_task failed")]
    StderrTask(#[source] anyhow::Error),
}

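/// Entry point of the `proxy` subcommand, which runs inside the SSH session.
/// It ensures a server process is running (spawning a new one or attaching to
/// an existing one based on the PID file), then forwards stdin/stdout/stderr
/// between the SSH channel and the server's Unix sockets until a forwarding
/// task fails.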
pub(crate) fn execute_proxy(
    identifier: String,
    is_reconnecting: bool,
) -> Result<(), ExecuteProxyError> {
    init_logging_proxy();

    let server_paths = ServerPaths::new(&identifier)?;

    let id = std::process::id().to_string();
    smol::spawn(crashes::init(crashes::InitCrashHandler {
        session_id: id,
        zed_version: VERSION.to_owned(),
        binary: "zed-remote-server".to_string(),
        release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
        commit_sha: option_env!("ZED_COMMIT_SHA").unwrap_or("no_sha").to_owned(),
    }))
    .detach();

    log::info!("starting proxy process. PID: {}", std::process::id());
    smol::block_on(async {
        let server_pid = check_pid_file(&server_paths.pid_file)
            .await
            .map_err(|source| ExecuteProxyError::CheckPidFile {
                source,
                path: server_paths.pid_file.clone(),
            })?;
        let server_running = server_pid.is_some();
        if is_reconnecting {
            if !server_running {
                log::error!("attempted to reconnect, but no server running");
                return Err(ExecuteProxyError::ServerNotRunning(
                    ProxyLaunchError::ServerNotRunning,
                ));
            }
        } else {
            if let Some(pid) = server_pid {
                log::info!(
                    "proxy found server already running with PID {}. Killing process and cleaning up files...",
                    pid
                );
                kill_running_server(pid, &server_paths).await?;
            }

            spawn_server(&server_paths)
                .await
                .map_err(ExecuteProxyError::SpawnServer)?;
        };
        Ok(())
    })?;

    let stdin_task = smol::spawn(async move {
        let stdin = Async::new(std::io::stdin())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdin_socket).await?;
        handle_io(stdin, stream, "stdin").await
    });

    let stdout_task: smol::Task<Result<()>> = smol::spawn(async move {
        let stdout = Async::new(std::io::stdout())?;
        let stream = smol::net::unix::UnixStream::connect(&server_paths.stdout_socket).await?;
        handle_io(stream, stdout, "stdout").await
    });

    let stderr_task: smol::Task<Result<()>> = smol::spawn(async move {
        let mut stderr = Async::new(std::io::stderr())?;
        let mut stream = smol::net::unix::UnixStream::connect(&server_paths.stderr_socket).await?;
        let mut stderr_buffer = vec![0; 2048];
        loop {
            match stream
                .read(&mut stderr_buffer)
                .await
                .context("reading stderr")?
            {
                0 => {
                    let error =
                        std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "stderr closed");
                    Err(anyhow!(error))?;
                }
                n => {
                    stderr.write_all(&stderr_buffer[..n]).await?;
                    stderr.flush().await?;
                }
            }
        }
    });

    if let Err(forwarding_result) = smol::block_on(async move {
        futures::select! {
            result = stdin_task.fuse() => result.map_err(ExecuteProxyError::StdinTask),
            result = stdout_task.fuse() => result.map_err(ExecuteProxyError::StdoutTask),
            result = stderr_task.fuse() => result.map_err(ExecuteProxyError::StderrTask),
        }
    }) {
        log::error!(
            "encountered error while forwarding messages: {:?}, terminating...",
            forwarding_result
        );
        return Err(forwarding_result);
    }

    Ok(())
}

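/// Kills a previously started server by PID and removes its PID file and
/// socket files so a fresh server can be spawned.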
async fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<(), ExecuteProxyError> {
    log::info!("killing existing server with PID {}", pid);
    new_smol_command("kill")
        .arg(pid.to_string())
        .output()
        .await
        .map_err(|source| ExecuteProxyError::KillRunningServer { source, pid })?;

    for file in [
        &paths.pid_file,
        &paths.stdin_socket,
        &paths.stdout_socket,
        &paths.stderr_socket,
    ] {
        log::debug!("cleaning up file {:?} before starting new server", file);
        std::fs::remove_file(file).ok();
    }
    Ok(())
}

#[derive(Debug, Error)]
pub(crate) enum SpawnServerError {
    #[error("failed to remove stdin socket")]
    RemoveStdinSocket(#[source] std::io::Error),

    #[error("failed to remove stdout socket")]
    RemoveStdoutSocket(#[source] std::io::Error),

    #[error("failed to remove stderr socket")]
    RemoveStderrSocket(#[source] std::io::Error),

    #[error("failed to get current_exe")]
    CurrentExe(#[source] std::io::Error),

    #[error("failed to launch server process")]
    ProcessStatus(#[source] std::io::Error),

    #[error("failed to launch and detach server process: {status}\n{paths}")]
    LaunchStatus { status: ExitStatus, paths: String },
}

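/// Spawns the detached server process (`zed-remote-server run ...`), after
/// removing any stale socket files, and waits until all three sockets exist
/// before returning.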
async fn spawn_server(paths: &ServerPaths) -> Result<(), SpawnServerError> {
    if paths.stdin_socket.exists() {
        std::fs::remove_file(&paths.stdin_socket).map_err(SpawnServerError::RemoveStdinSocket)?;
    }
    if paths.stdout_socket.exists() {
        std::fs::remove_file(&paths.stdout_socket).map_err(SpawnServerError::RemoveStdoutSocket)?;
    }
    if paths.stderr_socket.exists() {
        std::fs::remove_file(&paths.stderr_socket).map_err(SpawnServerError::RemoveStderrSocket)?;
    }

    let binary_name = std::env::current_exe().map_err(SpawnServerError::CurrentExe)?;
    let mut server_process = new_smol_command(binary_name);
    server_process
        .arg("run")
        .arg("--log-file")
        .arg(&paths.log_file)
        .arg("--pid-file")
        .arg(&paths.pid_file)
        .arg("--stdin-socket")
        .arg(&paths.stdin_socket)
        .arg("--stdout-socket")
        .arg(&paths.stdout_socket)
        .arg("--stderr-socket")
        .arg(&paths.stderr_socket);

    let status = server_process
        .status()
        .await
        .map_err(SpawnServerError::ProcessStatus)?;

    if !status.success() {
        return Err(SpawnServerError::LaunchStatus {
            status,
            paths: format!(
                "log file: {:?}, pid file: {:?}",
                paths.log_file, paths.pid_file,
            ),
        });
    }

    let mut total_time_waited = std::time::Duration::from_secs(0);
    let wait_duration = std::time::Duration::from_millis(20);
    while !paths.stdout_socket.exists()
        || !paths.stdin_socket.exists()
        || !paths.stderr_socket.exists()
    {
        log::debug!("waiting for server to be ready to accept connections...");
        std::thread::sleep(wait_duration);
        total_time_waited += wait_duration;
    }

    log::info!(
        "server ready to accept connections. total time waited: {:?}",
        total_time_waited
    );

    Ok(())
}

#[derive(Debug, Error)]
#[error("Failed to remove PID file for missing process (pid `{pid}`)")]
pub(crate) struct CheckPidError {
    #[source]
    source: std::io::Error,
    pid: u32,
}

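/// Reads the PID file and checks whether the recorded process is still alive
/// (via `kill -0`). Returns the PID if the server is running; otherwise
/// removes the stale PID file and returns `None`.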
async fn check_pid_file(path: &Path) -> Result<Option<u32>, CheckPidError> {
    let Some(pid) = std::fs::read_to_string(&path)
        .ok()
        .and_then(|contents| contents.parse::<u32>().ok())
    else {
        return Ok(None);
    };

    log::debug!("Checking if process with PID {} exists...", pid);
    match new_smol_command("kill")
        .arg("-0")
        .arg(pid.to_string())
        .output()
        .await
    {
        Ok(output) if output.status.success() => {
            log::debug!(
                "Process with PID {} exists. NOT spawning new server, but attaching to existing one.",
                pid
            );
            Ok(Some(pid))
        }
        _ => {
            log::debug!(
                "Found PID file, but process with that PID does not exist. Removing PID file."
            );
            std::fs::remove_file(&path).map_err(|source| CheckPidError { source, pid })?;
            Ok(None)
        }
    }
}

fn write_pid_file(path: &Path) -> Result<()> {
    if path.exists() {
        std::fs::remove_file(path)?;
    }
    let pid = std::process::id().to_string();
    log::debug!("writing PID {} to file {:?}", pid, path);
    std::fs::write(path, pid).context("Failed to write PID file")
}

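/// Forwards length-prefixed messages from `reader` to `writer`, flushing after
/// each one; returns an error when either side fails (including EOF).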
async fn handle_io<R, W>(mut reader: R, mut writer: W, socket_name: &str) -> Result<()>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    use remote::protocol::{read_message_raw, write_size_prefixed_buffer};

    let mut buffer = Vec::new();
    loop {
        read_message_raw(&mut reader, &mut buffer)
            .await
            .with_context(|| format!("failed to read message from {}", socket_name))?;
        write_size_prefixed_buffer(&mut writer, &mut buffer)
            .await
            .with_context(|| format!("failed to write message to {}", socket_name))?;
        writer.flush().await?;
        buffer.clear();
    }
}

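/// Wires the remote host's settings file into the global `SettingsStore`,
/// reports parse errors back to the client as toasts, and returns a watch
/// channel carrying the node binary options derived from the project settings.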
fn initialize_settings(
    session: AnyProtoClient,
    fs: Arc<dyn Fs>,
    cx: &mut App,
) -> watch::Receiver<Option<NodeBinaryOptions>> {
    let user_settings_file_rx =
        watch_config_file(cx.background_executor(), fs, paths::settings_file().clone());

    handle_settings_file_changes(user_settings_file_rx, cx, {
        move |err, _cx| {
            if let Some(e) = err {
                log::info!("Server settings failed to change: {}", e);

                session
                    .send(proto::Toast {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                        message: format!(
                            "Error in settings on remote host {:?}: {}",
                            paths::settings_file(),
                            e
                        ),
                    })
                    .log_err();
            } else {
                session
                    .send(proto::HideToast {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        notification_id: "server-settings-failed".to_string(),
                    })
                    .log_err();
            }
        }
    });

    let (mut tx, rx) = watch::channel(None);
    let mut node_settings = None;
    cx.observe_global::<SettingsStore>(move |cx| {
        let new_node_settings = &ProjectSettings::get_global(cx).node;
        if Some(new_node_settings) != node_settings.as_ref() {
            log::info!("Got new node settings: {new_node_settings:?}");
            let options = NodeBinaryOptions {
                allow_path_lookup: !new_node_settings.ignore_system_version,
                // TODO: Implement this setting
                allow_binary_download: true,
                use_paths: new_node_settings.path.as_ref().map(|node_path| {
                    let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                    let npm_path = new_node_settings
                        .npm_path
                        .as_ref()
                        .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                    (
                        node_path.clone(),
                        npm_path.unwrap_or_else(|| {
                            let base_path = PathBuf::new();
                            node_path.parent().unwrap_or(&base_path).join("npm")
                        }),
                    )
                }),
            };
            node_settings = Some(new_node_settings.clone());
            tx.send(Some(options)).ok();
        }
    })
    .detach();

    rx
}

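/// Applies the initial contents of the server settings file, then keeps the
/// global `SettingsStore` in sync as the file changes, invoking
/// `settings_changed` with any parse error after each update.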
pub fn handle_settings_file_changes(
    mut server_settings_file: mpsc::UnboundedReceiver<String>,
    cx: &mut App,
    settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
    let server_settings_content = cx
        .background_executor()
        .block(server_settings_file.next())
        .unwrap();
    SettingsStore::update_global(cx, |store, cx| {
        store
            .set_server_settings(&server_settings_content, cx)
            .log_err();
    });
    cx.spawn(async move |cx| {
        while let Some(server_settings_content) = server_settings_file.next().await {
            let result = cx.update_global(|store: &mut SettingsStore, cx| {
                let result = store.set_server_settings(&server_settings_content, cx);
                if let Err(err) = &result {
                    log::error!("Failed to load server settings: {err}");
                }
                settings_changed(result.err(), cx);
                cx.refresh_windows();
            });
            if result.is_err() {
                break; // App dropped
            }
        }
    })
    .detach();
}

fn read_proxy_settings(cx: &mut Context<HeadlessProject>) -> Option<Url> {
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();

    proxy_str
        .as_ref()
        .and_then(|input: &String| {
            input
                .parse::<Url>()
                .inspect_err(|e| log::error!("Error parsing proxy settings: {}", e))
                .ok()
        })
        .or_else(read_proxy_from_env)
}

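/// Forks the process so the server detaches from the launching SSH session.
/// Returns `ControlFlow::Break` in the parent (which should exit) and
/// `ControlFlow::Continue` in the child, whose standard streams are
/// redirected to /dev/null.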
fn daemonize() -> Result<ControlFlow<()>> {
    match fork::fork().map_err(|e| anyhow!("failed to call fork with error code {e}"))? {
        fork::Fork::Parent(_) => {
            return Ok(ControlFlow::Break(()));
        }
        fork::Fork::Child => {}
    }

    // Once we've detached from the parent, we want to close stdout/stderr/stdin
    // so that the outer SSH process is not attached to us in any way anymore.
    unsafe { redirect_standard_streams() }?;

    Ok(ControlFlow::Continue(()))
}

unsafe fn redirect_standard_streams() -> Result<()> {
    let devnull_fd = unsafe { libc::open(b"/dev/null\0" as *const [u8; 10] as _, libc::O_RDWR) };
    anyhow::ensure!(devnull_fd != -1, "failed to open /dev/null");

    let process_stdio = |name, fd| {
        let reopened_fd = unsafe { libc::dup2(devnull_fd, fd) };
        anyhow::ensure!(
            reopened_fd != -1,
            format!("failed to redirect {} to /dev/null", name)
        );
        Ok(())
    };

    process_stdio("stdin", libc::STDIN_FILENO)?;
    process_stdio("stdout", libc::STDOUT_FILENO)?;
    process_stdio("stderr", libc::STDERR_FILENO)?;

    anyhow::ensure!(
        unsafe { libc::close(devnull_fd) != -1 },
        "failed to close /dev/null fd after redirecting"
    );

    Ok(())
}

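/// Removes remote server binaries from previous versions for the current
/// release channel, skipping any binary that is still in use by a running
/// process or at least as new as the current version.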
fn cleanup_old_binaries() -> Result<()> {
    let server_dir = paths::remote_server_dir_relative();
    let release_channel = release_channel::RELEASE_CHANNEL.dev_name();
    let prefix = format!("zed-remote-server-{}-", release_channel);

    for entry in std::fs::read_dir(server_dir.as_std_path())? {
        let path = entry?.path();

        if let Some(file_name) = path.file_name()
            && let Some(version) = file_name.to_string_lossy().strip_prefix(&prefix)
            && !is_new_version(version)
            && !is_file_in_use(file_name)
        {
            log::info!("removing old remote server binary: {:?}", path);
            std::fs::remove_file(&path)?;
        }
    }

    Ok(())
}

fn is_new_version(version: &str) -> bool {
    semver::Version::from_str(version)
        .ok()
        .zip(semver::Version::from_str(env!("ZED_PKG_VERSION")).ok())
        .is_some_and(|(version, current_version)| version >= current_version)
}

fn is_file_in_use(file_name: &OsStr) -> bool {
    let info = sysinfo::System::new_with_specifics(sysinfo::RefreshKind::nothing().with_processes(
        sysinfo::ProcessRefreshKind::nothing().with_exe(sysinfo::UpdateKind::Always),
    ));

    for process in info.processes().values() {
        if process
            .exe()
            .is_some_and(|exe| exe.file_name().is_some_and(|name| name == file_name))
        {
            return true;
        }
    }

    false
}