1use anyhow::{Context as _, Result, anyhow};
2use client::ProjectId;
3use collections::HashMap;
4use collections::HashSet;
5use language::File;
6use lsp::LanguageServerId;
7
8use extension::ExtensionHostProxy;
9use extension_host::headless_host::HeadlessExtensionStore;
10use fs::Fs;
11use gpui::{App, AppContext as _, AsyncApp, Context, Entity, PromptLevel};
12use http_client::HttpClient;
13use language::{Buffer, BufferEvent, LanguageRegistry, proto::serialize_operation};
14use node_runtime::NodeRuntime;
15use project::{
16 AgentRegistryStore, LspStore, LspStoreEvent, ManifestTree, PrettierStore, ProjectEnvironment,
17 ProjectPath, ToolchainStore, WorktreeId,
18 agent_server_store::AgentServerStore,
19 buffer_store::{BufferStore, BufferStoreEvent},
20 context_server_store::ContextServerStore,
21 debugger::{breakpoint_store::BreakpointStore, dap_store::DapStore},
22 git_store::GitStore,
23 image_store::ImageId,
24 lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind, LogKind},
25 project_settings::SettingsObserver,
26 search::SearchQuery,
27 task_store::TaskStore,
28 trusted_worktrees::{PathTrust, RemoteHostLocation, TrustedWorktrees},
29 worktree_store::{WorktreeIdCounter, WorktreeStore},
30};
31use rpc::{
32 AnyProtoClient, TypedEnvelope,
33 proto::{self, REMOTE_SERVER_PEER_ID, REMOTE_SERVER_PROJECT_ID},
34};
35use smol::process::Child;
36
37use settings::initial_server_settings_content;
38use std::{
39 num::NonZeroU64,
40 path::{Path, PathBuf},
41 sync::{
42 Arc,
43 atomic::{AtomicU64, AtomicUsize, Ordering},
44 },
45 time::Instant,
46};
47use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind};
48use util::{ResultExt, paths::PathStyle, rel_path::RelPath};
49use worktree::Worktree;
50
/// Server-side counterpart of a Zed project, running inside the remote
/// (headless) server process. It owns the authoritative project stores and
/// replicates their state to the connected client over `session`.
pub struct HeadlessProject {
    /// Filesystem implementation used for all disk access.
    pub fs: Arc<dyn Fs>,
    /// RPC connection to the client that owns this project.
    pub session: AnyProtoClient,
    /// Authoritative store of local worktrees, shared with the client.
    pub worktree_store: Entity<WorktreeStore>,
    /// Open buffers; local edits are forwarded to the client (see `on_buffer_event`).
    pub buffer_store: Entity<BufferStore>,
    /// Local language-server state; events are mirrored to the client.
    pub lsp_store: Entity<LspStore>,
    /// Task definitions/state for this project.
    pub task_store: Entity<TaskStore>,
    /// Debug-adapter (DAP) sessions.
    pub dap_store: Entity<DapStore>,
    /// Breakpoints, kept alongside the DAP store.
    pub breakpoint_store: Entity<BreakpointStore>,
    /// Agent servers available on this host.
    pub agent_server_store: Entity<AgentServerStore>,
    /// Context (MCP) servers available on this host.
    pub context_server_store: Entity<ContextServerStore>,
    /// Watches settings files and applies changes locally.
    pub settings_observer: Entity<SettingsObserver>,
    /// Shared counter for worktree entry ids across all worktrees.
    pub next_entry_id: Arc<AtomicUsize>,
    /// Registry of known languages/grammars.
    pub languages: Arc<LanguageRegistry>,
    /// Extension store running on the remote host.
    pub extensions: Entity<HeadlessExtensionStore>,
    /// Git repository state, shared with the client.
    pub git_store: Entity<GitStore>,
    /// Per-directory shell environment resolution.
    pub environment: Entity<ProjectEnvironment>,
    /// Collects profiling data since server startup (see `handle_get_remote_profiling_data`).
    pub profiling_collector: gpui::ProfilingCollector,
    // Used mostly to keep alive the toolchain store for RPC handlers.
    // Local variant is used within LSP store, but that's a separate entity.
    pub _toolchain_store: Entity<ToolchainStore>,
    /// Spawned kernel child processes, keyed by kernel id
    /// (see `handle_spawn_kernel` / `handle_kill_kernel`).
    pub kernels: HashMap<String, Child>,
}
74
/// Dependencies handed to [`HeadlessProject::new`] when the remote server
/// boots; everything the project needs that outlives it.
pub struct HeadlessAppState {
    /// RPC connection to the owning client.
    pub session: AnyProtoClient,
    /// Filesystem implementation.
    pub fs: Arc<dyn Fs>,
    /// HTTP client used by stores that download artifacts.
    pub http_client: Arc<dyn HttpClient>,
    /// Node runtime for node-based language tooling (e.g. prettier).
    pub node_runtime: NodeRuntime,
    /// Registry of known languages/grammars.
    pub languages: Arc<LanguageRegistry>,
    /// Proxy through which extensions register capabilities.
    pub extension_host_proxy: Arc<ExtensionHostProxy>,
    /// Moment the server process started; used for profiling data.
    pub startup_time: Instant,
}
84
85impl HeadlessProject {
    /// Registers global state required before a `HeadlessProject` can be
    /// constructed: settings plus the language-server log store.
    pub fn init(cx: &mut App) {
        settings::init(cx);
        log_store::init(true, cx);
    }
90
    /// Constructs the headless project for a single remote session.
    ///
    /// Every store is created in its local (authoritative) flavor and marked
    /// as shared under `REMOTE_SERVER_PROJECT_ID`, so state changes are
    /// replicated to the connected client. All RPC handlers are then
    /// registered on `session`. When `init_worktree_trust` is true, worktree
    /// trust tracking is installed so the client can approve or restrict
    /// worktrees on this host.
    pub fn new(
        HeadlessAppState {
            session,
            fs,
            http_client,
            node_runtime,
            languages,
            extension_host_proxy: proxy,
            startup_time,
        }: HeadlessAppState,
        init_worktree_trust: bool,
        cx: &mut Context<Self>,
    ) -> Self {
        debug_adapter_extension::init(proxy.clone(), cx);
        languages::init(languages.clone(), fs.clone(), node_runtime.clone(), cx);

        // The worktree store is authoritative here and immediately shared so
        // worktree updates flow to the client.
        let worktree_store = cx.new(|cx| {
            let mut store = WorktreeStore::local(true, fs.clone(), WorktreeIdCounter::get(cx));
            store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            store
        });

        if init_worktree_trust {
            project::trusted_worktrees::track_worktree_trust(
                worktree_store.clone(),
                None::<RemoteHostLocation>,
                Some((session.clone(), ProjectId(REMOTE_SERVER_PROJECT_ID))),
                None,
                cx,
            );
        }

        let environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, true, cx));
        let manifest_tree = ManifestTree::new(worktree_store.clone(), cx);
        let toolchain_store = cx.new(|cx| {
            ToolchainStore::local(
                languages.clone(),
                worktree_store.clone(),
                environment.clone(),
                manifest_tree.clone(),
                cx,
            )
        });

        let buffer_store = cx.new(|cx| {
            let mut buffer_store = BufferStore::local(worktree_store.clone(), cx);
            buffer_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            buffer_store
        });

        let breakpoint_store = cx.new(|_| {
            let mut breakpoint_store =
                BreakpointStore::local(worktree_store.clone(), buffer_store.clone());
            breakpoint_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone());

            breakpoint_store
        });

        let dap_store = cx.new(|cx| {
            let mut dap_store = DapStore::new_local(
                http_client.clone(),
                node_runtime.clone(),
                fs.clone(),
                environment.clone(),
                toolchain_store.read(cx).as_language_toolchain_store(),
                worktree_store.clone(),
                breakpoint_store.clone(),
                true,
                cx,
            );
            dap_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            dap_store
        });

        let git_store = cx.new(|cx| {
            let mut store = GitStore::local(
                &worktree_store,
                buffer_store.clone(),
                environment.clone(),
                fs.clone(),
                cx,
            );
            store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            store
        });

        let prettier_store = cx.new(|cx| {
            PrettierStore::new(
                node_runtime.clone(),
                fs.clone(),
                languages.clone(),
                worktree_store.clone(),
                cx,
            )
        });

        let task_store = cx.new(|cx| {
            let mut task_store = TaskStore::local(
                buffer_store.downgrade(),
                worktree_store.clone(),
                toolchain_store.read(cx).as_language_toolchain_store(),
                environment.clone(),
                git_store.clone(),
                cx,
            );
            task_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            task_store
        });
        let settings_observer = cx.new(|cx| {
            let mut observer = SettingsObserver::new_local(
                fs.clone(),
                worktree_store.clone(),
                task_store.clone(),
                true,
                cx,
            );
            observer.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            observer
        });

        let lsp_store = cx.new(|cx| {
            let mut lsp_store = LspStore::new_local(
                buffer_store.clone(),
                worktree_store.clone(),
                prettier_store.clone(),
                toolchain_store
                    .read(cx)
                    .as_local_store()
                    .expect("Toolchain store to be local")
                    .clone(),
                environment.clone(),
                manifest_tree,
                languages.clone(),
                http_client.clone(),
                fs.clone(),
                cx,
            );
            lsp_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            lsp_store
        });

        AgentRegistryStore::init_global(cx, fs.clone(), http_client.clone());

        let agent_server_store = cx.new(|cx| {
            let mut agent_server_store = AgentServerStore::local(
                node_runtime.clone(),
                fs.clone(),
                environment.clone(),
                http_client.clone(),
                cx,
            );
            agent_server_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            agent_server_store
        });

        let context_server_store = cx.new(|cx| {
            let mut context_server_store =
                ContextServerStore::local(worktree_store.clone(), None, true, cx);
            context_server_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone());
            context_server_store
        });

        cx.subscribe(&lsp_store, Self::on_lsp_store_event).detach();
        language_extension::init(
            language_extension::LspAccess::ViaLspStore(lsp_store.clone()),
            proxy.clone(),
            languages.clone(),
        );

        // Subscribe to every newly added buffer so that local edits are
        // forwarded to the client (see `on_buffer_event`).
        cx.subscribe(&buffer_store, |_this, _buffer_store, event, cx| {
            if let BufferStoreEvent::BufferAdded(buffer) = event {
                cx.subscribe(buffer, Self::on_buffer_event).detach();
            }
        })
        .detach();

        let extensions = HeadlessExtensionStore::new(
            fs.clone(),
            http_client.clone(),
            paths::remote_extensions_dir().to_path_buf(),
            proxy,
            node_runtime,
            cx,
        );

        // local_machine -> ssh handlers
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &worktree_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &buffer_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &cx.entity());
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &lsp_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &task_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &toolchain_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &dap_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &breakpoint_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &settings_observer);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &git_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &agent_server_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &context_server_store);

        // Requests handled directly by this entity.
        session.add_request_handler(cx.weak_entity(), Self::handle_list_remote_directory);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_path_metadata);
        session.add_request_handler(cx.weak_entity(), Self::handle_shutdown_remote_server);
        session.add_request_handler(cx.weak_entity(), Self::handle_ping);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_processes);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_remote_profiling_data);

        session.add_entity_request_handler(Self::handle_add_worktree);
        session.add_request_handler(cx.weak_entity(), Self::handle_remove_worktree);

        session.add_entity_request_handler(Self::handle_open_buffer_by_path);
        session.add_entity_request_handler(Self::handle_open_new_buffer);
        session.add_entity_request_handler(Self::handle_find_search_candidates);
        session.add_entity_request_handler(Self::handle_open_server_settings);
        session.add_entity_request_handler(Self::handle_get_directory_environment);
        session.add_entity_message_handler(Self::handle_toggle_lsp_logs);
        session.add_entity_request_handler(Self::handle_open_image_by_path);
        session.add_entity_request_handler(Self::handle_trust_worktrees);
        session.add_entity_request_handler(Self::handle_restrict_worktrees);
        session.add_entity_request_handler(Self::handle_download_file_by_path);

        // TODO!(yara) add message handlers here (+ look for the other side, recv on UI side client)
        session.add_entity_message_handler(Self::handle_find_search_candidates_cancel);
        session.add_entity_request_handler(BufferStore::handle_update_buffer);
        session.add_entity_message_handler(BufferStore::handle_close_buffer);

        session.add_request_handler(
            extensions.downgrade(),
            HeadlessExtensionStore::handle_sync_extensions,
        );
        session.add_request_handler(
            extensions.downgrade(),
            HeadlessExtensionStore::handle_install_extension,
        );

        session.add_request_handler(cx.weak_entity(), Self::handle_spawn_kernel);
        session.add_request_handler(cx.weak_entity(), Self::handle_kill_kernel);

        // Register the per-store protocol handlers on the session.
        BufferStore::init(&session);
        WorktreeStore::init(&session); // TODO!(yara) maybe here
        SettingsObserver::init(&session);
        LspStore::init(&session);
        TaskStore::init(Some(&session));
        ToolchainStore::init(&session);
        DapStore::init(&session, cx);
        // todo(debugger): Re init breakpoint store when we set it up for collab
        BreakpointStore::init(&session);
        GitStore::init(&session);
        AgentServerStore::init_headless(&session);
        ContextServerStore::init_headless(&session);

        HeadlessProject {
            next_entry_id: Default::default(),
            session,
            settings_observer,
            fs,
            worktree_store,
            buffer_store,
            lsp_store,
            task_store,
            dap_store,
            breakpoint_store,
            agent_server_store,
            context_server_store,
            languages,
            extensions,
            git_store,
            environment,
            profiling_collector: gpui::ProfilingCollector::new(startup_time),
            _toolchain_store: toolchain_store,
            kernels: Default::default(),
        }
    }
364
365 fn on_buffer_event(
366 &mut self,
367 buffer: Entity<Buffer>,
368 event: &BufferEvent,
369 cx: &mut Context<Self>,
370 ) {
371 if let BufferEvent::Operation {
372 operation,
373 is_local: true,
374 } = event
375 {
376 cx.background_spawn(self.session.request(proto::UpdateBuffer {
377 project_id: REMOTE_SERVER_PROJECT_ID,
378 buffer_id: buffer.read(cx).remote_id().to_proto(),
379 operations: vec![serialize_operation(operation)],
380 }))
381 .detach()
382 }
383 }
384
    /// Reacts to LSP store events: mirrors language-server lifecycle into the
    /// global log store, and forwards server updates, notifications, and
    /// prompts to the client over the session.
    fn on_lsp_store_event(
        &mut self,
        lsp_store: Entity<LspStore>,
        event: &LspStoreEvent,
        cx: &mut Context<Self>,
    ) {
        match event {
            LspStoreEvent::LanguageServerAdded(id, name, worktree_id) => {
                // Register the server with the log store (when initialized)
                // so its logs/traces/RPC traffic can be captured.
                let log_store = cx
                    .try_global::<GlobalLogStore>()
                    .map(|lsp_logs| lsp_logs.0.clone());
                if let Some(log_store) = log_store {
                    log_store.update(cx, |log_store, cx| {
                        log_store.add_language_server(
                            LanguageServerKind::LocalSsh {
                                lsp_store: self.lsp_store.downgrade(),
                            },
                            *id,
                            Some(name.clone()),
                            *worktree_id,
                            lsp_store.read(cx).language_server_for_id(*id),
                            cx,
                        );
                    });
                }
            }
            LspStoreEvent::LanguageServerRemoved(id) => {
                // Drop the server from the log store, if it is tracking it.
                let log_store = cx
                    .try_global::<GlobalLogStore>()
                    .map(|lsp_logs| lsp_logs.0.clone());
                if let Some(log_store) = log_store {
                    log_store.update(cx, |log_store, cx| {
                        log_store.remove_language_server(*id, cx);
                    });
                }
            }
            LspStoreEvent::LanguageServerUpdate {
                language_server_id,
                name,
                message,
            } => {
                // Send failures (e.g. client disconnected) are only logged.
                self.session
                    .send(proto::UpdateLanguageServer {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        server_name: name.as_ref().map(|name| name.to_string()),
                        language_server_id: language_server_id.to_proto(),
                        variant: Some(message.clone()),
                    })
                    .log_err();
            }
            LspStoreEvent::Notification(message) => {
                // Surface LSP notifications as toasts on the client.
                self.session
                    .send(proto::Toast {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        notification_id: "lsp".to_string(),
                        message: message.clone(),
                    })
                    .log_err();
            }
            LspStoreEvent::LanguageServerPrompt(prompt) => {
                // Ask the client which prompt action to take, then relay the
                // chosen action index back to the language server.
                let request = self.session.request(proto::LanguageServerPromptRequest {
                    project_id: REMOTE_SERVER_PROJECT_ID,
                    actions: prompt
                        .actions
                        .iter()
                        .map(|action| action.title.to_string())
                        .collect(),
                    level: Some(prompt_to_proto(prompt)),
                    lsp_name: prompt.lsp_name.clone(),
                    message: prompt.message.clone(),
                });
                let prompt = prompt.clone();
                cx.background_spawn(async move {
                    let response = request.await?;
                    if let Some(action_response) = response.action_response {
                        prompt.respond(action_response as usize).await;
                    }
                    anyhow::Ok(())
                })
                .detach();
            }
            _ => {}
        }
    }
469
    /// Creates a local worktree for the requested path and replies with its
    /// id, canonicalized path, and root repository common dir.
    ///
    /// The path is tilde-expanded and canonicalized. If it does not exist
    /// yet, its parent is canonicalized instead (falling back to the home
    /// directory for bare file names) so a worktree can still be created for
    /// a not-yet-existing file; a missing parent is reported to the client as
    /// `DevServerProjectPathDoesNotExist`.
    pub async fn handle_add_worktree(
        this: Entity<Self>,
        message: TypedEnvelope<proto::AddWorktree>,
        mut cx: AsyncApp,
    ) -> Result<proto::AddWorktreeResponse> {
        use client::ErrorCodeExt;
        let fs = this.read_with(&cx, |this, _| this.fs.clone());
        let path = PathBuf::from(shellexpand::tilde(&message.payload.path).to_string());

        let canonicalized = match fs.canonicalize(&path).await {
            Ok(path) => path,
            Err(e) => {
                // The path itself doesn't exist; try to canonicalize its
                // parent and re-append the file name.
                let mut parent = path
                    .parent()
                    .ok_or(e)
                    .with_context(|| format!("{path:?} does not exist"))?;
                if parent == Path::new("") {
                    parent = util::paths::home_dir();
                }
                let parent = fs.canonicalize(parent).await.map_err(|_| {
                    anyhow!(
                        proto::ErrorCode::DevServerProjectPathDoesNotExist
                            .with_tag("path", path.to_string_lossy().as_ref())
                    )
                })?;
                if let Some(file_name) = path.file_name() {
                    parent.join(file_name)
                } else {
                    parent
                }
            }
        };
        let next_worktree_id = this
            .update(&mut cx, |this, cx| {
                this.worktree_store
                    .update(cx, |worktree_store, _| worktree_store.next_worktree_id())
            })
            .await?;
        let worktree = this
            .read_with(&cx.clone(), |this, _| {
                Worktree::local(
                    Arc::from(canonicalized.as_path()),
                    message.payload.visible,
                    this.fs.clone(),
                    this.next_entry_id.clone(),
                    true,
                    next_worktree_id,
                    &mut cx,
                )
            })
            .await?;

        let response = this.read_with(&cx, |_, cx| {
            let worktree = worktree.read(cx);
            proto::AddWorktreeResponse {
                worktree_id: worktree.id().to_proto(),
                canonicalized_path: canonicalized.to_string_lossy().into_owned(),
                root_repo_common_dir: worktree
                    .root_repo_common_dir()
                    .map(|p| p.to_string_lossy().into_owned()),
            }
        });

        // We spawn this asynchronously, so that we can send the response back
        // *before* `worktree_store.add()` can send out UpdateProject requests
        // to the client about the new worktree.
        //
        // That lets the client manage the reference/handles of the newly-added
        // worktree, before getting interrupted by an UpdateProject request.
        //
        // This fixes the problem of the client sending the AddWorktree request,
        // headless project sending out a project update, client receiving it
        // and immediately dropping the reference of the new client, causing it
        // to be dropped on the headless project, and the client only then
        // receiving a response to AddWorktree.
        cx.spawn(async move |cx| {
            this.update(cx, |this, cx| {
                this.worktree_store.update(cx, |worktree_store, cx| {
                    worktree_store.add(&worktree, cx);
                });
            });
        })
        .detach();

        Ok(response)
    }
556
557 pub async fn handle_remove_worktree(
558 this: Entity<Self>,
559 envelope: TypedEnvelope<proto::RemoveWorktree>,
560 mut cx: AsyncApp,
561 ) -> Result<proto::Ack> {
562 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
563 this.update(&mut cx, |this, cx| {
564 this.worktree_store.update(cx, |worktree_store, cx| {
565 worktree_store.remove_worktree(worktree_id, cx);
566 });
567 });
568 Ok(proto::Ack {})
569 }
570
571 pub async fn handle_open_buffer_by_path(
572 this: Entity<Self>,
573 message: TypedEnvelope<proto::OpenBufferByPath>,
574 mut cx: AsyncApp,
575 ) -> Result<proto::OpenBufferResponse> {
576 let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
577 let path = RelPath::from_proto(&message.payload.path)?;
578 let (buffer_store, buffer) = this.update(&mut cx, |this, cx| {
579 let buffer_store = this.buffer_store.clone();
580 let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
581 buffer_store.open_buffer(ProjectPath { worktree_id, path }, cx)
582 });
583 (buffer_store, buffer)
584 });
585
586 let buffer = buffer.await?;
587 let buffer_id = buffer.read_with(&cx, |b, _| b.remote_id());
588 buffer_store.update(&mut cx, |buffer_store, cx| {
589 buffer_store
590 .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
591 .detach_and_log_err(cx);
592 });
593
594 Ok(proto::OpenBufferResponse {
595 buffer_id: buffer_id.to_proto(),
596 })
597 }
598
    /// Loads an image file from a worktree and streams it to the client.
    ///
    /// Replies with a freshly-allocated image id. The image itself is sent
    /// out-of-band: first a `CreateImageForPeer` state message describing the
    /// image, then its contents in 1 MiB chunks.
    pub async fn handle_open_image_by_path(
        this: Entity<Self>,
        message: TypedEnvelope<proto::OpenImageByPath>,
        mut cx: AsyncApp,
    ) -> Result<proto::OpenImageResponse> {
        // Process-wide id generator for images served by this server.
        static NEXT_ID: AtomicU64 = AtomicU64::new(1);
        let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
        let path = RelPath::from_proto(&message.payload.path)?;
        let project_id = message.payload.project_id;
        use proto::create_image_for_peer::Variant;

        let (worktree_store, session) = this.read_with(&cx, |this, _| {
            (this.worktree_store.clone(), this.session.clone())
        });

        let worktree = worktree_store
            .read_with(&cx, |store, cx| store.worktree_for_id(worktree_id, cx))
            .context("worktree not found")?;

        let load_task = worktree.update(&mut cx, |worktree, cx| {
            worktree.load_binary_file(path.as_ref(), cx)
        });

        let loaded_file = load_task.await?;
        let content = loaded_file.content;
        let file = loaded_file.file;

        let proto_file = worktree.read_with(&cx, |_worktree, cx| file.to_proto(cx));
        // NEXT_ID starts at 1, so fetch_add can never yield 0 here.
        let image_id =
            ImageId::from(NonZeroU64::new(NEXT_ID.fetch_add(1, Ordering::Relaxed)).unwrap());

        // Best-effort format detection from the image bytes.
        let format = image::guess_format(&content)
            .map(|f| format!("{:?}", f).to_lowercase())
            .unwrap_or_else(|_| "unknown".to_string());

        let state = proto::ImageState {
            id: image_id.to_proto(),
            file: Some(proto_file),
            content_size: content.len() as u64,
            format,
        };

        session.send(proto::CreateImageForPeer {
            project_id,
            peer_id: Some(REMOTE_SERVER_PEER_ID),
            variant: Some(Variant::State(state)),
        })?;

        const CHUNK_SIZE: usize = 1024 * 1024; // 1MB chunks
        for chunk in content.chunks(CHUNK_SIZE) {
            session.send(proto::CreateImageForPeer {
                project_id,
                peer_id: Some(REMOTE_SERVER_PEER_ID),
                variant: Some(Variant::Chunk(proto::ImageChunk {
                    image_id: image_id.to_proto(),
                    data: chunk.to_vec(),
                })),
            })?;
        }

        Ok(proto::OpenImageResponse {
            image_id: image_id.to_proto(),
        })
    }
663
664 pub async fn handle_trust_worktrees(
665 this: Entity<Self>,
666 envelope: TypedEnvelope<proto::TrustWorktrees>,
667 mut cx: AsyncApp,
668 ) -> Result<proto::Ack> {
669 let trusted_worktrees = cx
670 .update(|cx| TrustedWorktrees::try_get_global(cx))
671 .context("missing trusted worktrees")?;
672 let worktree_store = this.read_with(&cx, |project, _| project.worktree_store.clone());
673 trusted_worktrees.update(&mut cx, |trusted_worktrees, cx| {
674 trusted_worktrees.trust(
675 &worktree_store,
676 envelope
677 .payload
678 .trusted_paths
679 .into_iter()
680 .filter_map(PathTrust::from_proto)
681 .collect(),
682 cx,
683 );
684 });
685 Ok(proto::Ack {})
686 }
687
688 pub async fn handle_restrict_worktrees(
689 this: Entity<Self>,
690 envelope: TypedEnvelope<proto::RestrictWorktrees>,
691 mut cx: AsyncApp,
692 ) -> Result<proto::Ack> {
693 let trusted_worktrees = cx
694 .update(|cx| TrustedWorktrees::try_get_global(cx))
695 .context("missing trusted worktrees")?;
696 let worktree_store = this.read_with(&cx, |project, _| project.worktree_store.downgrade());
697 trusted_worktrees.update(&mut cx, |trusted_worktrees, cx| {
698 let restricted_paths = envelope
699 .payload
700 .worktree_ids
701 .into_iter()
702 .map(WorktreeId::from_proto)
703 .map(PathTrust::Worktree)
704 .collect::<HashSet<_>>();
705 trusted_worktrees.restrict(worktree_store, restricted_paths, cx);
706 });
707 Ok(proto::Ack {})
708 }
709
    /// Loads a file from a worktree and streams its contents to the client.
    ///
    /// Unlike `handle_open_image_by_path`, the file id is provided by the
    /// client. The contents are sent out-of-band as a `CreateFileForPeer`
    /// state message followed by 1 MiB chunks; the response only echoes the
    /// file id back.
    pub async fn handle_download_file_by_path(
        this: Entity<Self>,
        message: TypedEnvelope<proto::DownloadFileByPath>,
        mut cx: AsyncApp,
    ) -> Result<proto::DownloadFileResponse> {
        log::debug!(
            "handle_download_file_by_path: received request: {:?}",
            message.payload
        );

        let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
        let path = RelPath::from_proto(&message.payload.path)?;
        let project_id = message.payload.project_id;
        let file_id = message.payload.file_id;
        log::debug!(
            "handle_download_file_by_path: worktree_id={:?}, path={:?}, file_id={}",
            worktree_id,
            path,
            file_id
        );
        use proto::create_file_for_peer::Variant;

        let (worktree_store, session): (Entity<WorktreeStore>, AnyProtoClient) = this
            .read_with(&cx, |this, _| {
                (this.worktree_store.clone(), this.session.clone())
            });

        let worktree = worktree_store
            .read_with(&cx, |store, cx| store.worktree_for_id(worktree_id, cx))
            .context("worktree not found")?;

        let download_task = worktree.update(&mut cx, |worktree: &mut Worktree, cx| {
            worktree.load_binary_file(path.as_ref(), cx)
        });

        let downloaded_file = download_task.await?;
        let content = downloaded_file.content;
        let file = downloaded_file.file;
        log::debug!(
            "handle_download_file_by_path: file loaded, content_size={}",
            content.len()
        );

        let proto_file = worktree.read_with(&cx, |_worktree: &Worktree, cx| file.to_proto(cx));
        log::debug!(
            "handle_download_file_by_path: using client-provided file_id={}",
            file_id
        );

        let state = proto::FileState {
            id: file_id,
            file: Some(proto_file),
            content_size: content.len() as u64,
        };

        // Announce the file before streaming its contents.
        log::debug!("handle_download_file_by_path: sending State message");
        session.send(proto::CreateFileForPeer {
            project_id,
            peer_id: Some(REMOTE_SERVER_PEER_ID),
            variant: Some(Variant::State(state)),
        })?;

        const CHUNK_SIZE: usize = 1024 * 1024; // 1MB chunks
        let num_chunks = content.len().div_ceil(CHUNK_SIZE);
        log::debug!(
            "handle_download_file_by_path: sending {} chunks",
            num_chunks
        );
        for (i, chunk) in content.chunks(CHUNK_SIZE).enumerate() {
            log::trace!(
                "handle_download_file_by_path: sending chunk {}/{}, size={}",
                i + 1,
                num_chunks,
                chunk.len()
            );
            session.send(proto::CreateFileForPeer {
                project_id,
                peer_id: Some(REMOTE_SERVER_PEER_ID),
                variant: Some(Variant::Chunk(proto::FileChunk {
                    file_id,
                    data: chunk.to_vec(),
                })),
            })?;
        }

        log::debug!(
            "handle_download_file_by_path: returning file_id={}",
            file_id
        );
        Ok(proto::DownloadFileResponse { file_id })
    }
801
802 pub async fn handle_open_new_buffer(
803 this: Entity<Self>,
804 _message: TypedEnvelope<proto::OpenNewBuffer>,
805 mut cx: AsyncApp,
806 ) -> Result<proto::OpenBufferResponse> {
807 let (buffer_store, buffer) = this.update(&mut cx, |this, cx| {
808 let buffer_store = this.buffer_store.clone();
809 let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
810 buffer_store.create_buffer(None, true, cx)
811 });
812 (buffer_store, buffer)
813 });
814
815 let buffer = buffer.await?;
816 let buffer_id = buffer.read_with(&cx, |b, _| b.remote_id());
817 buffer_store.update(&mut cx, |buffer_store, cx| {
818 buffer_store
819 .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
820 .detach_and_log_err(cx);
821 });
822
823 Ok(proto::OpenBufferResponse {
824 buffer_id: buffer_id.to_proto(),
825 })
826 }
827
    /// Enables or disables capturing of one log kind (logs, traces, or RPC
    /// messages) for a single language server, as requested by the client.
    async fn handle_toggle_lsp_logs(
        _: Entity<Self>,
        envelope: TypedEnvelope<proto::ToggleLspLogs>,
        cx: AsyncApp,
    ) -> Result<()> {
        let server_id = LanguageServerId::from_proto(envelope.payload.server_id);
        cx.update(|cx| {
            let log_store = cx
                .try_global::<GlobalLogStore>()
                .map(|global_log_store| global_log_store.0.clone())
                .context("lsp logs store is missing")?;
            // Translate the wire-format log type into the store's LogKind.
            let toggled_log_kind =
                match proto::toggle_lsp_logs::LogType::from_i32(envelope.payload.log_type)
                    .context("invalid log type")?
                {
                    proto::toggle_lsp_logs::LogType::Log => LogKind::Logs,
                    proto::toggle_lsp_logs::LogType::Trace => LogKind::Trace,
                    proto::toggle_lsp_logs::LogType::Rpc => LogKind::Rpc,
                };
            log_store.update(cx, |log_store, _| {
                log_store.toggle_lsp_logs(server_id, envelope.payload.enabled, toggled_log_kind);
            });
            anyhow::Ok(())
        })?;

        Ok(())
    }
855
    /// Opens the remote server's settings file as a buffer, creating the
    /// containing (non-visible) worktree if needed, and registers the buffer
    /// with the remote peer. An empty settings file is seeded with the
    /// initial server settings content.
    async fn handle_open_server_settings(
        this: Entity<Self>,
        _: TypedEnvelope<proto::OpenServerSettings>,
        mut cx: AsyncApp,
    ) -> Result<proto::OpenBufferResponse> {
        let settings_path = paths::settings_file();
        let (worktree, path) = this
            .update(&mut cx, |this, cx| {
                this.worktree_store.update(cx, |worktree_store, cx| {
                    worktree_store.find_or_create_worktree(settings_path, false, cx)
                })
            })
            .await?;

        let (buffer, buffer_store) = this.update(&mut cx, |this, cx| {
            let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
                buffer_store.open_buffer(
                    ProjectPath {
                        worktree_id: worktree.read(cx).id(),
                        path,
                    },
                    cx,
                )
            });

            (buffer, this.buffer_store.clone())
        });

        let buffer = buffer.await?;

        let buffer_id = cx.update(|cx| {
            // Seed a brand-new settings file with the default content so the
            // user has a template to edit.
            if buffer.read(cx).is_empty() {
                buffer.update(cx, |buffer, cx| {
                    buffer.edit([(0..0, initial_server_settings_content())], None, cx)
                });
            }

            let buffer_id = buffer.read(cx).remote_id();

            buffer_store.update(cx, |buffer_store, cx| {
                buffer_store
                    .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
                    .detach_and_log_err(cx);
            });

            buffer_id
        });

        Ok(proto::OpenBufferResponse {
            buffer_id: buffer_id.to_proto(),
        })
    }
908
909 async fn handle_spawn_kernel(
910 this: Entity<Self>,
911 envelope: TypedEnvelope<proto::SpawnKernel>,
912 cx: AsyncApp,
913 ) -> Result<proto::SpawnKernelResponse> {
914 let fs = this.update(&mut cx.clone(), |this, _| this.fs.clone());
915
916 let mut ports = Vec::new();
917 for _ in 0..5 {
918 let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
919 let port = listener.local_addr()?.port();
920 ports.push(port);
921 }
922
923 let connection_info = serde_json::json!({
924 "shell_port": ports[0],
925 "iopub_port": ports[1],
926 "stdin_port": ports[2],
927 "control_port": ports[3],
928 "hb_port": ports[4],
929 "ip": "127.0.0.1",
930 "key": uuid::Uuid::new_v4().to_string(),
931 "transport": "tcp",
932 "signature_scheme": "hmac-sha256",
933 "kernel_name": envelope.payload.kernel_name,
934 });
935
936 let connection_file_content = serde_json::to_string_pretty(&connection_info)?;
937 let kernel_id = uuid::Uuid::new_v4().to_string();
938
939 let connection_file_path = std::env::temp_dir().join(format!("kernel-{}.json", kernel_id));
940 fs.save(
941 &connection_file_path,
942 &connection_file_content.as_str().into(),
943 language::LineEnding::Unix,
944 )
945 .await?;
946
947 let working_directory = if envelope.payload.working_directory.is_empty() {
948 std::env::current_dir()
949 .ok()
950 .map(|p| p.to_string_lossy().into_owned())
951 } else {
952 Some(envelope.payload.working_directory)
953 };
954
955 // Spawn kernel (Assuming python for now, or we'd need to parse kernelspec logic here or pass the command)
956
957 // Spawn kernel
958 let spawn_kernel = |binary: &str, args: &[String]| {
959 let mut command = smol::process::Command::new(binary);
960
961 if !args.is_empty() {
962 for arg in args {
963 if arg == "{connection_file}" {
964 command.arg(&connection_file_path);
965 } else {
966 command.arg(arg);
967 }
968 }
969 } else {
970 command
971 .arg("-m")
972 .arg("ipykernel_launcher")
973 .arg("-f")
974 .arg(&connection_file_path);
975 }
976
977 // This ensures subprocesses spawned from the kernel use the correct Python environment
978 let python_bin_dir = std::path::Path::new(binary).parent();
979 if let Some(bin_dir) = python_bin_dir {
980 if let Some(path_var) = std::env::var_os("PATH") {
981 let mut paths = std::env::split_paths(&path_var).collect::<Vec<_>>();
982 paths.insert(0, bin_dir.to_path_buf());
983 if let Ok(new_path) = std::env::join_paths(paths) {
984 command.env("PATH", new_path);
985 }
986 }
987
988 if let Some(venv_root) = bin_dir.parent() {
989 command.env("VIRTUAL_ENV", venv_root.to_string_lossy().to_string());
990 }
991 }
992
993 if let Some(wd) = &working_directory {
994 command.current_dir(wd);
995 }
996 command.spawn()
997 };
998
999 // We need to manage the child process lifecycle
1000 let child = if !envelope.payload.command.is_empty() {
1001 spawn_kernel(&envelope.payload.command, &envelope.payload.args).context(format!(
1002 "failed to spawn kernel process (command: {})",
1003 envelope.payload.command
1004 ))?
1005 } else {
1006 spawn_kernel("python3", &[])
1007 .or_else(|_| spawn_kernel("python", &[]))
1008 .context("failed to spawn kernel process (tried python3 and python)")?
1009 };
1010
1011 this.update(&mut cx.clone(), |this, _cx| {
1012 this.kernels.insert(kernel_id.clone(), child);
1013 });
1014
1015 Ok(proto::SpawnKernelResponse {
1016 kernel_id,
1017 connection_file: connection_file_content,
1018 })
1019 }
1020
1021 async fn handle_kill_kernel(
1022 this: Entity<Self>,
1023 envelope: TypedEnvelope<proto::KillKernel>,
1024 mut cx: AsyncApp,
1025 ) -> Result<proto::Ack> {
1026 let kernel_id = envelope.payload.kernel_id;
1027 let child = this.update(&mut cx, |this, _| this.kernels.remove(&kernel_id));
1028 if let Some(mut child) = child {
1029 child.kill().log_err();
1030 }
1031 Ok(proto::Ack {})
1032 }
1033
    /// Runs a project-wide search locally and streams candidate buffer ids back
    /// to the requesting peer in batches.
    ///
    /// Each matching buffer is first forwarded to the peer via
    /// `create_buffer_for_peer` (so the peer knows the buffer before its id
    /// arrives), then its id is pushed into an adaptive batcher; a background
    /// subtask drains the batches into `FindSearchCandidatesChunk::Matches`
    /// messages, and a final `Done` chunk marks completion. The whole task is
    /// registered on the buffer store under `(peer_id, handle)` so a later
    /// cancel request can drop it mid-flight.
    async fn handle_find_search_candidates(
        this: Entity<Self>,
        envelope: TypedEnvelope<proto::FindSearchCandidates>,
        mut cx: AsyncApp,
    ) -> Result<proto::Ack> {
        use futures::stream::StreamExt as _;

        // Prefer the original sender when the message was forwarded.
        let peer_id = envelope.original_sender_id.unwrap_or(envelope.sender_id);
        let message = envelope.payload;
        let query = SearchQuery::from_proto(
            message.query.context("missing query field")?,
            PathStyle::local(),
        )?;

        let project_id = message.project_id;
        let buffer_store = this.read_with(&cx, |this, _| this.buffer_store.clone());
        let handle = message.handle;
        // Second handle to the buffer store: `buffer_store` is moved into the
        // spawned task below, while this clone is used afterwards to register
        // the task for cancellation.
        let _buffer_store = buffer_store.clone();
        let client = this.read_with(&cx, |this, _| this.session.clone());
        let task = cx.spawn(async move |cx| {
            // Kick off the local search; `results.rx` yields matching buffers.
            let results = this.update(cx, |this, cx| {
                project::Search::local(
                    this.fs.clone(),
                    this.buffer_store.clone(),
                    this.worktree_store.clone(),
                    message.limit as _,
                    cx,
                )
                .into_handle(query, cx)
                .matching_buffers(cx)
            });
            let (batcher, batches) =
                project::project_search::AdaptiveBatcher::new(cx.background_executor());
            let mut new_matches = Box::pin(results.rx);

            // Background subtask: drain batched buffer ids into `Matches`
            // chunks sent to the peer, independently of the match loop below.
            let sender_task = cx.background_executor().spawn({
                let client = client.clone();
                async move {
                    let mut batches = std::pin::pin!(batches);
                    while let Some(buffer_ids) = batches.next().await {
                        client
                            .request(proto::FindSearchCandidatesChunk {
                                handle,
                                peer_id: Some(peer_id),
                                project_id,
                                variant: Some(
                                    proto::find_search_candidates_chunk::Variant::Matches(
                                        proto::FindSearchCandidatesMatches { buffer_ids },
                                    ),
                                ),
                            })
                            .await?;
                    }
                    anyhow::Ok(())
                }
            });

            while let Some(buffer) = new_matches.next().await {
                // Forward the buffer to the peer before sending its id; a
                // failure here is tolerated (best-effort replication).
                let _ = buffer_store
                    .update(cx, |this, cx| {
                        this.create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
                    })
                    .await;
                let buffer_id = buffer.read_with(cx, |this, _| this.remote_id().to_proto());
                batcher.push(buffer_id).await;
            }
            // No more matches: flush any partial batch, then wait for the
            // sender to finish before signalling completion.
            batcher.flush().await;

            sender_task.await?;

            client
                .request(proto::FindSearchCandidatesChunk {
                    handle,
                    peer_id: Some(peer_id),
                    project_id,
                    variant: Some(proto::find_search_candidates_chunk::Variant::Done(
                        proto::FindSearchCandidatesDone {},
                    )),
                })
                .await?;
            anyhow::Ok(())
        });
        // Keep the task alive and addressable so a cancel message can drop it.
        _buffer_store.update(&mut cx, |this, _| {
            this.register_ongoing_project_search((peer_id, handle), task);
        });

        Ok(proto::Ack {})
    }
1122
1123 // Goes from client to host.
1124 async fn handle_find_search_candidates_cancel(
1125 this: Entity<Self>,
1126 envelope: TypedEnvelope<proto::FindSearchCandidatesCancelled>,
1127 mut cx: AsyncApp,
1128 ) -> Result<()> {
1129 let buffer_store = this.read_with(&mut cx, |this, _| this.buffer_store.clone());
1130 BufferStore::handle_find_search_candidates_cancel(buffer_store, envelope, cx).await
1131 }
1132
1133 async fn handle_list_remote_directory(
1134 this: Entity<Self>,
1135 envelope: TypedEnvelope<proto::ListRemoteDirectory>,
1136 cx: AsyncApp,
1137 ) -> Result<proto::ListRemoteDirectoryResponse> {
1138 use smol::stream::StreamExt;
1139 let fs = cx.read_entity(&this, |this, _| this.fs.clone());
1140 let expanded = PathBuf::from(shellexpand::tilde(&envelope.payload.path).to_string());
1141 let check_info = envelope
1142 .payload
1143 .config
1144 .as_ref()
1145 .is_some_and(|config| config.is_dir);
1146
1147 let mut entries = Vec::new();
1148 let mut entry_info = Vec::new();
1149 let mut response = fs.read_dir(&expanded).await?;
1150 while let Some(path) = response.next().await {
1151 let path = path?;
1152 if let Some(file_name) = path.file_name() {
1153 entries.push(file_name.to_string_lossy().into_owned());
1154 if check_info {
1155 let is_dir = fs.is_dir(&path).await;
1156 entry_info.push(proto::EntryInfo { is_dir });
1157 }
1158 }
1159 }
1160 Ok(proto::ListRemoteDirectoryResponse {
1161 entries,
1162 entry_info,
1163 })
1164 }
1165
1166 async fn handle_get_path_metadata(
1167 this: Entity<Self>,
1168 envelope: TypedEnvelope<proto::GetPathMetadata>,
1169 cx: AsyncApp,
1170 ) -> Result<proto::GetPathMetadataResponse> {
1171 let fs = cx.read_entity(&this, |this, _| this.fs.clone());
1172 let expanded = PathBuf::from(shellexpand::tilde(&envelope.payload.path).to_string());
1173
1174 let metadata = fs.metadata(&expanded).await?;
1175 let is_dir = metadata.map(|metadata| metadata.is_dir).unwrap_or(false);
1176
1177 Ok(proto::GetPathMetadataResponse {
1178 exists: metadata.is_some(),
1179 is_dir,
1180 path: expanded.to_string_lossy().into_owned(),
1181 })
1182 }
1183
1184 async fn handle_shutdown_remote_server(
1185 _this: Entity<Self>,
1186 _envelope: TypedEnvelope<proto::ShutdownRemoteServer>,
1187 cx: AsyncApp,
1188 ) -> Result<proto::Ack> {
1189 cx.spawn(async move |cx| {
1190 cx.update(|cx| {
1191 // TODO: This is a hack, because in a headless project, shutdown isn't executed
1192 // when calling quit, but it should be.
1193 cx.shutdown();
1194 cx.quit();
1195 })
1196 })
1197 .detach();
1198
1199 Ok(proto::Ack {})
1200 }
1201
    /// Responds to a client liveness check with an Ack; no state is touched.
    pub async fn handle_ping(
        _this: Entity<Self>,
        _envelope: TypedEnvelope<proto::Ping>,
        _cx: AsyncApp,
    ) -> Result<proto::Ack> {
        log::debug!("Received ping from client");
        Ok(proto::Ack {})
    }
1210
1211 async fn handle_get_processes(
1212 _this: Entity<Self>,
1213 _envelope: TypedEnvelope<proto::GetProcesses>,
1214 _cx: AsyncApp,
1215 ) -> Result<proto::GetProcessesResponse> {
1216 let mut processes = Vec::new();
1217 let refresh_kind = RefreshKind::nothing().with_processes(
1218 ProcessRefreshKind::nothing()
1219 .without_tasks()
1220 .with_cmd(UpdateKind::Always),
1221 );
1222
1223 for process in System::new_with_specifics(refresh_kind)
1224 .processes()
1225 .values()
1226 {
1227 let name = process.name().to_string_lossy().into_owned();
1228 let command = process
1229 .cmd()
1230 .iter()
1231 .map(|s| s.to_string_lossy().into_owned())
1232 .collect::<Vec<_>>();
1233
1234 processes.push(proto::ProcessInfo {
1235 pid: process.pid().as_u32(),
1236 name,
1237 command,
1238 });
1239 }
1240
1241 processes.sort_by_key(|p| p.name.clone());
1242
1243 Ok(proto::GetProcessesResponse { processes })
1244 }
1245
    /// Collects executor profiling timings that the client has not yet seen
    /// and converts them to proto, together with a "now" timestamp expressed
    /// in nanoseconds since the profiling collector's startup time.
    ///
    /// With `foreground_only` set, only the current (foreground) thread's
    /// timings are sampled; otherwise all threads are included.
    async fn handle_get_remote_profiling_data(
        this: Entity<Self>,
        envelope: TypedEnvelope<proto::GetRemoteProfilingData>,
        cx: AsyncApp,
    ) -> Result<proto::GetRemoteProfilingDataResponse> {
        let foreground_only = envelope.payload.foreground_only;

        // Sample timings and the current instant inside the same update so
        // `now_nanos` is consistent with the deltas just collected.
        let (deltas, now_nanos) = cx.update(|cx| {
            let dispatcher = cx.foreground_executor().dispatcher();
            let timings = if foreground_only {
                vec![dispatcher.get_current_thread_timings()]
            } else {
                dispatcher.get_all_timings()
            };
            this.update(cx, |this, _cx| {
                // `collect_unseen` returns only timings not already reported,
                // so repeated requests stream incremental deltas.
                let deltas = this.profiling_collector.collect_unseen(timings);
                let now_nanos = Instant::now()
                    .duration_since(this.profiling_collector.startup_time())
                    .as_nanos() as u64;
                (deltas, now_nanos)
            })
        });

        // Map the per-thread deltas into the wire representation.
        let threads = deltas
            .into_iter()
            .map(|delta| proto::RemoteProfilingThread {
                thread_name: delta.thread_name,
                thread_id: delta.thread_id,
                timings: delta
                    .new_timings
                    .into_iter()
                    .map(|t| proto::RemoteProfilingTiming {
                        location: Some(proto::RemoteProfilingLocation {
                            file: t.location.file.to_string(),
                            line: t.location.line,
                            column: t.location.column,
                        }),
                        start_nanos: t.start as u64,
                        duration_nanos: t.duration as u64,
                    })
                    .collect(),
            })
            .collect();

        Ok(proto::GetRemoteProfilingDataResponse { threads, now_nanos })
    }
1292
1293 async fn handle_get_directory_environment(
1294 this: Entity<Self>,
1295 envelope: TypedEnvelope<proto::GetDirectoryEnvironment>,
1296 mut cx: AsyncApp,
1297 ) -> Result<proto::DirectoryEnvironment> {
1298 let shell = task::shell_from_proto(envelope.payload.shell.context("missing shell")?)?;
1299 let directory = PathBuf::from(envelope.payload.directory);
1300 let environment = this
1301 .update(&mut cx, |this, cx| {
1302 this.environment.update(cx, |environment, cx| {
1303 environment.local_directory_environment(&shell, directory.into(), cx)
1304 })
1305 })
1306 .await
1307 .context("failed to get directory environment")?
1308 .into_iter()
1309 .collect();
1310 Ok(proto::DirectoryEnvironment { environment })
1311 }
1312}
1313
1314fn prompt_to_proto(
1315 prompt: &project::LanguageServerPromptRequest,
1316) -> proto::language_server_prompt_request::Level {
1317 match prompt.level {
1318 PromptLevel::Info => proto::language_server_prompt_request::Level::Info(
1319 proto::language_server_prompt_request::Info {},
1320 ),
1321 PromptLevel::Warning => proto::language_server_prompt_request::Level::Warning(
1322 proto::language_server_prompt_request::Warning {},
1323 ),
1324 PromptLevel::Critical => proto::language_server_prompt_request::Level::Critical(
1325 proto::language_server_prompt_request::Critical {},
1326 ),
1327 }
1328}