1use anyhow::{Context as _, Result, anyhow};
2use client::ProjectId;
3use collections::HashMap;
4use collections::HashSet;
5use language::File;
6use lsp::LanguageServerId;
7
8use extension::ExtensionHostProxy;
9use extension_host::headless_host::HeadlessExtensionStore;
10use fs::Fs;
11use gpui::{App, AppContext as _, AsyncApp, Context, Entity, PromptLevel};
12use http_client::HttpClient;
13use language::{Buffer, BufferEvent, LanguageRegistry, proto::serialize_operation};
14use node_runtime::NodeRuntime;
15use project::{
16 AgentRegistryStore, LspStore, LspStoreEvent, ManifestTree, PrettierStore, ProjectEnvironment,
17 ProjectPath, ToolchainStore, WorktreeId,
18 agent_server_store::AgentServerStore,
19 buffer_store::{BufferStore, BufferStoreEvent},
20 context_server_store::ContextServerStore,
21 debugger::{breakpoint_store::BreakpointStore, dap_store::DapStore},
22 git_store::GitStore,
23 image_store::ImageId,
24 lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind, LogKind},
25 project_settings::SettingsObserver,
26 search::SearchQuery,
27 task_store::TaskStore,
28 trusted_worktrees::{PathTrust, RemoteHostLocation, TrustedWorktrees},
29 worktree_store::{WorktreeIdCounter, WorktreeStore},
30};
31use rpc::{
32 AnyProtoClient, TypedEnvelope,
33 proto::{self, REMOTE_SERVER_PEER_ID, REMOTE_SERVER_PROJECT_ID},
34};
35use smol::process::Child;
36
37use settings::initial_server_settings_content;
38use std::{
39 num::NonZeroU64,
40 path::{Path, PathBuf},
41 sync::{
42 Arc,
43 atomic::{AtomicU64, AtomicUsize, Ordering},
44 },
45 time::Instant,
46};
47use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind};
48use util::{ResultExt, paths::PathStyle, rel_path::RelPath};
49use worktree::Worktree;
50
/// Server-side counterpart of a project, running inside the remote
/// (headless) server process. Owns the local stores — worktrees, buffers,
/// LSP, tasks, debugger, git, settings, agents, extensions — and mirrors
/// them to the client over the proto `session` connection.
pub struct HeadlessProject {
    pub fs: Arc<dyn Fs>,
    /// RPC connection back to the client driving this server.
    pub session: AnyProtoClient,
    pub worktree_store: Entity<WorktreeStore>,
    pub buffer_store: Entity<BufferStore>,
    pub lsp_store: Entity<LspStore>,
    pub task_store: Entity<TaskStore>,
    pub dap_store: Entity<DapStore>,
    pub breakpoint_store: Entity<BreakpointStore>,
    pub agent_server_store: Entity<AgentServerStore>,
    pub context_server_store: Entity<ContextServerStore>,
    pub settings_observer: Entity<SettingsObserver>,
    /// Shared counter handed to newly created worktrees (see
    /// `handle_add_worktree`) so entry ids stay unique across worktrees.
    pub next_entry_id: Arc<AtomicUsize>,
    pub languages: Arc<LanguageRegistry>,
    pub extensions: Entity<HeadlessExtensionStore>,
    pub git_store: Entity<GitStore>,
    pub environment: Entity<ProjectEnvironment>,
    pub profiling_collector: gpui::ProfilingCollector,
    // Used mostly to keep alive the toolchain store for RPC handlers.
    // Local variant is used within LSP store, but that's a separate entity.
    pub _toolchain_store: Entity<ToolchainStore>,
    /// Spawned Jupyter kernel processes, keyed by the kernel id returned
    /// from `handle_spawn_kernel`; removed and killed in `handle_kill_kernel`.
    pub kernels: HashMap<String, Child>,
}
74
/// Everything `HeadlessProject::new` needs from the surrounding
/// application: the client connection plus process-wide services.
pub struct HeadlessAppState {
    pub session: AnyProtoClient,
    pub fs: Arc<dyn Fs>,
    pub http_client: Arc<dyn HttpClient>,
    pub node_runtime: NodeRuntime,
    pub languages: Arc<LanguageRegistry>,
    pub extension_host_proxy: Arc<ExtensionHostProxy>,
    /// When the server process started; seeds the profiling collector.
    pub startup_time: Instant,
}
84
85impl HeadlessProject {
    /// One-time global initialization: settings plus the language-server
    /// log store.
    pub fn init(cx: &mut App) {
        settings::init(cx);
        // NOTE(review): meaning of the `true` flag is not visible here —
        // see `log_store::init` for what it enables.
        log_store::init(true, cx);
    }
90
    /// Builds the headless project: constructs every local store, marks
    /// each as shared over `session` (so state streams to the client),
    /// and registers all RPC message/request handlers.
    ///
    /// `init_worktree_trust` controls whether worktree trust tracking is
    /// installed for this session.
    pub fn new(
        HeadlessAppState {
            session,
            fs,
            http_client,
            node_runtime,
            languages,
            extension_host_proxy: proxy,
            startup_time,
        }: HeadlessAppState,
        init_worktree_trust: bool,
        cx: &mut Context<Self>,
    ) -> Self {
        debug_adapter_extension::init(proxy.clone(), cx);
        languages::init(languages.clone(), fs.clone(), node_runtime.clone(), cx);

        // Worktree store: created local, then immediately shared so
        // worktree updates flow to the remote peer.
        let worktree_store = cx.new(|cx| {
            let mut store = WorktreeStore::local(true, fs.clone(), WorktreeIdCounter::get(cx));
            store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            store
        });

        if init_worktree_trust {
            project::trusted_worktrees::track_worktree_trust(
                worktree_store.clone(),
                None::<RemoteHostLocation>,
                Some((session.clone(), ProjectId(REMOTE_SERVER_PROJECT_ID))),
                None,
                cx,
            );
        }

        let environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, true, cx));
        let manifest_tree = ManifestTree::new(worktree_store.clone(), cx);
        let toolchain_store = cx.new(|cx| {
            ToolchainStore::local(
                languages.clone(),
                worktree_store.clone(),
                environment.clone(),
                manifest_tree.clone(),
                fs.clone(),
                cx,
            )
        });

        let buffer_store = cx.new(|cx| {
            let mut buffer_store = BufferStore::local(worktree_store.clone(), cx);
            buffer_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            buffer_store
        });

        let breakpoint_store = cx.new(|_| {
            let mut breakpoint_store =
                BreakpointStore::local(worktree_store.clone(), buffer_store.clone());
            breakpoint_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone());

            breakpoint_store
        });

        let dap_store = cx.new(|cx| {
            let mut dap_store = DapStore::new_local(
                http_client.clone(),
                node_runtime.clone(),
                fs.clone(),
                environment.clone(),
                toolchain_store.read(cx).as_language_toolchain_store(),
                worktree_store.clone(),
                breakpoint_store.clone(),
                true,
                cx,
            );
            dap_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            dap_store
        });

        let git_store = cx.new(|cx| {
            let mut store = GitStore::local(
                &worktree_store,
                buffer_store.clone(),
                environment.clone(),
                fs.clone(),
                cx,
            );
            store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            store
        });

        let prettier_store = cx.new(|cx| {
            PrettierStore::new(
                node_runtime.clone(),
                fs.clone(),
                languages.clone(),
                worktree_store.clone(),
                cx,
            )
        });

        let task_store = cx.new(|cx| {
            let mut task_store = TaskStore::local(
                buffer_store.downgrade(),
                worktree_store.clone(),
                toolchain_store.read(cx).as_language_toolchain_store(),
                environment.clone(),
                cx,
            );
            task_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            task_store
        });
        let settings_observer = cx.new(|cx| {
            let mut observer = SettingsObserver::new_local(
                fs.clone(),
                worktree_store.clone(),
                task_store.clone(),
                true,
                cx,
            );
            observer.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            observer
        });

        let lsp_store = cx.new(|cx| {
            let mut lsp_store = LspStore::new_local(
                buffer_store.clone(),
                worktree_store.clone(),
                prettier_store.clone(),
                toolchain_store
                    .read(cx)
                    .as_local_store()
                    // Safe: `toolchain_store` was constructed with
                    // `ToolchainStore::local` just above.
                    .expect("Toolchain store to be local")
                    .clone(),
                environment.clone(),
                manifest_tree,
                languages.clone(),
                http_client.clone(),
                fs.clone(),
                cx,
            );
            lsp_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            lsp_store
        });

        AgentRegistryStore::init_global(cx, fs.clone(), http_client.clone());

        let agent_server_store = cx.new(|cx| {
            let mut agent_server_store = AgentServerStore::local(
                node_runtime.clone(),
                fs.clone(),
                environment.clone(),
                http_client.clone(),
                cx,
            );
            agent_server_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            agent_server_store
        });

        let context_server_store = cx.new(|cx| {
            let mut context_server_store =
                ContextServerStore::local(worktree_store.clone(), None, true, cx);
            context_server_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone());
            context_server_store
        });

        cx.subscribe(&lsp_store, Self::on_lsp_store_event).detach();
        language_extension::init(
            language_extension::LspAccess::ViaLspStore(lsp_store.clone()),
            proxy.clone(),
            languages.clone(),
        );

        // Every buffer added to the store gets its operations forwarded to
        // the client (see `on_buffer_event`).
        cx.subscribe(&buffer_store, |_this, _buffer_store, event, cx| {
            if let BufferStoreEvent::BufferAdded(buffer) = event {
                cx.subscribe(buffer, Self::on_buffer_event).detach();
            }
        })
        .detach();

        let extensions = HeadlessExtensionStore::new(
            fs.clone(),
            http_client.clone(),
            paths::remote_extensions_dir().to_path_buf(),
            proxy,
            node_runtime,
            cx,
        );

        // local_machine -> ssh handlers: route incoming messages for the
        // shared project id to the corresponding store entities.
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &worktree_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &buffer_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &cx.entity());
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &lsp_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &task_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &toolchain_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &dap_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &breakpoint_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &settings_observer);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &git_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &agent_server_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &context_server_store);

        session.add_request_handler(cx.weak_entity(), Self::handle_list_remote_directory);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_path_metadata);
        session.add_request_handler(cx.weak_entity(), Self::handle_shutdown_remote_server);
        session.add_request_handler(cx.weak_entity(), Self::handle_ping);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_processes);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_remote_profiling_data);

        session.add_entity_request_handler(Self::handle_add_worktree);
        session.add_request_handler(cx.weak_entity(), Self::handle_remove_worktree);

        session.add_entity_request_handler(Self::handle_open_buffer_by_path);
        session.add_entity_request_handler(Self::handle_open_new_buffer);
        session.add_entity_request_handler(Self::handle_find_search_candidates);
        session.add_entity_request_handler(Self::handle_open_server_settings);
        session.add_entity_request_handler(Self::handle_get_directory_environment);
        session.add_entity_message_handler(Self::handle_toggle_lsp_logs);
        session.add_entity_request_handler(Self::handle_open_image_by_path);
        session.add_entity_request_handler(Self::handle_trust_worktrees);
        session.add_entity_request_handler(Self::handle_restrict_worktrees);
        session.add_entity_request_handler(Self::handle_download_file_by_path);

        session.add_entity_message_handler(Self::handle_find_search_candidates_cancel);
        session.add_entity_request_handler(BufferStore::handle_update_buffer);
        session.add_entity_message_handler(BufferStore::handle_close_buffer);

        session.add_request_handler(
            extensions.downgrade(),
            HeadlessExtensionStore::handle_sync_extensions,
        );
        session.add_request_handler(
            extensions.downgrade(),
            HeadlessExtensionStore::handle_install_extension,
        );

        session.add_request_handler(cx.weak_entity(), Self::handle_spawn_kernel);
        session.add_request_handler(cx.weak_entity(), Self::handle_kill_kernel);

        BufferStore::init(&session);
        WorktreeStore::init(&session);
        SettingsObserver::init(&session);
        LspStore::init(&session);
        TaskStore::init(Some(&session));
        ToolchainStore::init(&session);
        DapStore::init(&session, cx);
        // todo(debugger): Re init breakpoint store when we set it up for collab
        BreakpointStore::init(&session);
        GitStore::init(&session);
        AgentServerStore::init_headless(&session);
        ContextServerStore::init_headless(&session);

        HeadlessProject {
            next_entry_id: Default::default(),
            session,
            settings_observer,
            fs,
            worktree_store,
            buffer_store,
            lsp_store,
            task_store,
            dap_store,
            breakpoint_store,
            agent_server_store,
            context_server_store,
            languages,
            extensions,
            git_store,
            environment,
            profiling_collector: gpui::ProfilingCollector::new(startup_time),
            _toolchain_store: toolchain_store,
            kernels: Default::default(),
        }
    }
363
364 fn on_buffer_event(
365 &mut self,
366 buffer: Entity<Buffer>,
367 event: &BufferEvent,
368 cx: &mut Context<Self>,
369 ) {
370 if let BufferEvent::Operation {
371 operation,
372 is_local: true,
373 } = event
374 {
375 cx.background_spawn(self.session.request(proto::UpdateBuffer {
376 project_id: REMOTE_SERVER_PROJECT_ID,
377 buffer_id: buffer.read(cx).remote_id().to_proto(),
378 operations: vec![serialize_operation(operation)],
379 }))
380 .detach()
381 }
382 }
383
    /// Relays LSP store events to the client and mirrors language-server
    /// lifecycle changes into the global log store (when one exists).
    fn on_lsp_store_event(
        &mut self,
        lsp_store: Entity<LspStore>,
        event: &LspStoreEvent,
        cx: &mut Context<Self>,
    ) {
        match event {
            // A server started: register it with the log store so its
            // logs/traces/RPC can be captured and toggled from the client.
            LspStoreEvent::LanguageServerAdded(id, name, worktree_id) => {
                let log_store = cx
                    .try_global::<GlobalLogStore>()
                    .map(|lsp_logs| lsp_logs.0.clone());
                if let Some(log_store) = log_store {
                    log_store.update(cx, |log_store, cx| {
                        log_store.add_language_server(
                            LanguageServerKind::LocalSsh {
                                // NOTE(review): kind uses `self.lsp_store`
                                // while the server lookup below uses the
                                // event's `lsp_store` — presumably the same
                                // entity; confirm.
                                lsp_store: self.lsp_store.downgrade(),
                            },
                            *id,
                            Some(name.clone()),
                            *worktree_id,
                            lsp_store.read(cx).language_server_for_id(*id),
                            cx,
                        );
                    });
                }
            }
            // A server stopped: drop its log-store registration.
            LspStoreEvent::LanguageServerRemoved(id) => {
                let log_store = cx
                    .try_global::<GlobalLogStore>()
                    .map(|lsp_logs| lsp_logs.0.clone());
                if let Some(log_store) = log_store {
                    log_store.update(cx, |log_store, cx| {
                        log_store.remove_language_server(*id, cx);
                    });
                }
            }
            // Status/progress updates stream straight through to the client.
            LspStoreEvent::LanguageServerUpdate {
                language_server_id,
                name,
                message,
            } => {
                self.session
                    .send(proto::UpdateLanguageServer {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        server_name: name.as_ref().map(|name| name.to_string()),
                        language_server_id: language_server_id.to_proto(),
                        variant: Some(message.clone()),
                    })
                    .log_err();
            }
            // Notifications surface as a toast on the client.
            LspStoreEvent::Notification(message) => {
                self.session
                    .send(proto::Toast {
                        project_id: REMOTE_SERVER_PROJECT_ID,
                        notification_id: "lsp".to_string(),
                        message: message.clone(),
                    })
                    .log_err();
            }
            // Server prompts are proxied to the client; the chosen action
            // index is relayed back to the server asynchronously.
            LspStoreEvent::LanguageServerPrompt(prompt) => {
                let request = self.session.request(proto::LanguageServerPromptRequest {
                    project_id: REMOTE_SERVER_PROJECT_ID,
                    actions: prompt
                        .actions
                        .iter()
                        .map(|action| action.title.to_string())
                        .collect(),
                    level: Some(prompt_to_proto(prompt)),
                    lsp_name: prompt.lsp_name.clone(),
                    message: prompt.message.clone(),
                });
                let prompt = prompt.clone();
                cx.background_spawn(async move {
                    let response = request.await?;
                    if let Some(action_response) = response.action_response {
                        prompt.respond(action_response as usize).await;
                    }
                    anyhow::Ok(())
                })
                .detach();
            }
            _ => {}
        }
    }
468
    /// Creates a local worktree for the requested path and returns its id
    /// and canonicalized location.
    ///
    /// The path is tilde-expanded; if it doesn't exist, we fall back to
    /// canonicalizing its parent (so a worktree can be created for a
    /// not-yet-existing file/directory under an existing directory).
    pub async fn handle_add_worktree(
        this: Entity<Self>,
        message: TypedEnvelope<proto::AddWorktree>,
        mut cx: AsyncApp,
    ) -> Result<proto::AddWorktreeResponse> {
        use client::ErrorCodeExt;
        let fs = this.read_with(&cx, |this, _| this.fs.clone());
        let path = PathBuf::from(shellexpand::tilde(&message.payload.path).to_string());

        let canonicalized = match fs.canonicalize(&path).await {
            Ok(path) => path,
            Err(e) => {
                // Path doesn't exist: canonicalize the parent instead and
                // re-append the file name. An empty parent means a bare
                // relative name — resolve it against the home directory.
                let mut parent = path
                    .parent()
                    .ok_or(e)
                    .with_context(|| format!("{path:?} does not exist"))?;
                if parent == Path::new("") {
                    parent = util::paths::home_dir();
                }
                let parent = fs.canonicalize(parent).await.map_err(|_| {
                    anyhow!(
                        proto::ErrorCode::DevServerProjectPathDoesNotExist
                            .with_tag("path", path.to_string_lossy().as_ref())
                    )
                })?;
                if let Some(file_name) = path.file_name() {
                    parent.join(file_name)
                } else {
                    parent
                }
            }
        };
        let next_worktree_id = this
            .update(&mut cx, |this, cx| {
                this.worktree_store
                    .update(cx, |worktree_store, _| worktree_store.next_worktree_id())
            })
            .await?;
        let worktree = this
            .read_with(&cx.clone(), |this, _| {
                Worktree::local(
                    Arc::from(canonicalized.as_path()),
                    message.payload.visible,
                    this.fs.clone(),
                    this.next_entry_id.clone(),
                    true,
                    next_worktree_id,
                    &mut cx,
                )
            })
            .await?;

        let response = this.read_with(&cx, |_, cx| {
            let worktree = worktree.read(cx);
            proto::AddWorktreeResponse {
                worktree_id: worktree.id().to_proto(),
                canonicalized_path: canonicalized.to_string_lossy().into_owned(),
            }
        });

        // We spawn this asynchronously, so that we can send the response back
        // *before* `worktree_store.add()` can send out UpdateProject requests
        // to the client about the new worktree.
        //
        // That lets the client manage the reference/handles of the newly-added
        // worktree, before getting interrupted by an UpdateProject request.
        //
        // This fixes the problem of the client sending the AddWorktree request,
        // headless project sending out a project update, client receiving it
        // and immediately dropping the reference of the new client, causing it
        // to be dropped on the headless project, and the client only then
        // receiving a response to AddWorktree.
        cx.spawn(async move |cx| {
            this.update(cx, |this, cx| {
                this.worktree_store.update(cx, |worktree_store, cx| {
                    worktree_store.add(&worktree, cx);
                });
            });
        })
        .detach();

        Ok(response)
    }
552
553 pub async fn handle_remove_worktree(
554 this: Entity<Self>,
555 envelope: TypedEnvelope<proto::RemoveWorktree>,
556 mut cx: AsyncApp,
557 ) -> Result<proto::Ack> {
558 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
559 this.update(&mut cx, |this, cx| {
560 this.worktree_store.update(cx, |worktree_store, cx| {
561 worktree_store.remove_worktree(worktree_id, cx);
562 });
563 });
564 Ok(proto::Ack {})
565 }
566
567 pub async fn handle_open_buffer_by_path(
568 this: Entity<Self>,
569 message: TypedEnvelope<proto::OpenBufferByPath>,
570 mut cx: AsyncApp,
571 ) -> Result<proto::OpenBufferResponse> {
572 let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
573 let path = RelPath::from_proto(&message.payload.path)?;
574 let (buffer_store, buffer) = this.update(&mut cx, |this, cx| {
575 let buffer_store = this.buffer_store.clone();
576 let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
577 buffer_store.open_buffer(ProjectPath { worktree_id, path }, cx)
578 });
579 (buffer_store, buffer)
580 });
581
582 let buffer = buffer.await?;
583 let buffer_id = buffer.read_with(&cx, |b, _| b.remote_id());
584 buffer_store.update(&mut cx, |buffer_store, cx| {
585 buffer_store
586 .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
587 .detach_and_log_err(cx);
588 });
589
590 Ok(proto::OpenBufferResponse {
591 buffer_id: buffer_id.to_proto(),
592 })
593 }
594
595 pub async fn handle_open_image_by_path(
596 this: Entity<Self>,
597 message: TypedEnvelope<proto::OpenImageByPath>,
598 mut cx: AsyncApp,
599 ) -> Result<proto::OpenImageResponse> {
600 static NEXT_ID: AtomicU64 = AtomicU64::new(1);
601 let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
602 let path = RelPath::from_proto(&message.payload.path)?;
603 let project_id = message.payload.project_id;
604 use proto::create_image_for_peer::Variant;
605
606 let (worktree_store, session) = this.read_with(&cx, |this, _| {
607 (this.worktree_store.clone(), this.session.clone())
608 });
609
610 let worktree = worktree_store
611 .read_with(&cx, |store, cx| store.worktree_for_id(worktree_id, cx))
612 .context("worktree not found")?;
613
614 let load_task = worktree.update(&mut cx, |worktree, cx| {
615 worktree.load_binary_file(path.as_ref(), cx)
616 });
617
618 let loaded_file = load_task.await?;
619 let content = loaded_file.content;
620 let file = loaded_file.file;
621
622 let proto_file = worktree.read_with(&cx, |_worktree, cx| file.to_proto(cx));
623 let image_id =
624 ImageId::from(NonZeroU64::new(NEXT_ID.fetch_add(1, Ordering::Relaxed)).unwrap());
625
626 let format = image::guess_format(&content)
627 .map(|f| format!("{:?}", f).to_lowercase())
628 .unwrap_or_else(|_| "unknown".to_string());
629
630 let state = proto::ImageState {
631 id: image_id.to_proto(),
632 file: Some(proto_file),
633 content_size: content.len() as u64,
634 format,
635 };
636
637 session.send(proto::CreateImageForPeer {
638 project_id,
639 peer_id: Some(REMOTE_SERVER_PEER_ID),
640 variant: Some(Variant::State(state)),
641 })?;
642
643 const CHUNK_SIZE: usize = 1024 * 1024; // 1MB chunks
644 for chunk in content.chunks(CHUNK_SIZE) {
645 session.send(proto::CreateImageForPeer {
646 project_id,
647 peer_id: Some(REMOTE_SERVER_PEER_ID),
648 variant: Some(Variant::Chunk(proto::ImageChunk {
649 image_id: image_id.to_proto(),
650 data: chunk.to_vec(),
651 })),
652 })?;
653 }
654
655 Ok(proto::OpenImageResponse {
656 image_id: image_id.to_proto(),
657 })
658 }
659
660 pub async fn handle_trust_worktrees(
661 this: Entity<Self>,
662 envelope: TypedEnvelope<proto::TrustWorktrees>,
663 mut cx: AsyncApp,
664 ) -> Result<proto::Ack> {
665 let trusted_worktrees = cx
666 .update(|cx| TrustedWorktrees::try_get_global(cx))
667 .context("missing trusted worktrees")?;
668 let worktree_store = this.read_with(&cx, |project, _| project.worktree_store.clone());
669 trusted_worktrees.update(&mut cx, |trusted_worktrees, cx| {
670 trusted_worktrees.trust(
671 &worktree_store,
672 envelope
673 .payload
674 .trusted_paths
675 .into_iter()
676 .filter_map(PathTrust::from_proto)
677 .collect(),
678 cx,
679 );
680 });
681 Ok(proto::Ack {})
682 }
683
684 pub async fn handle_restrict_worktrees(
685 this: Entity<Self>,
686 envelope: TypedEnvelope<proto::RestrictWorktrees>,
687 mut cx: AsyncApp,
688 ) -> Result<proto::Ack> {
689 let trusted_worktrees = cx
690 .update(|cx| TrustedWorktrees::try_get_global(cx))
691 .context("missing trusted worktrees")?;
692 let worktree_store = this.read_with(&cx, |project, _| project.worktree_store.downgrade());
693 trusted_worktrees.update(&mut cx, |trusted_worktrees, cx| {
694 let restricted_paths = envelope
695 .payload
696 .worktree_ids
697 .into_iter()
698 .map(WorktreeId::from_proto)
699 .map(PathTrust::Worktree)
700 .collect::<HashSet<_>>();
701 trusted_worktrees.restrict(worktree_store, restricted_paths, cx);
702 });
703 Ok(proto::Ack {})
704 }
705
    /// Streams a file's contents to the peer for download: a `FileState`
    /// message first, then the raw bytes in 1MB chunks, all tagged with
    /// the client-provided `file_id`.
    pub async fn handle_download_file_by_path(
        this: Entity<Self>,
        message: TypedEnvelope<proto::DownloadFileByPath>,
        mut cx: AsyncApp,
    ) -> Result<proto::DownloadFileResponse> {
        log::debug!(
            "handle_download_file_by_path: received request: {:?}",
            message.payload
        );

        let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
        let path = RelPath::from_proto(&message.payload.path)?;
        let project_id = message.payload.project_id;
        // The id is chosen by the client, not generated here — it lets the
        // client correlate the streamed chunks with its request.
        let file_id = message.payload.file_id;
        log::debug!(
            "handle_download_file_by_path: worktree_id={:?}, path={:?}, file_id={}",
            worktree_id,
            path,
            file_id
        );
        use proto::create_file_for_peer::Variant;

        let (worktree_store, session): (Entity<WorktreeStore>, AnyProtoClient) = this
            .read_with(&cx, |this, _| {
                (this.worktree_store.clone(), this.session.clone())
            });

        let worktree = worktree_store
            .read_with(&cx, |store, cx| store.worktree_for_id(worktree_id, cx))
            .context("worktree not found")?;

        let download_task = worktree.update(&mut cx, |worktree: &mut Worktree, cx| {
            worktree.load_binary_file(path.as_ref(), cx)
        });

        let downloaded_file = download_task.await?;
        let content = downloaded_file.content;
        let file = downloaded_file.file;
        log::debug!(
            "handle_download_file_by_path: file loaded, content_size={}",
            content.len()
        );

        let proto_file = worktree.read_with(&cx, |_worktree: &Worktree, cx| file.to_proto(cx));
        log::debug!(
            "handle_download_file_by_path: using client-provided file_id={}",
            file_id
        );

        let state = proto::FileState {
            id: file_id,
            file: Some(proto_file),
            content_size: content.len() as u64,
        };

        log::debug!("handle_download_file_by_path: sending State message");
        session.send(proto::CreateFileForPeer {
            project_id,
            peer_id: Some(REMOTE_SERVER_PEER_ID),
            variant: Some(Variant::State(state)),
        })?;

        const CHUNK_SIZE: usize = 1024 * 1024; // 1MB chunks
        let num_chunks = content.len().div_ceil(CHUNK_SIZE);
        log::debug!(
            "handle_download_file_by_path: sending {} chunks",
            num_chunks
        );
        for (i, chunk) in content.chunks(CHUNK_SIZE).enumerate() {
            log::trace!(
                "handle_download_file_by_path: sending chunk {}/{}, size={}",
                i + 1,
                num_chunks,
                chunk.len()
            );
            session.send(proto::CreateFileForPeer {
                project_id,
                peer_id: Some(REMOTE_SERVER_PEER_ID),
                variant: Some(Variant::Chunk(proto::FileChunk {
                    file_id,
                    data: chunk.to_vec(),
                })),
            })?;
        }

        log::debug!(
            "handle_download_file_by_path: returning file_id={}",
            file_id
        );
        Ok(proto::DownloadFileResponse { file_id })
    }
797
798 pub async fn handle_open_new_buffer(
799 this: Entity<Self>,
800 _message: TypedEnvelope<proto::OpenNewBuffer>,
801 mut cx: AsyncApp,
802 ) -> Result<proto::OpenBufferResponse> {
803 let (buffer_store, buffer) = this.update(&mut cx, |this, cx| {
804 let buffer_store = this.buffer_store.clone();
805 let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
806 buffer_store.create_buffer(None, true, cx)
807 });
808 (buffer_store, buffer)
809 });
810
811 let buffer = buffer.await?;
812 let buffer_id = buffer.read_with(&cx, |b, _| b.remote_id());
813 buffer_store.update(&mut cx, |buffer_store, cx| {
814 buffer_store
815 .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
816 .detach_and_log_err(cx);
817 });
818
819 Ok(proto::OpenBufferResponse {
820 buffer_id: buffer_id.to_proto(),
821 })
822 }
823
824 async fn handle_toggle_lsp_logs(
825 _: Entity<Self>,
826 envelope: TypedEnvelope<proto::ToggleLspLogs>,
827 cx: AsyncApp,
828 ) -> Result<()> {
829 let server_id = LanguageServerId::from_proto(envelope.payload.server_id);
830 cx.update(|cx| {
831 let log_store = cx
832 .try_global::<GlobalLogStore>()
833 .map(|global_log_store| global_log_store.0.clone())
834 .context("lsp logs store is missing")?;
835 let toggled_log_kind =
836 match proto::toggle_lsp_logs::LogType::from_i32(envelope.payload.log_type)
837 .context("invalid log type")?
838 {
839 proto::toggle_lsp_logs::LogType::Log => LogKind::Logs,
840 proto::toggle_lsp_logs::LogType::Trace => LogKind::Trace,
841 proto::toggle_lsp_logs::LogType::Rpc => LogKind::Rpc,
842 };
843 log_store.update(cx, |log_store, _| {
844 log_store.toggle_lsp_logs(server_id, envelope.payload.enabled, toggled_log_kind);
845 });
846 anyhow::Ok(())
847 })?;
848
849 Ok(())
850 }
851
    /// Opens the server's settings file as a buffer, seeding it with the
    /// initial settings template when it's empty, and registers the
    /// buffer with the requesting peer.
    async fn handle_open_server_settings(
        this: Entity<Self>,
        _: TypedEnvelope<proto::OpenServerSettings>,
        mut cx: AsyncApp,
    ) -> Result<proto::OpenBufferResponse> {
        let settings_path = paths::settings_file();
        // `visible: false` — the settings worktree shouldn't show up as a
        // regular project worktree.
        let (worktree, path) = this
            .update(&mut cx, |this, cx| {
                this.worktree_store.update(cx, |worktree_store, cx| {
                    worktree_store.find_or_create_worktree(settings_path, false, cx)
                })
            })
            .await?;

        let (buffer, buffer_store) = this.update(&mut cx, |this, cx| {
            let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
                buffer_store.open_buffer(
                    ProjectPath {
                        worktree_id: worktree.read(cx).id(),
                        path,
                    },
                    cx,
                )
            });

            (buffer, this.buffer_store.clone())
        });

        let buffer = buffer.await?;

        let buffer_id = cx.update(|cx| {
            // A brand-new settings file starts empty; fill it with the
            // commented initial template so the user has a starting point.
            if buffer.read(cx).is_empty() {
                buffer.update(cx, |buffer, cx| {
                    buffer.edit([(0..0, initial_server_settings_content())], None, cx)
                });
            }

            let buffer_id = buffer.read(cx).remote_id();

            buffer_store.update(cx, |buffer_store, cx| {
                buffer_store
                    .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
                    .detach_and_log_err(cx);
            });

            buffer_id
        });

        Ok(proto::OpenBufferResponse {
            buffer_id: buffer_id.to_proto(),
        })
    }
904
905 async fn handle_spawn_kernel(
906 this: Entity<Self>,
907 envelope: TypedEnvelope<proto::SpawnKernel>,
908 cx: AsyncApp,
909 ) -> Result<proto::SpawnKernelResponse> {
910 let fs = this.update(&mut cx.clone(), |this, _| this.fs.clone());
911
912 let mut ports = Vec::new();
913 for _ in 0..5 {
914 let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
915 let port = listener.local_addr()?.port();
916 ports.push(port);
917 }
918
919 let connection_info = serde_json::json!({
920 "shell_port": ports[0],
921 "iopub_port": ports[1],
922 "stdin_port": ports[2],
923 "control_port": ports[3],
924 "hb_port": ports[4],
925 "ip": "127.0.0.1",
926 "key": uuid::Uuid::new_v4().to_string(),
927 "transport": "tcp",
928 "signature_scheme": "hmac-sha256",
929 "kernel_name": envelope.payload.kernel_name,
930 });
931
932 let connection_file_content = serde_json::to_string_pretty(&connection_info)?;
933 let kernel_id = uuid::Uuid::new_v4().to_string();
934
935 let connection_file_path = std::env::temp_dir().join(format!("kernel-{}.json", kernel_id));
936 fs.save(
937 &connection_file_path,
938 &connection_file_content.as_str().into(),
939 language::LineEnding::Unix,
940 )
941 .await?;
942
943 let working_directory = if envelope.payload.working_directory.is_empty() {
944 std::env::current_dir()
945 .ok()
946 .map(|p| p.to_string_lossy().into_owned())
947 } else {
948 Some(envelope.payload.working_directory)
949 };
950
951 // Spawn kernel (Assuming python for now, or we'd need to parse kernelspec logic here or pass the command)
952
953 // Spawn kernel
954 let spawn_kernel = |binary: &str, args: &[String]| {
955 let mut command = smol::process::Command::new(binary);
956
957 if !args.is_empty() {
958 for arg in args {
959 if arg == "{connection_file}" {
960 command.arg(&connection_file_path);
961 } else {
962 command.arg(arg);
963 }
964 }
965 } else {
966 command
967 .arg("-m")
968 .arg("ipykernel_launcher")
969 .arg("-f")
970 .arg(&connection_file_path);
971 }
972
973 // This ensures subprocesses spawned from the kernel use the correct Python environment
974 let python_bin_dir = std::path::Path::new(binary).parent();
975 if let Some(bin_dir) = python_bin_dir {
976 if let Some(path_var) = std::env::var_os("PATH") {
977 let mut paths = std::env::split_paths(&path_var).collect::<Vec<_>>();
978 paths.insert(0, bin_dir.to_path_buf());
979 if let Ok(new_path) = std::env::join_paths(paths) {
980 command.env("PATH", new_path);
981 }
982 }
983
984 if let Some(venv_root) = bin_dir.parent() {
985 command.env("VIRTUAL_ENV", venv_root.to_string_lossy().to_string());
986 }
987 }
988
989 if let Some(wd) = &working_directory {
990 command.current_dir(wd);
991 }
992 command.spawn()
993 };
994
995 // We need to manage the child process lifecycle
996 let child = if !envelope.payload.command.is_empty() {
997 spawn_kernel(&envelope.payload.command, &envelope.payload.args).context(format!(
998 "failed to spawn kernel process (command: {})",
999 envelope.payload.command
1000 ))?
1001 } else {
1002 spawn_kernel("python3", &[])
1003 .or_else(|_| spawn_kernel("python", &[]))
1004 .context("failed to spawn kernel process (tried python3 and python)")?
1005 };
1006
1007 this.update(&mut cx.clone(), |this, _cx| {
1008 this.kernels.insert(kernel_id.clone(), child);
1009 });
1010
1011 Ok(proto::SpawnKernelResponse {
1012 kernel_id,
1013 connection_file: connection_file_content,
1014 })
1015 }
1016
1017 async fn handle_kill_kernel(
1018 this: Entity<Self>,
1019 envelope: TypedEnvelope<proto::KillKernel>,
1020 mut cx: AsyncApp,
1021 ) -> Result<proto::Ack> {
1022 let kernel_id = envelope.payload.kernel_id;
1023 let child = this.update(&mut cx, |this, _| this.kernels.remove(&kernel_id));
1024 if let Some(mut child) = child {
1025 child.kill().log_err();
1026 }
1027 Ok(proto::Ack {})
1028 }
1029
    /// Runs a project-wide search on the host and streams the ids of matching
    /// buffers back to the client in chunks.
    ///
    /// The search itself runs in a spawned task registered with the buffer
    /// store under `(peer_id, handle)`, so a later
    /// `FindSearchCandidatesCancelled` message can drop it. This handler acks
    /// immediately without waiting for the search to finish.
    async fn handle_find_search_candidates(
        this: Entity<Self>,
        envelope: TypedEnvelope<proto::FindSearchCandidates>,
        mut cx: AsyncApp,
    ) -> Result<proto::Ack> {
        use futures::stream::StreamExt as _;

        let peer_id = envelope.original_sender_id.unwrap_or(envelope.sender_id);
        let message = envelope.payload;
        let query = SearchQuery::from_proto(
            message.query.context("missing query field")?,
            PathStyle::local(),
        )?;

        let project_id = message.project_id;
        let buffer_store = this.read_with(&cx, |this, _| this.buffer_store.clone());
        let handle = message.handle;
        // Second handle to the buffer store: `buffer_store` is moved into the
        // search task below; this clone is used afterwards to register that
        // task for cancellation.
        let _buffer_store = buffer_store.clone();
        let client = this.read_with(&cx, |this, _| this.session.clone());
        let task = cx.spawn(async move |cx| {
            // Kick off the local search; matching buffers arrive on `results.rx`.
            let results = this.update(cx, |this, cx| {
                project::Search::local(
                    this.fs.clone(),
                    this.buffer_store.clone(),
                    this.worktree_store.clone(),
                    message.limit as _,
                    cx,
                )
                .into_handle(query, cx)
                .matching_buffers(cx)
            });
            // Batch matched buffer ids so we don't send one RPC per match.
            let (batcher, batches) =
                project::project_search::AdaptiveBatcher::new(cx.background_executor());
            let mut new_matches = Box::pin(results.rx);

            // Background task that forwards each accumulated batch of matches
            // to the client as a `Matches` chunk.
            let sender_task = cx.background_executor().spawn({
                let client = client.clone();
                async move {
                    let mut batches = std::pin::pin!(batches);
                    while let Some(buffer_ids) = batches.next().await {
                        client
                            .request(proto::FindSearchCandidatesChunk {
                                handle,
                                peer_id: Some(peer_id),
                                project_id,
                                variant: Some(
                                    proto::find_search_candidates_chunk::Variant::Matches(
                                        proto::FindSearchCandidatesMatches { buffer_ids },
                                    ),
                                ),
                            })
                            .await?;
                    }
                    anyhow::Ok(())
                }
            });

            while let Some(buffer) = new_matches.next().await {
                // Replicate the buffer to the client before reporting it as a
                // match, so the client can resolve the id it receives.
                let _ = buffer_store
                    .update(cx, |this, cx| {
                        this.create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
                    })
                    .await;
                let buffer_id = buffer.read_with(cx, |this, _| this.remote_id().to_proto());
                batcher.push(buffer_id).await;
            }
            // Flush any final partial batch, then wait for the sender to
            // deliver everything before signalling completion.
            batcher.flush().await;

            sender_task.await?;

            // Tell the client the stream of matches is complete.
            client
                .request(proto::FindSearchCandidatesChunk {
                    handle,
                    peer_id: Some(peer_id),
                    project_id,
                    variant: Some(proto::find_search_candidates_chunk::Variant::Done(
                        proto::FindSearchCandidatesDone {},
                    )),
                })
                .await?;
            anyhow::Ok(())
        });
        // Register the running search so a cancel request can drop the task.
        _buffer_store.update(&mut cx, |this, _| {
            this.register_ongoing_project_search((peer_id, handle), task);
        });

        Ok(proto::Ack {})
    }
1118
    /// Cancels an in-flight project search previously started by
    /// `handle_find_search_candidates`. This message travels from the client
    /// to the host; the actual cancellation is delegated to the buffer store,
    /// which owns the registered search tasks.
    async fn handle_find_search_candidates_cancel(
        this: Entity<Self>,
        envelope: TypedEnvelope<proto::FindSearchCandidatesCancelled>,
        mut cx: AsyncApp,
    ) -> Result<()> {
        let buffer_store = this.read_with(&mut cx, |this, _| this.buffer_store.clone());
        BufferStore::handle_find_search_candidates_cancel(buffer_store, envelope, cx).await
    }
1128
1129 async fn handle_list_remote_directory(
1130 this: Entity<Self>,
1131 envelope: TypedEnvelope<proto::ListRemoteDirectory>,
1132 cx: AsyncApp,
1133 ) -> Result<proto::ListRemoteDirectoryResponse> {
1134 use smol::stream::StreamExt;
1135 let fs = cx.read_entity(&this, |this, _| this.fs.clone());
1136 let expanded = PathBuf::from(shellexpand::tilde(&envelope.payload.path).to_string());
1137 let check_info = envelope
1138 .payload
1139 .config
1140 .as_ref()
1141 .is_some_and(|config| config.is_dir);
1142
1143 let mut entries = Vec::new();
1144 let mut entry_info = Vec::new();
1145 let mut response = fs.read_dir(&expanded).await?;
1146 while let Some(path) = response.next().await {
1147 let path = path?;
1148 if let Some(file_name) = path.file_name() {
1149 entries.push(file_name.to_string_lossy().into_owned());
1150 if check_info {
1151 let is_dir = fs.is_dir(&path).await;
1152 entry_info.push(proto::EntryInfo { is_dir });
1153 }
1154 }
1155 }
1156 Ok(proto::ListRemoteDirectoryResponse {
1157 entries,
1158 entry_info,
1159 })
1160 }
1161
1162 async fn handle_get_path_metadata(
1163 this: Entity<Self>,
1164 envelope: TypedEnvelope<proto::GetPathMetadata>,
1165 cx: AsyncApp,
1166 ) -> Result<proto::GetPathMetadataResponse> {
1167 let fs = cx.read_entity(&this, |this, _| this.fs.clone());
1168 let expanded = PathBuf::from(shellexpand::tilde(&envelope.payload.path).to_string());
1169
1170 let metadata = fs.metadata(&expanded).await?;
1171 let is_dir = metadata.map(|metadata| metadata.is_dir).unwrap_or(false);
1172
1173 Ok(proto::GetPathMetadataResponse {
1174 exists: metadata.is_some(),
1175 is_dir,
1176 path: expanded.to_string_lossy().into_owned(),
1177 })
1178 }
1179
    /// Shuts down the remote server process in response to a client request.
    ///
    /// The shutdown/quit is performed in a detached task so this handler can
    /// return the `Ack` before the app begins tearing down.
    async fn handle_shutdown_remote_server(
        _this: Entity<Self>,
        _envelope: TypedEnvelope<proto::ShutdownRemoteServer>,
        cx: AsyncApp,
    ) -> Result<proto::Ack> {
        cx.spawn(async move |cx| {
            cx.update(|cx| {
                // TODO: This is a hack, because in a headless project, shutdown isn't executed
                // when calling quit, but it should be.
                cx.shutdown();
                cx.quit();
            })
        })
        .detach();

        Ok(proto::Ack {})
    }
1197
    /// Responds to a keepalive ping from the client with an `Ack`.
    pub async fn handle_ping(
        _this: Entity<Self>,
        _envelope: TypedEnvelope<proto::Ping>,
        _cx: AsyncApp,
    ) -> Result<proto::Ack> {
        log::debug!("Received ping from client");
        Ok(proto::Ack {})
    }
1206
1207 async fn handle_get_processes(
1208 _this: Entity<Self>,
1209 _envelope: TypedEnvelope<proto::GetProcesses>,
1210 _cx: AsyncApp,
1211 ) -> Result<proto::GetProcessesResponse> {
1212 let mut processes = Vec::new();
1213 let refresh_kind = RefreshKind::nothing().with_processes(
1214 ProcessRefreshKind::nothing()
1215 .without_tasks()
1216 .with_cmd(UpdateKind::Always),
1217 );
1218
1219 for process in System::new_with_specifics(refresh_kind)
1220 .processes()
1221 .values()
1222 {
1223 let name = process.name().to_string_lossy().into_owned();
1224 let command = process
1225 .cmd()
1226 .iter()
1227 .map(|s| s.to_string_lossy().into_owned())
1228 .collect::<Vec<_>>();
1229
1230 processes.push(proto::ProcessInfo {
1231 pid: process.pid().as_u32(),
1232 name,
1233 command,
1234 });
1235 }
1236
1237 processes.sort_by_key(|p| p.name.clone());
1238
1239 Ok(proto::GetProcessesResponse { processes })
1240 }
1241
1242 async fn handle_get_remote_profiling_data(
1243 this: Entity<Self>,
1244 envelope: TypedEnvelope<proto::GetRemoteProfilingData>,
1245 cx: AsyncApp,
1246 ) -> Result<proto::GetRemoteProfilingDataResponse> {
1247 let foreground_only = envelope.payload.foreground_only;
1248
1249 let (deltas, now_nanos) = cx.update(|cx| {
1250 let dispatcher = cx.foreground_executor().dispatcher();
1251 let timings = if foreground_only {
1252 vec![dispatcher.get_current_thread_timings()]
1253 } else {
1254 dispatcher.get_all_timings()
1255 };
1256 this.update(cx, |this, _cx| {
1257 let deltas = this.profiling_collector.collect_unseen(timings);
1258 let now_nanos = Instant::now()
1259 .duration_since(this.profiling_collector.startup_time())
1260 .as_nanos() as u64;
1261 (deltas, now_nanos)
1262 })
1263 });
1264
1265 let threads = deltas
1266 .into_iter()
1267 .map(|delta| proto::RemoteProfilingThread {
1268 thread_name: delta.thread_name,
1269 thread_id: delta.thread_id,
1270 timings: delta
1271 .new_timings
1272 .into_iter()
1273 .map(|t| proto::RemoteProfilingTiming {
1274 location: Some(proto::RemoteProfilingLocation {
1275 file: t.location.file.to_string(),
1276 line: t.location.line,
1277 column: t.location.column,
1278 }),
1279 start_nanos: t.start as u64,
1280 duration_nanos: t.duration as u64,
1281 })
1282 .collect(),
1283 })
1284 .collect();
1285
1286 Ok(proto::GetRemoteProfilingDataResponse { threads, now_nanos })
1287 }
1288
1289 async fn handle_get_directory_environment(
1290 this: Entity<Self>,
1291 envelope: TypedEnvelope<proto::GetDirectoryEnvironment>,
1292 mut cx: AsyncApp,
1293 ) -> Result<proto::DirectoryEnvironment> {
1294 let shell = task::shell_from_proto(envelope.payload.shell.context("missing shell")?)?;
1295 let directory = PathBuf::from(envelope.payload.directory);
1296 let environment = this
1297 .update(&mut cx, |this, cx| {
1298 this.environment.update(cx, |environment, cx| {
1299 environment.local_directory_environment(&shell, directory.into(), cx)
1300 })
1301 })
1302 .await
1303 .context("failed to get directory environment")?
1304 .into_iter()
1305 .collect();
1306 Ok(proto::DirectoryEnvironment { environment })
1307 }
1308}
1309
1310fn prompt_to_proto(
1311 prompt: &project::LanguageServerPromptRequest,
1312) -> proto::language_server_prompt_request::Level {
1313 match prompt.level {
1314 PromptLevel::Info => proto::language_server_prompt_request::Level::Info(
1315 proto::language_server_prompt_request::Info {},
1316 ),
1317 PromptLevel::Warning => proto::language_server_prompt_request::Level::Warning(
1318 proto::language_server_prompt_request::Warning {},
1319 ),
1320 PromptLevel::Critical => proto::language_server_prompt_request::Level::Critical(
1321 proto::language_server_prompt_request::Critical {},
1322 ),
1323 }
1324}