1use anyhow::{Context as _, Result, anyhow};
2use client::ProjectId;
3use collections::HashMap;
4use collections::HashSet;
5use language::File;
6use lsp::LanguageServerId;
7
8use extension::ExtensionHostProxy;
9use extension_host::headless_host::HeadlessExtensionStore;
10use fs::Fs;
11use gpui::{App, AppContext as _, AsyncApp, Context, Entity, PromptLevel};
12use http_client::HttpClient;
13use language::{Buffer, BufferEvent, LanguageRegistry, proto::serialize_operation};
14use node_runtime::NodeRuntime;
15use project::{
16 AgentRegistryStore, LspStore, LspStoreEvent, ManifestTree, PrettierStore, ProjectEnvironment,
17 ProjectPath, ToolchainStore, WorktreeId,
18 agent_server_store::AgentServerStore,
19 buffer_store::{BufferStore, BufferStoreEvent},
20 context_server_store::ContextServerStore,
21 debugger::{breakpoint_store::BreakpointStore, dap_store::DapStore},
22 git_store::GitStore,
23 image_store::ImageId,
24 lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind, LogKind},
25 project_settings::SettingsObserver,
26 search::SearchQuery,
27 task_store::TaskStore,
28 trusted_worktrees::{PathTrust, RemoteHostLocation, TrustedWorktrees},
29 worktree_store::{WorktreeIdCounter, WorktreeStore},
30};
31use rpc::{
32 AnyProtoClient, TypedEnvelope,
33 proto::{self, REMOTE_SERVER_PEER_ID, REMOTE_SERVER_PROJECT_ID},
34};
35use smol::process::Child;
36
37use settings::initial_server_settings_content;
38use std::{
39 num::NonZeroU64,
40 path::{Path, PathBuf},
41 sync::{
42 Arc,
43 atomic::{AtomicU64, AtomicUsize, Ordering},
44 },
45 time::Instant,
46};
47use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind};
48use util::{ResultExt, paths::PathStyle, rel_path::RelPath};
49use worktree::Worktree;
50
/// Server-side counterpart of a client `Project`, running inside the headless
/// remote server and driven entirely over the `session` RPC connection.
pub struct HeadlessProject {
    pub fs: Arc<dyn Fs>,
    /// RPC connection to the client that owns this project.
    pub session: AnyProtoClient,
    pub worktree_store: Entity<WorktreeStore>,
    pub buffer_store: Entity<BufferStore>,
    pub lsp_store: Entity<LspStore>,
    pub task_store: Entity<TaskStore>,
    pub dap_store: Entity<DapStore>,
    pub breakpoint_store: Entity<BreakpointStore>,
    pub agent_server_store: Entity<AgentServerStore>,
    pub context_server_store: Entity<ContextServerStore>,
    pub settings_observer: Entity<SettingsObserver>,
    /// Shared counter handed to locally-created worktrees (see `handle_add_worktree`).
    pub next_entry_id: Arc<AtomicUsize>,
    pub languages: Arc<LanguageRegistry>,
    pub extensions: Entity<HeadlessExtensionStore>,
    pub git_store: Entity<GitStore>,
    pub environment: Entity<ProjectEnvironment>,
    pub profiling_collector: gpui::ProfilingCollector,
    // Used mostly to keep alive the toolchain store for RPC handlers.
    // Local variant is used within LSP store, but that's a separate entity.
    pub _toolchain_store: Entity<ToolchainStore>,
    /// Jupyter kernel child processes spawned via `handle_spawn_kernel`,
    /// keyed by kernel id (UUID string); removed/killed in `handle_kill_kernel`.
    pub kernels: HashMap<String, Child>,
}
74
/// Bundle of global services the remote-server entry point passes into
/// `HeadlessProject::new`.
pub struct HeadlessAppState {
    pub session: AnyProtoClient,
    pub fs: Arc<dyn Fs>,
    pub http_client: Arc<dyn HttpClient>,
    pub node_runtime: NodeRuntime,
    pub languages: Arc<LanguageRegistry>,
    pub extension_host_proxy: Arc<ExtensionHostProxy>,
    /// Process start time; used to seed the profiling collector.
    pub startup_time: Instant,
}
84
85impl HeadlessProject {
    /// One-time global initialization: registers settings and the
    /// server-side language-server log store.
    pub fn init(cx: &mut App) {
        settings::init(cx);
        log_store::init(true, cx);
    }
90
    /// Builds the headless project: constructs every store a local project
    /// would own, marks each as shared over the remote `session`, and
    /// registers all RPC handlers.
    ///
    /// NOTE(review): construction order matters — later stores read earlier
    /// ones (e.g. the DAP/LSP/task stores read the toolchain store), so keep
    /// the ordering when modifying.
    pub fn new(
        HeadlessAppState {
            session,
            fs,
            http_client,
            node_runtime,
            languages,
            extension_host_proxy: proxy,
            startup_time,
        }: HeadlessAppState,
        init_worktree_trust: bool,
        cx: &mut Context<Self>,
    ) -> Self {
        debug_adapter_extension::init(proxy.clone(), cx);
        languages::init(languages.clone(), fs.clone(), node_runtime.clone(), cx);

        // The worktree store comes first; nearly every other store depends on it.
        let worktree_store = cx.new(|cx| {
            let mut store = WorktreeStore::local(true, fs.clone(), WorktreeIdCounter::get(cx));
            store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            store
        });

        // Optionally track worktree trust, reporting trust decisions back to
        // the client over the session.
        if init_worktree_trust {
            project::trusted_worktrees::track_worktree_trust(
                worktree_store.clone(),
                None::<RemoteHostLocation>,
                Some((session.clone(), ProjectId(REMOTE_SERVER_PROJECT_ID))),
                None,
                cx,
            );
        }

        let environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, true, cx));
        let manifest_tree = ManifestTree::new(worktree_store.clone(), cx);
        let toolchain_store = cx.new(|cx| {
            ToolchainStore::local(
                languages.clone(),
                worktree_store.clone(),
                environment.clone(),
                manifest_tree.clone(),
                cx,
            )
        });

        let buffer_store = cx.new(|cx| {
            let mut buffer_store = BufferStore::local(worktree_store.clone(), cx);
            buffer_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            buffer_store
        });

        let breakpoint_store = cx.new(|_| {
            let mut breakpoint_store =
                BreakpointStore::local(worktree_store.clone(), buffer_store.clone());
            breakpoint_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone());

            breakpoint_store
        });

        let dap_store = cx.new(|cx| {
            let mut dap_store = DapStore::new_local(
                http_client.clone(),
                node_runtime.clone(),
                fs.clone(),
                environment.clone(),
                toolchain_store.read(cx).as_language_toolchain_store(),
                worktree_store.clone(),
                breakpoint_store.clone(),
                true,
                cx,
            );
            dap_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            dap_store
        });

        let git_store = cx.new(|cx| {
            let mut store = GitStore::local(
                &worktree_store,
                buffer_store.clone(),
                environment.clone(),
                fs.clone(),
                cx,
            );
            store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            store
        });

        let prettier_store = cx.new(|cx| {
            PrettierStore::new(
                node_runtime.clone(),
                fs.clone(),
                languages.clone(),
                worktree_store.clone(),
                cx,
            )
        });

        let task_store = cx.new(|cx| {
            let mut task_store = TaskStore::local(
                buffer_store.downgrade(),
                worktree_store.clone(),
                toolchain_store.read(cx).as_language_toolchain_store(),
                environment.clone(),
                git_store.clone(),
                cx,
            );
            task_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            task_store
        });
        let settings_observer = cx.new(|cx| {
            let mut observer = SettingsObserver::new_local(
                fs.clone(),
                worktree_store.clone(),
                task_store.clone(),
                true,
                cx,
            );
            observer.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            observer
        });

        let lsp_store = cx.new(|cx| {
            let mut lsp_store = LspStore::new_local(
                buffer_store.clone(),
                worktree_store.clone(),
                prettier_store.clone(),
                toolchain_store
                    .read(cx)
                    .as_local_store()
                    .expect("Toolchain store to be local")
                    .clone(),
                environment.clone(),
                manifest_tree,
                languages.clone(),
                http_client.clone(),
                fs.clone(),
                cx,
            );
            lsp_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            lsp_store
        });

        AgentRegistryStore::init_global(cx, fs.clone(), http_client.clone());

        let agent_server_store = cx.new(|cx| {
            let mut agent_server_store = AgentServerStore::local(
                node_runtime.clone(),
                fs.clone(),
                environment.clone(),
                http_client.clone(),
                cx,
            );
            agent_server_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone(), cx);
            agent_server_store
        });

        let context_server_store = cx.new(|cx| {
            let mut context_server_store =
                ContextServerStore::local(worktree_store.clone(), None, true, cx);
            context_server_store.shared(REMOTE_SERVER_PROJECT_ID, session.clone());
            context_server_store
        });

        // Mirror LSP lifecycle/events to the log store and the client.
        cx.subscribe(&lsp_store, Self::on_lsp_store_event).detach();
        language_extension::init(
            language_extension::LspAccess::ViaLspStore(lsp_store.clone()),
            proxy.clone(),
            languages.clone(),
        );

        // Forward local edit operations of every newly-added buffer to the
        // client (see `on_buffer_event`).
        cx.subscribe(&buffer_store, |_this, _buffer_store, event, cx| {
            if let BufferStoreEvent::BufferAdded(buffer) = event {
                cx.subscribe(buffer, Self::on_buffer_event).detach();
            }
        })
        .detach();

        let extensions = HeadlessExtensionStore::new(
            fs.clone(),
            http_client.clone(),
            paths::remote_extensions_dir().to_path_buf(),
            proxy,
            node_runtime,
            cx,
        );

        // local_machine -> ssh handlers
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &worktree_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &buffer_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &cx.entity());
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &lsp_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &task_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &toolchain_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &dap_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &breakpoint_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &settings_observer);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &git_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &agent_server_store);
        session.subscribe_to_entity(REMOTE_SERVER_PROJECT_ID, &context_server_store);

        session.add_request_handler(cx.weak_entity(), Self::handle_list_remote_directory);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_path_metadata);
        session.add_request_handler(cx.weak_entity(), Self::handle_shutdown_remote_server);
        session.add_request_handler(cx.weak_entity(), Self::handle_ping);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_processes);
        session.add_request_handler(cx.weak_entity(), Self::handle_get_remote_profiling_data);

        session.add_entity_request_handler(Self::handle_add_worktree);
        session.add_request_handler(cx.weak_entity(), Self::handle_remove_worktree);

        session.add_entity_request_handler(Self::handle_open_buffer_by_path);
        session.add_entity_request_handler(Self::handle_open_new_buffer);
        session.add_entity_request_handler(Self::handle_find_search_candidates);
        session.add_entity_request_handler(Self::handle_open_server_settings);
        session.add_entity_request_handler(Self::handle_get_directory_environment);
        session.add_entity_message_handler(Self::handle_toggle_lsp_logs);
        session.add_entity_request_handler(Self::handle_open_image_by_path);
        session.add_entity_request_handler(Self::handle_trust_worktrees);
        session.add_entity_request_handler(Self::handle_restrict_worktrees);
        session.add_entity_request_handler(Self::handle_download_file_by_path);

        session.add_entity_message_handler(Self::handle_find_search_candidates_cancel);
        session.add_entity_request_handler(BufferStore::handle_update_buffer);
        session.add_entity_message_handler(BufferStore::handle_close_buffer);

        session.add_request_handler(
            extensions.downgrade(),
            HeadlessExtensionStore::handle_sync_extensions,
        );
        session.add_request_handler(
            extensions.downgrade(),
            HeadlessExtensionStore::handle_install_extension,
        );

        session.add_request_handler(cx.weak_entity(), Self::handle_spawn_kernel);
        session.add_request_handler(cx.weak_entity(), Self::handle_kill_kernel);

        // Per-store handler registration for the session.
        BufferStore::init(&session);
        WorktreeStore::init(&session);
        SettingsObserver::init(&session);
        LspStore::init(&session);
        TaskStore::init(Some(&session));
        ToolchainStore::init(&session);
        DapStore::init(&session, cx);
        // todo(debugger): Re init breakpoint store when we set it up for collab
        BreakpointStore::init(&session);
        GitStore::init(&session);
        AgentServerStore::init_headless(&session);
        ContextServerStore::init_headless(&session);

        HeadlessProject {
            next_entry_id: Default::default(),
            session,
            settings_observer,
            fs,
            worktree_store,
            buffer_store,
            lsp_store,
            task_store,
            dap_store,
            breakpoint_store,
            agent_server_store,
            context_server_store,
            languages,
            extensions,
            git_store,
            environment,
            profiling_collector: gpui::ProfilingCollector::new(startup_time),
            _toolchain_store: toolchain_store,
            kernels: Default::default(),
        }
    }
363
364 fn on_buffer_event(
365 &mut self,
366 buffer: Entity<Buffer>,
367 event: &BufferEvent,
368 cx: &mut Context<Self>,
369 ) {
370 if let BufferEvent::Operation {
371 operation,
372 is_local: true,
373 } = event
374 {
375 cx.background_spawn(self.session.request(proto::UpdateBuffer {
376 project_id: REMOTE_SERVER_PROJECT_ID,
377 buffer_id: buffer.read(cx).remote_id().to_proto(),
378 operations: vec![serialize_operation(operation)],
379 }))
380 .detach()
381 }
382 }
383
384 fn on_lsp_store_event(
385 &mut self,
386 lsp_store: Entity<LspStore>,
387 event: &LspStoreEvent,
388 cx: &mut Context<Self>,
389 ) {
390 match event {
391 LspStoreEvent::LanguageServerAdded(id, name, worktree_id) => {
392 let log_store = cx
393 .try_global::<GlobalLogStore>()
394 .map(|lsp_logs| lsp_logs.0.clone());
395 if let Some(log_store) = log_store {
396 log_store.update(cx, |log_store, cx| {
397 log_store.add_language_server(
398 LanguageServerKind::LocalSsh {
399 lsp_store: self.lsp_store.downgrade(),
400 },
401 *id,
402 Some(name.clone()),
403 *worktree_id,
404 lsp_store.read(cx).language_server_for_id(*id),
405 cx,
406 );
407 });
408 }
409 }
410 LspStoreEvent::LanguageServerRemoved(id) => {
411 let log_store = cx
412 .try_global::<GlobalLogStore>()
413 .map(|lsp_logs| lsp_logs.0.clone());
414 if let Some(log_store) = log_store {
415 log_store.update(cx, |log_store, cx| {
416 log_store.remove_language_server(*id, cx);
417 });
418 }
419 }
420 LspStoreEvent::LanguageServerUpdate {
421 language_server_id,
422 name,
423 message,
424 } => {
425 self.session
426 .send(proto::UpdateLanguageServer {
427 project_id: REMOTE_SERVER_PROJECT_ID,
428 server_name: name.as_ref().map(|name| name.to_string()),
429 language_server_id: language_server_id.to_proto(),
430 variant: Some(message.clone()),
431 })
432 .log_err();
433 }
434 LspStoreEvent::Notification(message) => {
435 self.session
436 .send(proto::Toast {
437 project_id: REMOTE_SERVER_PROJECT_ID,
438 notification_id: "lsp".to_string(),
439 message: message.clone(),
440 })
441 .log_err();
442 }
443 LspStoreEvent::LanguageServerPrompt(prompt) => {
444 let request = self.session.request(proto::LanguageServerPromptRequest {
445 project_id: REMOTE_SERVER_PROJECT_ID,
446 actions: prompt
447 .actions
448 .iter()
449 .map(|action| action.title.to_string())
450 .collect(),
451 level: Some(prompt_to_proto(prompt)),
452 lsp_name: prompt.lsp_name.clone(),
453 message: prompt.message.clone(),
454 });
455 let prompt = prompt.clone();
456 cx.background_spawn(async move {
457 let response = request.await?;
458 if let Some(action_response) = response.action_response {
459 prompt.respond(action_response as usize).await;
460 }
461 anyhow::Ok(())
462 })
463 .detach();
464 }
465 _ => {}
466 }
467 }
468
    /// Adds a new worktree rooted at the (tilde-expanded) path from the
    /// request, returning its id and canonicalized root path.
    ///
    /// If the path does not exist, its parent is canonicalized instead
    /// (falling back to the home directory for bare file names) and the file
    /// name re-appended, so a worktree can be created for a file that does
    /// not exist yet.
    pub async fn handle_add_worktree(
        this: Entity<Self>,
        message: TypedEnvelope<proto::AddWorktree>,
        mut cx: AsyncApp,
    ) -> Result<proto::AddWorktreeResponse> {
        use client::ErrorCodeExt;
        let fs = this.read_with(&cx, |this, _| this.fs.clone());
        let path = PathBuf::from(shellexpand::tilde(&message.payload.path).to_string());

        // Canonicalize the path itself, or — when it doesn't exist — its
        // parent, re-attaching the file name afterwards.
        let canonicalized = match fs.canonicalize(&path).await {
            Ok(path) => path,
            Err(e) => {
                let mut parent = path
                    .parent()
                    .ok_or(e)
                    .with_context(|| format!("{path:?} does not exist"))?;
                if parent == Path::new("") {
                    parent = util::paths::home_dir();
                }
                // Surface a typed error so the client can distinguish
                // "path does not exist" from other failures.
                let parent = fs.canonicalize(parent).await.map_err(|_| {
                    anyhow!(
                        proto::ErrorCode::DevServerProjectPathDoesNotExist
                            .with_tag("path", path.to_string_lossy().as_ref())
                    )
                })?;
                if let Some(file_name) = path.file_name() {
                    parent.join(file_name)
                } else {
                    parent
                }
            }
        };
        let next_worktree_id = this
            .update(&mut cx, |this, cx| {
                this.worktree_store
                    .update(cx, |worktree_store, _| worktree_store.next_worktree_id())
            })
            .await?;
        let worktree = this
            .read_with(&cx.clone(), |this, _| {
                Worktree::local(
                    Arc::from(canonicalized.as_path()),
                    message.payload.visible,
                    this.fs.clone(),
                    this.next_entry_id.clone(),
                    true,
                    next_worktree_id,
                    &mut cx,
                )
            })
            .await?;

        // Build the response before registering the worktree (see the
        // ordering rationale below).
        let response = this.read_with(&cx, |_, cx| {
            let worktree = worktree.read(cx);
            proto::AddWorktreeResponse {
                worktree_id: worktree.id().to_proto(),
                canonicalized_path: canonicalized.to_string_lossy().into_owned(),
                root_repo_common_dir: worktree
                    .root_repo_common_dir()
                    .map(|p| p.to_string_lossy().into_owned()),
            }
        });

        // We spawn this asynchronously, so that we can send the response back
        // *before* `worktree_store.add()` can send out UpdateProject requests
        // to the client about the new worktree.
        //
        // That lets the client manage the reference/handles of the newly-added
        // worktree, before getting interrupted by an UpdateProject request.
        //
        // This fixes the problem of the client sending the AddWorktree request,
        // headless project sending out a project update, client receiving it
        // and immediately dropping the reference of the new client, causing it
        // to be dropped on the headless project, and the client only then
        // receiving a response to AddWorktree.
        cx.spawn(async move |cx| {
            this.update(cx, |this, cx| {
                this.worktree_store.update(cx, |worktree_store, cx| {
                    worktree_store.add(&worktree, cx);
                });
            });
        })
        .detach();

        Ok(response)
    }
555
556 pub async fn handle_remove_worktree(
557 this: Entity<Self>,
558 envelope: TypedEnvelope<proto::RemoveWorktree>,
559 mut cx: AsyncApp,
560 ) -> Result<proto::Ack> {
561 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
562 this.update(&mut cx, |this, cx| {
563 this.worktree_store.update(cx, |worktree_store, cx| {
564 worktree_store.remove_worktree(worktree_id, cx);
565 });
566 });
567 Ok(proto::Ack {})
568 }
569
570 pub async fn handle_open_buffer_by_path(
571 this: Entity<Self>,
572 message: TypedEnvelope<proto::OpenBufferByPath>,
573 mut cx: AsyncApp,
574 ) -> Result<proto::OpenBufferResponse> {
575 let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
576 let path = RelPath::from_proto(&message.payload.path)?;
577 let (buffer_store, buffer) = this.update(&mut cx, |this, cx| {
578 let buffer_store = this.buffer_store.clone();
579 let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
580 buffer_store.open_buffer(ProjectPath { worktree_id, path }, cx)
581 });
582 (buffer_store, buffer)
583 });
584
585 let buffer = buffer.await?;
586 let buffer_id = buffer.read_with(&cx, |b, _| b.remote_id());
587 buffer_store.update(&mut cx, |buffer_store, cx| {
588 buffer_store
589 .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
590 .detach_and_log_err(cx);
591 });
592
593 Ok(proto::OpenBufferResponse {
594 buffer_id: buffer_id.to_proto(),
595 })
596 }
597
    /// Loads an image file from a worktree and streams it to the requesting
    /// peer: one `State` message describing the image, followed by 1MB
    /// `Chunk` messages with the raw bytes. Returns the assigned image id.
    pub async fn handle_open_image_by_path(
        this: Entity<Self>,
        message: TypedEnvelope<proto::OpenImageByPath>,
        mut cx: AsyncApp,
    ) -> Result<proto::OpenImageResponse> {
        // Process-local monotonically-increasing image id, starting at 1 so
        // it always fits in `NonZeroU64`.
        static NEXT_ID: AtomicU64 = AtomicU64::new(1);
        let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
        let path = RelPath::from_proto(&message.payload.path)?;
        let project_id = message.payload.project_id;
        use proto::create_image_for_peer::Variant;

        let (worktree_store, session) = this.read_with(&cx, |this, _| {
            (this.worktree_store.clone(), this.session.clone())
        });

        let worktree = worktree_store
            .read_with(&cx, |store, cx| store.worktree_for_id(worktree_id, cx))
            .context("worktree not found")?;

        let load_task = worktree.update(&mut cx, |worktree, cx| {
            worktree.load_binary_file(path.as_ref(), cx)
        });

        let loaded_file = load_task.await?;
        let content = loaded_file.content;
        let file = loaded_file.file;

        let proto_file = worktree.read_with(&cx, |_worktree, cx| file.to_proto(cx));
        let image_id =
            ImageId::from(NonZeroU64::new(NEXT_ID.fetch_add(1, Ordering::Relaxed)).unwrap());

        // Best-effort format detection from the file's magic bytes.
        let format = image::guess_format(&content)
            .map(|f| format!("{:?}", f).to_lowercase())
            .unwrap_or_else(|_| "unknown".to_string());

        let state = proto::ImageState {
            id: image_id.to_proto(),
            file: Some(proto_file),
            content_size: content.len() as u64,
            format,
        };

        // The `State` message must precede the chunks so the receiver knows
        // the total size and metadata up front.
        session.send(proto::CreateImageForPeer {
            project_id,
            peer_id: Some(REMOTE_SERVER_PEER_ID),
            variant: Some(Variant::State(state)),
        })?;

        const CHUNK_SIZE: usize = 1024 * 1024; // 1MB chunks
        for chunk in content.chunks(CHUNK_SIZE) {
            session.send(proto::CreateImageForPeer {
                project_id,
                peer_id: Some(REMOTE_SERVER_PEER_ID),
                variant: Some(Variant::Chunk(proto::ImageChunk {
                    image_id: image_id.to_proto(),
                    data: chunk.to_vec(),
                })),
            })?;
        }

        Ok(proto::OpenImageResponse {
            image_id: image_id.to_proto(),
        })
    }
662
663 pub async fn handle_trust_worktrees(
664 this: Entity<Self>,
665 envelope: TypedEnvelope<proto::TrustWorktrees>,
666 mut cx: AsyncApp,
667 ) -> Result<proto::Ack> {
668 let trusted_worktrees = cx
669 .update(|cx| TrustedWorktrees::try_get_global(cx))
670 .context("missing trusted worktrees")?;
671 let worktree_store = this.read_with(&cx, |project, _| project.worktree_store.clone());
672 trusted_worktrees.update(&mut cx, |trusted_worktrees, cx| {
673 trusted_worktrees.trust(
674 &worktree_store,
675 envelope
676 .payload
677 .trusted_paths
678 .into_iter()
679 .filter_map(PathTrust::from_proto)
680 .collect(),
681 cx,
682 );
683 });
684 Ok(proto::Ack {})
685 }
686
687 pub async fn handle_restrict_worktrees(
688 this: Entity<Self>,
689 envelope: TypedEnvelope<proto::RestrictWorktrees>,
690 mut cx: AsyncApp,
691 ) -> Result<proto::Ack> {
692 let trusted_worktrees = cx
693 .update(|cx| TrustedWorktrees::try_get_global(cx))
694 .context("missing trusted worktrees")?;
695 let worktree_store = this.read_with(&cx, |project, _| project.worktree_store.downgrade());
696 trusted_worktrees.update(&mut cx, |trusted_worktrees, cx| {
697 let restricted_paths = envelope
698 .payload
699 .worktree_ids
700 .into_iter()
701 .map(WorktreeId::from_proto)
702 .map(PathTrust::Worktree)
703 .collect::<HashSet<_>>();
704 trusted_worktrees.restrict(worktree_store, restricted_paths, cx);
705 });
706 Ok(proto::Ack {})
707 }
708
    /// Streams a file from a worktree to the requesting peer for download:
    /// one `State` message describing the file, then 1MB `Chunk` messages
    /// with the raw bytes, all tagged with the client-provided `file_id`.
    pub async fn handle_download_file_by_path(
        this: Entity<Self>,
        message: TypedEnvelope<proto::DownloadFileByPath>,
        mut cx: AsyncApp,
    ) -> Result<proto::DownloadFileResponse> {
        log::debug!(
            "handle_download_file_by_path: received request: {:?}",
            message.payload
        );

        let worktree_id = WorktreeId::from_proto(message.payload.worktree_id);
        let path = RelPath::from_proto(&message.payload.path)?;
        let project_id = message.payload.project_id;
        // Unlike images, the id is chosen by the client, not by this server.
        let file_id = message.payload.file_id;
        log::debug!(
            "handle_download_file_by_path: worktree_id={:?}, path={:?}, file_id={}",
            worktree_id,
            path,
            file_id
        );
        use proto::create_file_for_peer::Variant;

        let (worktree_store, session): (Entity<WorktreeStore>, AnyProtoClient) = this
            .read_with(&cx, |this, _| {
                (this.worktree_store.clone(), this.session.clone())
            });

        let worktree = worktree_store
            .read_with(&cx, |store, cx| store.worktree_for_id(worktree_id, cx))
            .context("worktree not found")?;

        let download_task = worktree.update(&mut cx, |worktree: &mut Worktree, cx| {
            worktree.load_binary_file(path.as_ref(), cx)
        });

        let downloaded_file = download_task.await?;
        let content = downloaded_file.content;
        let file = downloaded_file.file;
        log::debug!(
            "handle_download_file_by_path: file loaded, content_size={}",
            content.len()
        );

        let proto_file = worktree.read_with(&cx, |_worktree: &Worktree, cx| file.to_proto(cx));
        log::debug!(
            "handle_download_file_by_path: using client-provided file_id={}",
            file_id
        );

        let state = proto::FileState {
            id: file_id,
            file: Some(proto_file),
            content_size: content.len() as u64,
        };

        // The `State` message must precede the chunks so the receiver knows
        // the total size and metadata up front.
        log::debug!("handle_download_file_by_path: sending State message");
        session.send(proto::CreateFileForPeer {
            project_id,
            peer_id: Some(REMOTE_SERVER_PEER_ID),
            variant: Some(Variant::State(state)),
        })?;

        const CHUNK_SIZE: usize = 1024 * 1024; // 1MB chunks
        let num_chunks = content.len().div_ceil(CHUNK_SIZE);
        log::debug!(
            "handle_download_file_by_path: sending {} chunks",
            num_chunks
        );
        for (i, chunk) in content.chunks(CHUNK_SIZE).enumerate() {
            log::trace!(
                "handle_download_file_by_path: sending chunk {}/{}, size={}",
                i + 1,
                num_chunks,
                chunk.len()
            );
            session.send(proto::CreateFileForPeer {
                project_id,
                peer_id: Some(REMOTE_SERVER_PEER_ID),
                variant: Some(Variant::Chunk(proto::FileChunk {
                    file_id,
                    data: chunk.to_vec(),
                })),
            })?;
        }

        log::debug!(
            "handle_download_file_by_path: returning file_id={}",
            file_id
        );
        Ok(proto::DownloadFileResponse { file_id })
    }
800
801 pub async fn handle_open_new_buffer(
802 this: Entity<Self>,
803 _message: TypedEnvelope<proto::OpenNewBuffer>,
804 mut cx: AsyncApp,
805 ) -> Result<proto::OpenBufferResponse> {
806 let (buffer_store, buffer) = this.update(&mut cx, |this, cx| {
807 let buffer_store = this.buffer_store.clone();
808 let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
809 buffer_store.create_buffer(None, true, cx)
810 });
811 (buffer_store, buffer)
812 });
813
814 let buffer = buffer.await?;
815 let buffer_id = buffer.read_with(&cx, |b, _| b.remote_id());
816 buffer_store.update(&mut cx, |buffer_store, cx| {
817 buffer_store
818 .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
819 .detach_and_log_err(cx);
820 });
821
822 Ok(proto::OpenBufferResponse {
823 buffer_id: buffer_id.to_proto(),
824 })
825 }
826
827 async fn handle_toggle_lsp_logs(
828 _: Entity<Self>,
829 envelope: TypedEnvelope<proto::ToggleLspLogs>,
830 cx: AsyncApp,
831 ) -> Result<()> {
832 let server_id = LanguageServerId::from_proto(envelope.payload.server_id);
833 cx.update(|cx| {
834 let log_store = cx
835 .try_global::<GlobalLogStore>()
836 .map(|global_log_store| global_log_store.0.clone())
837 .context("lsp logs store is missing")?;
838 let toggled_log_kind =
839 match proto::toggle_lsp_logs::LogType::from_i32(envelope.payload.log_type)
840 .context("invalid log type")?
841 {
842 proto::toggle_lsp_logs::LogType::Log => LogKind::Logs,
843 proto::toggle_lsp_logs::LogType::Trace => LogKind::Trace,
844 proto::toggle_lsp_logs::LogType::Rpc => LogKind::Rpc,
845 };
846 log_store.update(cx, |log_store, _| {
847 log_store.toggle_lsp_logs(server_id, envelope.payload.enabled, toggled_log_kind);
848 });
849 anyhow::Ok(())
850 })?;
851
852 Ok(())
853 }
854
    /// Opens the remote server's settings file as a buffer, creating a
    /// (non-visible) worktree for it if needed, seeding default content when
    /// the file is empty, and replicating the buffer to the requesting peer.
    async fn handle_open_server_settings(
        this: Entity<Self>,
        _: TypedEnvelope<proto::OpenServerSettings>,
        mut cx: AsyncApp,
    ) -> Result<proto::OpenBufferResponse> {
        let settings_path = paths::settings_file();
        // `false` keeps the settings worktree out of the visible project tree.
        let (worktree, path) = this
            .update(&mut cx, |this, cx| {
                this.worktree_store.update(cx, |worktree_store, cx| {
                    worktree_store.find_or_create_worktree(settings_path, false, cx)
                })
            })
            .await?;

        let (buffer, buffer_store) = this.update(&mut cx, |this, cx| {
            let buffer = this.buffer_store.update(cx, |buffer_store, cx| {
                buffer_store.open_buffer(
                    ProjectPath {
                        worktree_id: worktree.read(cx).id(),
                        path,
                    },
                    cx,
                )
            });

            (buffer, this.buffer_store.clone())
        });

        let buffer = buffer.await?;

        let buffer_id = cx.update(|cx| {
            // Seed a brand-new settings file with the default template.
            if buffer.read(cx).is_empty() {
                buffer.update(cx, |buffer, cx| {
                    buffer.edit([(0..0, initial_server_settings_content())], None, cx)
                });
            }

            let buffer_id = buffer.read(cx).remote_id();

            buffer_store.update(cx, |buffer_store, cx| {
                buffer_store
                    .create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
                    .detach_and_log_err(cx);
            });

            buffer_id
        });

        Ok(proto::OpenBufferResponse {
            buffer_id: buffer_id.to_proto(),
        })
    }
907
908 async fn handle_spawn_kernel(
909 this: Entity<Self>,
910 envelope: TypedEnvelope<proto::SpawnKernel>,
911 cx: AsyncApp,
912 ) -> Result<proto::SpawnKernelResponse> {
913 let fs = this.update(&mut cx.clone(), |this, _| this.fs.clone());
914
915 let mut ports = Vec::new();
916 for _ in 0..5 {
917 let listener = std::net::TcpListener::bind("127.0.0.1:0")?;
918 let port = listener.local_addr()?.port();
919 ports.push(port);
920 }
921
922 let connection_info = serde_json::json!({
923 "shell_port": ports[0],
924 "iopub_port": ports[1],
925 "stdin_port": ports[2],
926 "control_port": ports[3],
927 "hb_port": ports[4],
928 "ip": "127.0.0.1",
929 "key": uuid::Uuid::new_v4().to_string(),
930 "transport": "tcp",
931 "signature_scheme": "hmac-sha256",
932 "kernel_name": envelope.payload.kernel_name,
933 });
934
935 let connection_file_content = serde_json::to_string_pretty(&connection_info)?;
936 let kernel_id = uuid::Uuid::new_v4().to_string();
937
938 let connection_file_path = std::env::temp_dir().join(format!("kernel-{}.json", kernel_id));
939 fs.save(
940 &connection_file_path,
941 &connection_file_content.as_str().into(),
942 language::LineEnding::Unix,
943 )
944 .await?;
945
946 let working_directory = if envelope.payload.working_directory.is_empty() {
947 std::env::current_dir()
948 .ok()
949 .map(|p| p.to_string_lossy().into_owned())
950 } else {
951 Some(envelope.payload.working_directory)
952 };
953
954 // Spawn kernel (Assuming python for now, or we'd need to parse kernelspec logic here or pass the command)
955
956 // Spawn kernel
957 let spawn_kernel = |binary: &str, args: &[String]| {
958 let mut command = smol::process::Command::new(binary);
959
960 if !args.is_empty() {
961 for arg in args {
962 if arg == "{connection_file}" {
963 command.arg(&connection_file_path);
964 } else {
965 command.arg(arg);
966 }
967 }
968 } else {
969 command
970 .arg("-m")
971 .arg("ipykernel_launcher")
972 .arg("-f")
973 .arg(&connection_file_path);
974 }
975
976 // This ensures subprocesses spawned from the kernel use the correct Python environment
977 let python_bin_dir = std::path::Path::new(binary).parent();
978 if let Some(bin_dir) = python_bin_dir {
979 if let Some(path_var) = std::env::var_os("PATH") {
980 let mut paths = std::env::split_paths(&path_var).collect::<Vec<_>>();
981 paths.insert(0, bin_dir.to_path_buf());
982 if let Ok(new_path) = std::env::join_paths(paths) {
983 command.env("PATH", new_path);
984 }
985 }
986
987 if let Some(venv_root) = bin_dir.parent() {
988 command.env("VIRTUAL_ENV", venv_root.to_string_lossy().to_string());
989 }
990 }
991
992 if let Some(wd) = &working_directory {
993 command.current_dir(wd);
994 }
995 command.spawn()
996 };
997
998 // We need to manage the child process lifecycle
999 let child = if !envelope.payload.command.is_empty() {
1000 spawn_kernel(&envelope.payload.command, &envelope.payload.args).context(format!(
1001 "failed to spawn kernel process (command: {})",
1002 envelope.payload.command
1003 ))?
1004 } else if let Some(venv_python) = working_directory
1005 .as_ref()
1006 .and_then(|wd| find_venv_python(wd))
1007 {
1008 let path_str = venv_python.to_string_lossy().to_string();
1009 spawn_kernel(&path_str, &[]).context(format!(
1010 "failed to spawn kernel process (venv: {})",
1011 path_str
1012 ))?
1013 } else {
1014 spawn_kernel("python3", &[])
1015 .or_else(|_| spawn_kernel("python", &[]))
1016 .context("failed to spawn kernel process (tried python3 and python)")?
1017 };
1018
1019 this.update(&mut cx.clone(), |this, _cx| {
1020 this.kernels.insert(kernel_id.clone(), child);
1021 });
1022
1023 Ok(proto::SpawnKernelResponse {
1024 kernel_id,
1025 connection_file: connection_file_content,
1026 })
1027 }
1028
1029 async fn handle_kill_kernel(
1030 this: Entity<Self>,
1031 envelope: TypedEnvelope<proto::KillKernel>,
1032 mut cx: AsyncApp,
1033 ) -> Result<proto::Ack> {
1034 let kernel_id = envelope.payload.kernel_id;
1035 let child = this.update(&mut cx, |this, _| this.kernels.remove(&kernel_id));
1036 if let Some(mut child) = child {
1037 child.kill().log_err();
1038 }
1039 Ok(proto::Ack {})
1040 }
1041
    /// Streams search-candidate buffers back to the client.
    ///
    /// Acks immediately, then runs the search as a registered background
    /// task: matching buffers are registered with the peer, their ids are
    /// batched, and each batch is sent as a `FindSearchCandidatesChunk`,
    /// terminated by a `Done` chunk.
    async fn handle_find_search_candidates(
        this: Entity<Self>,
        envelope: TypedEnvelope<proto::FindSearchCandidates>,
        mut cx: AsyncApp,
    ) -> Result<proto::Ack> {
        use futures::stream::StreamExt as _;

        let peer_id = envelope.original_sender_id.unwrap_or(envelope.sender_id);
        let message = envelope.payload;
        let query = SearchQuery::from_proto(
            message.query.context("missing query field")?,
            PathStyle::local(),
        )?;

        let project_id = message.project_id;
        let buffer_store = this.read_with(&cx, |this, _| this.buffer_store.clone());
        let handle = message.handle;
        // Second handle to the buffer store: `buffer_store` is moved into the
        // spawned task below, while this one registers the task afterwards.
        let _buffer_store = buffer_store.clone();
        let client = this.read_with(&cx, |this, _| this.session.clone());
        let task = cx.spawn(async move |cx| {
            // Kick off the local search; `results.rx` yields matching buffers.
            let results = this.update(cx, |this, cx| {
                project::Search::local(
                    this.fs.clone(),
                    this.buffer_store.clone(),
                    this.worktree_store.clone(),
                    message.limit as _,
                    cx,
                )
                .into_handle(query, cx)
                .matching_buffers(cx)
            });
            // Batches buffer ids adaptively so we don't send one RPC per match.
            let (batcher, batches) =
                project::project_search::AdaptiveBatcher::new(cx.background_executor());
            let mut new_matches = Box::pin(results.rx);

            // Background task drains the batch stream and forwards each batch
            // to the client as a `Matches` chunk.
            let sender_task = cx.background_executor().spawn({
                let client = client.clone();
                async move {
                    let mut batches = std::pin::pin!(batches);
                    while let Some(buffer_ids) = batches.next().await {
                        client
                            .request(proto::FindSearchCandidatesChunk {
                                handle,
                                peer_id: Some(peer_id),
                                project_id,
                                variant: Some(
                                    proto::find_search_candidates_chunk::Variant::Matches(
                                        proto::FindSearchCandidatesMatches { buffer_ids },
                                    ),
                                ),
                            })
                            .await?;
                    }
                    anyhow::Ok(())
                }
            });

            while let Some(buffer) = new_matches.next().await {
                // Make sure the peer knows about this buffer before its id is
                // referenced by a chunk; a registration failure is ignored
                // (best effort) rather than aborting the whole search.
                let _ = buffer_store
                    .update(cx, |this, cx| {
                        this.create_buffer_for_peer(&buffer, REMOTE_SERVER_PEER_ID, cx)
                    })
                    .await;
                let buffer_id = buffer.read_with(cx, |this, _| this.remote_id().to_proto());
                batcher.push(buffer_id).await;
            }
            // Flush any partial batch, then wait for the sender to finish
            // before signalling completion to the client.
            batcher.flush().await;

            sender_task.await?;

            client
                .request(proto::FindSearchCandidatesChunk {
                    handle,
                    peer_id: Some(peer_id),
                    project_id,
                    variant: Some(proto::find_search_candidates_chunk::Variant::Done(
                        proto::FindSearchCandidatesDone {},
                    )),
                })
                .await?;
            anyhow::Ok(())
        });
        // Register the task under (peer, handle) so a later
        // `FindSearchCandidatesCancelled` can drop it.
        _buffer_store.update(&mut cx, |this, _| {
            this.register_ongoing_project_search((peer_id, handle), task);
        });

        Ok(proto::Ack {})
    }
1130
1131 // Goes from client to host.
1132 async fn handle_find_search_candidates_cancel(
1133 this: Entity<Self>,
1134 envelope: TypedEnvelope<proto::FindSearchCandidatesCancelled>,
1135 mut cx: AsyncApp,
1136 ) -> Result<()> {
1137 let buffer_store = this.read_with(&mut cx, |this, _| this.buffer_store.clone());
1138 BufferStore::handle_find_search_candidates_cancel(buffer_store, envelope, cx).await
1139 }
1140
1141 async fn handle_list_remote_directory(
1142 this: Entity<Self>,
1143 envelope: TypedEnvelope<proto::ListRemoteDirectory>,
1144 cx: AsyncApp,
1145 ) -> Result<proto::ListRemoteDirectoryResponse> {
1146 use smol::stream::StreamExt;
1147 let fs = cx.read_entity(&this, |this, _| this.fs.clone());
1148 let expanded = PathBuf::from(shellexpand::tilde(&envelope.payload.path).to_string());
1149 let check_info = envelope
1150 .payload
1151 .config
1152 .as_ref()
1153 .is_some_and(|config| config.is_dir);
1154
1155 let mut entries = Vec::new();
1156 let mut entry_info = Vec::new();
1157 let mut response = fs.read_dir(&expanded).await?;
1158 while let Some(path) = response.next().await {
1159 let path = path?;
1160 if let Some(file_name) = path.file_name() {
1161 entries.push(file_name.to_string_lossy().into_owned());
1162 if check_info {
1163 let is_dir = fs.is_dir(&path).await;
1164 entry_info.push(proto::EntryInfo { is_dir });
1165 }
1166 }
1167 }
1168 Ok(proto::ListRemoteDirectoryResponse {
1169 entries,
1170 entry_info,
1171 })
1172 }
1173
1174 async fn handle_get_path_metadata(
1175 this: Entity<Self>,
1176 envelope: TypedEnvelope<proto::GetPathMetadata>,
1177 cx: AsyncApp,
1178 ) -> Result<proto::GetPathMetadataResponse> {
1179 let fs = cx.read_entity(&this, |this, _| this.fs.clone());
1180 let expanded = PathBuf::from(shellexpand::tilde(&envelope.payload.path).to_string());
1181
1182 let metadata = fs.metadata(&expanded).await?;
1183 let is_dir = metadata.map(|metadata| metadata.is_dir).unwrap_or(false);
1184
1185 Ok(proto::GetPathMetadataResponse {
1186 exists: metadata.is_some(),
1187 is_dir,
1188 path: expanded.to_string_lossy().into_owned(),
1189 })
1190 }
1191
    /// Shuts this remote server process down.
    ///
    /// The teardown is deferred via a detached spawn so that the `Ack`
    /// returned here can still be delivered to the client before the app
    /// quits.
    async fn handle_shutdown_remote_server(
        _this: Entity<Self>,
        _envelope: TypedEnvelope<proto::ShutdownRemoteServer>,
        cx: AsyncApp,
    ) -> Result<proto::Ack> {
        cx.spawn(async move |cx| {
            cx.update(|cx| {
                // TODO: This is a hack, because in a headless project, shutdown isn't executed
                // when calling quit, but it should be.
                cx.shutdown();
                cx.quit();
            })
        })
        .detach();

        Ok(proto::Ack {})
    }
1209
    /// Liveness check: replies with an empty ack so the client can detect a
    /// dead connection and measure round-trip latency.
    pub async fn handle_ping(
        _this: Entity<Self>,
        _envelope: TypedEnvelope<proto::Ping>,
        _cx: AsyncApp,
    ) -> Result<proto::Ack> {
        log::debug!("Received ping from client");
        Ok(proto::Ack {})
    }
1218
1219 async fn handle_get_processes(
1220 _this: Entity<Self>,
1221 _envelope: TypedEnvelope<proto::GetProcesses>,
1222 _cx: AsyncApp,
1223 ) -> Result<proto::GetProcessesResponse> {
1224 let mut processes = Vec::new();
1225 let refresh_kind = RefreshKind::nothing().with_processes(
1226 ProcessRefreshKind::nothing()
1227 .without_tasks()
1228 .with_cmd(UpdateKind::Always),
1229 );
1230
1231 for process in System::new_with_specifics(refresh_kind)
1232 .processes()
1233 .values()
1234 {
1235 let name = process.name().to_string_lossy().into_owned();
1236 let command = process
1237 .cmd()
1238 .iter()
1239 .map(|s| s.to_string_lossy().into_owned())
1240 .collect::<Vec<_>>();
1241
1242 processes.push(proto::ProcessInfo {
1243 pid: process.pid().as_u32(),
1244 name,
1245 command,
1246 });
1247 }
1248
1249 processes.sort_by_key(|p| p.name.clone());
1250
1251 Ok(proto::GetProcessesResponse { processes })
1252 }
1253
1254 async fn handle_get_remote_profiling_data(
1255 this: Entity<Self>,
1256 envelope: TypedEnvelope<proto::GetRemoteProfilingData>,
1257 cx: AsyncApp,
1258 ) -> Result<proto::GetRemoteProfilingDataResponse> {
1259 let foreground_only = envelope.payload.foreground_only;
1260
1261 let (deltas, now_nanos) = cx.update(|cx| {
1262 let dispatcher = cx.foreground_executor().dispatcher();
1263 let timings = if foreground_only {
1264 vec![dispatcher.get_current_thread_timings()]
1265 } else {
1266 dispatcher.get_all_timings()
1267 };
1268 this.update(cx, |this, _cx| {
1269 let deltas = this.profiling_collector.collect_unseen(timings);
1270 let now_nanos = Instant::now()
1271 .duration_since(this.profiling_collector.startup_time())
1272 .as_nanos() as u64;
1273 (deltas, now_nanos)
1274 })
1275 });
1276
1277 let threads = deltas
1278 .into_iter()
1279 .map(|delta| proto::RemoteProfilingThread {
1280 thread_name: delta.thread_name,
1281 thread_id: delta.thread_id,
1282 timings: delta
1283 .new_timings
1284 .into_iter()
1285 .map(|t| proto::RemoteProfilingTiming {
1286 location: Some(proto::RemoteProfilingLocation {
1287 file: t.location.file.to_string(),
1288 line: t.location.line,
1289 column: t.location.column,
1290 }),
1291 start_nanos: t.start as u64,
1292 duration_nanos: t.duration as u64,
1293 })
1294 .collect(),
1295 })
1296 .collect();
1297
1298 Ok(proto::GetRemoteProfilingDataResponse { threads, now_nanos })
1299 }
1300
1301 async fn handle_get_directory_environment(
1302 this: Entity<Self>,
1303 envelope: TypedEnvelope<proto::GetDirectoryEnvironment>,
1304 mut cx: AsyncApp,
1305 ) -> Result<proto::DirectoryEnvironment> {
1306 let shell = task::shell_from_proto(envelope.payload.shell.context("missing shell")?)?;
1307 let directory = PathBuf::from(envelope.payload.directory);
1308 let environment = this
1309 .update(&mut cx, |this, cx| {
1310 this.environment.update(cx, |environment, cx| {
1311 environment.local_directory_environment(&shell, directory.into(), cx)
1312 })
1313 })
1314 .await
1315 .context("failed to get directory environment")?
1316 .into_iter()
1317 .collect();
1318 Ok(proto::DirectoryEnvironment { environment })
1319 }
1320}
1321
1322fn prompt_to_proto(
1323 prompt: &project::LanguageServerPromptRequest,
1324) -> proto::language_server_prompt_request::Level {
1325 match prompt.level {
1326 PromptLevel::Info => proto::language_server_prompt_request::Level::Info(
1327 proto::language_server_prompt_request::Info {},
1328 ),
1329 PromptLevel::Warning => proto::language_server_prompt_request::Level::Warning(
1330 proto::language_server_prompt_request::Warning {},
1331 ),
1332 PromptLevel::Critical => proto::language_server_prompt_request::Level::Critical(
1333 proto::language_server_prompt_request::Critical {},
1334 ),
1335 }
1336}
1337
/// Looks for a conventional Python virtual environment inside
/// `working_directory` (checking `.venv`, `venv`, `.env`, `env`, in that
/// order) and returns the path to its interpreter.
///
/// A directory counts as a venv if it contains a `pyvenv.cfg` or a
/// `bin/activate` script; within a match, `bin/python` is preferred over
/// `bin/python3`. Returns `None` when no usable interpreter is found.
fn find_venv_python(working_directory: &str) -> Option<std::path::PathBuf> {
    let base = std::path::Path::new(working_directory);
    [".venv", "venv", ".env", "env"]
        .iter()
        .map(|dir_name| base.join(dir_name))
        .filter(|venv_dir| {
            venv_dir.join("pyvenv.cfg").is_file()
                || venv_dir.join("bin").join("activate").is_file()
        })
        .find_map(|venv_dir| {
            // NOTE: only the Unix `bin/` layout is checked; Windows venvs use
            // `Scripts/` and are intentionally not handled here.
            ["python", "python3"]
                .iter()
                .map(|exe| venv_dir.join("bin").join(exe))
                .find(|candidate| candidate.is_file())
        })
}