1use std::{
2 io::{BufRead, BufReader},
3 path::{Path, PathBuf},
4 pin::pin,
5 sync::{Arc, atomic::AtomicUsize},
6};
7
8use anyhow::{Context as _, Result, anyhow};
9use collections::{HashMap, HashSet};
10use fs::Fs;
11use futures::{
12 FutureExt, SinkExt,
13 future::{BoxFuture, Shared},
14};
15use gpui::{
16 App, AppContext as _, AsyncApp, Context, Entity, EntityId, EventEmitter, Task, WeakEntity,
17};
18use postage::oneshot;
19use rpc::{
20 AnyProtoClient, ErrorExt, TypedEnvelope,
21 proto::{self, FromProto, SSH_PROJECT_ID, ToProto},
22};
23use smol::{
24 channel::{Receiver, Sender},
25 stream::StreamExt,
26};
27use text::ReplicaId;
28use util::{
29 ResultExt,
30 paths::{PathStyle, RemotePathBuf, SanitizedPath},
31};
32use worktree::{
33 Entry, ProjectEntryId, UpdatedEntriesSet, UpdatedGitRepositoriesSet, Worktree, WorktreeId,
34 WorktreeSettings,
35};
36
37use crate::{ProjectPath, search::SearchQuery};
38
/// A candidate file produced while scanning worktrees during a project search.
///
/// Sent to filter workers, which read the file at
/// `worktree_path.join(&path.path)` and reply on `respond` if the file's
/// on-disk contents match the query (see `WorktreeStore::filter_paths`).
struct MatchingEntry {
    // Absolute root path of the worktree containing this entry.
    worktree_path: Arc<Path>,
    // Worktree-relative location of the candidate file.
    path: ProjectPath,
    // Channel used to report `path` back once the file's content matches.
    respond: oneshot::Sender<ProjectPath>,
}
44
/// Where this store's worktrees live: on the local filesystem, or on a remote
/// host reached through an RPC client.
enum WorktreeStoreState {
    Local {
        // Filesystem used to create and scan local worktrees.
        fs: Arc<dyn Fs>,
    },
    Remote {
        // Client for the host that owns the worktrees.
        upstream_client: AnyProtoClient,
        upstream_project_id: u64,
        // Path separator/format convention used by the remote host.
        path_style: PathStyle,
    },
}
55
/// Tracks the set of worktrees belonging to a project, handling their
/// creation, removal, ordering, and replication to/from remote peers.
pub struct WorktreeStore {
    // Shared counter used to assign entry ids in new local worktrees.
    next_entry_id: Arc<AtomicUsize>,
    // When present, the (client, project_id) that project updates are pushed to.
    downstream_client: Option<(AnyProtoClient, u64)>,
    // If true, hold strong handles to all worktrees (even invisible ones).
    retain_worktrees: bool,
    worktrees: Vec<WorktreeHandle>,
    // Set once worktrees have been manually reordered; disables sorted insertion.
    worktrees_reordered: bool,
    // De-duplicates concurrent `create_worktree` calls for the same path.
    #[allow(clippy::type_complexity)]
    loading_worktrees:
        HashMap<SanitizedPath, Shared<Task<Result<Entity<Worktree>, Arc<anyhow::Error>>>>>,
    state: WorktreeStoreState,
}
67
/// Events emitted by [`WorktreeStore`] as worktrees are added, removed,
/// reordered, or updated.
#[derive(Debug)]
pub enum WorktreeStoreEvent {
    WorktreeAdded(Entity<Worktree>),
    /// A worktree was removed from the store (the entity may still be alive).
    WorktreeRemoved(EntityId, WorktreeId),
    /// The worktree entity itself was released (dropped).
    WorktreeReleased(EntityId, WorktreeId),
    WorktreeOrderChanged,
    /// A state update for this worktree was sent to the downstream client.
    WorktreeUpdateSent(Entity<Worktree>),
    WorktreeUpdatedEntries(WorktreeId, UpdatedEntriesSet),
    WorktreeUpdatedGitRepositories(WorktreeId, UpdatedGitRepositoriesSet),
    WorktreeDeletedEntry(WorktreeId, ProjectEntryId),
}
79
// WorktreeStore broadcasts `WorktreeStoreEvent`s to gpui subscribers.
impl EventEmitter<WorktreeStoreEvent> for WorktreeStore {}
81
82impl WorktreeStore {
    /// Registers the RPC message handlers that let remote peers manipulate
    /// project entries (create/copy/delete/expand) through this store.
    pub fn init(client: &AnyProtoClient) {
        client.add_entity_request_handler(Self::handle_create_project_entry);
        client.add_entity_request_handler(Self::handle_copy_project_entry);
        client.add_entity_request_handler(Self::handle_delete_project_entry);
        client.add_entity_request_handler(Self::handle_expand_project_entry);
        client.add_entity_request_handler(Self::handle_expand_all_for_project_entry);
    }
90
91 pub fn local(retain_worktrees: bool, fs: Arc<dyn Fs>) -> Self {
92 Self {
93 next_entry_id: Default::default(),
94 loading_worktrees: Default::default(),
95 downstream_client: None,
96 worktrees: Vec::new(),
97 worktrees_reordered: false,
98 retain_worktrees,
99 state: WorktreeStoreState::Local { fs },
100 }
101 }
102
103 pub fn remote(
104 retain_worktrees: bool,
105 upstream_client: AnyProtoClient,
106 upstream_project_id: u64,
107 path_style: PathStyle,
108 ) -> Self {
109 Self {
110 next_entry_id: Default::default(),
111 loading_worktrees: Default::default(),
112 downstream_client: None,
113 worktrees: Vec::new(),
114 worktrees_reordered: false,
115 retain_worktrees,
116 state: WorktreeStoreState::Remote {
117 upstream_client,
118 upstream_project_id,
119 path_style,
120 },
121 }
122 }
123
124 /// Iterates through all worktrees, including ones that don't appear in the project panel
125 pub fn worktrees(&self) -> impl '_ + DoubleEndedIterator<Item = Entity<Worktree>> {
126 self.worktrees
127 .iter()
128 .filter_map(move |worktree| worktree.upgrade())
129 }
130
    /// Iterates through all user-visible worktrees, the ones that appear in the project panel.
    pub fn visible_worktrees<'a>(
        &'a self,
        cx: &'a App,
    ) -> impl 'a + DoubleEndedIterator<Item = Entity<Worktree>> {
        self.worktrees()
            .filter(|worktree| worktree.read(cx).is_visible())
    }
139
140 pub fn worktree_for_id(&self, id: WorktreeId, cx: &App) -> Option<Entity<Worktree>> {
141 self.worktrees()
142 .find(|worktree| worktree.read(cx).id() == id)
143 }
144
145 pub fn worktree_for_entry(
146 &self,
147 entry_id: ProjectEntryId,
148 cx: &App,
149 ) -> Option<Entity<Worktree>> {
150 self.worktrees()
151 .find(|worktree| worktree.read(cx).contains_entry(entry_id))
152 }
153
154 pub fn find_worktree(
155 &self,
156 abs_path: impl Into<SanitizedPath>,
157 cx: &App,
158 ) -> Option<(Entity<Worktree>, PathBuf)> {
159 let abs_path: SanitizedPath = abs_path.into();
160 for tree in self.worktrees() {
161 if let Ok(relative_path) = abs_path.as_path().strip_prefix(tree.read(cx).abs_path()) {
162 return Some((tree.clone(), relative_path.into()));
163 }
164 }
165 None
166 }
167
168 pub fn absolutize(&self, project_path: &ProjectPath, cx: &App) -> Option<PathBuf> {
169 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
170 worktree.read(cx).absolutize(&project_path.path).ok()
171 }
172
173 pub fn find_or_create_worktree(
174 &mut self,
175 abs_path: impl AsRef<Path>,
176 visible: bool,
177 cx: &mut Context<Self>,
178 ) -> Task<Result<(Entity<Worktree>, PathBuf)>> {
179 let abs_path = abs_path.as_ref();
180 if let Some((tree, relative_path)) = self.find_worktree(abs_path, cx) {
181 Task::ready(Ok((tree, relative_path)))
182 } else {
183 let worktree = self.create_worktree(abs_path, visible, cx);
184 cx.background_spawn(async move { Ok((worktree.await?, PathBuf::new())) })
185 }
186 }
187
188 // pub fn entry_for_id<'a>(&'a self, entry_id: ProjectEntryId, cx: &'a App) -> Option<&'a Entry> {
189 // self.worktrees()
190 // .find_map(|worktree| worktree.read(cx).entry_for_id(entry_id))
191 // }
192 pub fn entry_for_id<'a>(
193 &'a self,
194 _entry_id: ProjectEntryId,
195 _cx: &'a App,
196 ) -> Option<&'a Entry> {
197 todo!("entry_for_id needs to be refactored to handle Ref type")
198 }
199
200 // pub fn worktree_and_entry_for_id<'a>(
201 // &'a self,
202 // entry_id: ProjectEntryId,
203 // cx: &'a App,
204 // ) -> Option<(Entity<Worktree>, &'a Entry)> {
205 // self.worktrees().find_map(|worktree| {
206 // worktree
207 // .read(cx)
208 // .entry_for_id(entry_id)
209 // .map(|e| (worktree.clone(), e))
210 // })
211 // }
212 pub fn worktree_and_entry_for_id<'a>(
213 &'a self,
214 _entry_id: ProjectEntryId,
215 _cx: &'a App,
216 ) -> Option<(Entity<Worktree>, &'a Entry)> {
217 todo!("worktree_and_entry_for_id needs to be refactored to handle Ref type")
218 }
219
220 pub fn entry_for_path(&self, path: &ProjectPath, cx: &App) -> Option<Entry> {
221 self.worktree_for_id(path.worktree_id, cx)?
222 .read(cx)
223 .entry_for_path(&path.path)
224 .cloned()
225 }
226
    /// Starts creating a worktree rooted at `abs_path`, de-duplicating
    /// concurrent requests for the same path.
    ///
    /// If a load for `abs_path` is already in flight, the returned task awaits
    /// the existing shared task instead of starting a new one. The cache entry
    /// is removed once the load finishes, successfully or not.
    pub fn create_worktree(
        &mut self,
        abs_path: impl Into<SanitizedPath>,
        visible: bool,
        cx: &mut Context<Self>,
    ) -> Task<Result<Entity<Worktree>>> {
        let abs_path: SanitizedPath = abs_path.into();
        if !self.loading_worktrees.contains_key(&abs_path) {
            let task = match &self.state {
                WorktreeStoreState::Remote {
                    upstream_client,
                    path_style,
                    ..
                } => {
                    if upstream_client.is_via_collab() {
                        // Collab guests may not add worktrees to the host's project.
                        Task::ready(Err(Arc::new(anyhow!("cannot create worktrees via collab"))))
                    } else {
                        let abs_path =
                            RemotePathBuf::new(abs_path.as_path().to_path_buf(), *path_style);
                        self.create_ssh_worktree(upstream_client.clone(), abs_path, visible, cx)
                    }
                }
                WorktreeStoreState::Local { fs } => {
                    self.create_local_worktree(fs.clone(), abs_path.clone(), visible, cx)
                }
            };

            // Share the task so concurrent callers for the same path await the
            // same load.
            self.loading_worktrees
                .insert(abs_path.clone(), task.shared());
        }
        let task = self.loading_worktrees.get(&abs_path).unwrap().clone();
        cx.spawn(async move |this, cx| {
            let result = task.await;
            // Drop the cache entry regardless of outcome so a failed load can
            // be retried later.
            this.update(cx, |this, _| this.loading_worktrees.remove(&abs_path))
                .ok();
            match result {
                Ok(worktree) => Ok(worktree),
                // The shared task yields `Arc<anyhow::Error>`; convert it back
                // into an owned error for this caller.
                Err(err) => Err((*err).cloned()),
            }
        })
    }
268
269 fn create_ssh_worktree(
270 &mut self,
271 client: AnyProtoClient,
272 abs_path: RemotePathBuf,
273 visible: bool,
274 cx: &mut Context<Self>,
275 ) -> Task<Result<Entity<Worktree>, Arc<anyhow::Error>>> {
276 let path_style = abs_path.path_style();
277 let mut abs_path = abs_path.to_string();
278 // If we start with `/~` that means the ssh path was something like `ssh://user@host/~/home-dir-folder/`
279 // in which case want to strip the leading the `/`.
280 // On the host-side, the `~` will get expanded.
281 // That's what git does too: https://github.com/libgit2/libgit2/issues/3345#issuecomment-127050850
282 if abs_path.starts_with("/~") {
283 abs_path = abs_path[1..].to_string();
284 }
285 if abs_path.is_empty() {
286 abs_path = "~/".to_string();
287 }
288
289 cx.spawn(async move |this, cx| {
290 let this = this.upgrade().context("Dropped worktree store")?;
291
292 let path = RemotePathBuf::new(abs_path.into(), path_style);
293 let response = client
294 .request(proto::AddWorktree {
295 project_id: SSH_PROJECT_ID,
296 path: path.to_proto(),
297 visible,
298 })
299 .await?;
300
301 if let Some(existing_worktree) = this.read_with(cx, |this, cx| {
302 this.worktree_for_id(WorktreeId::from_proto(response.worktree_id), cx)
303 })? {
304 return Ok(existing_worktree);
305 }
306
307 let root_path_buf = PathBuf::from_proto(response.canonicalized_path.clone());
308 let root_name = root_path_buf
309 .file_name()
310 .map(|n| n.to_string_lossy().to_string())
311 .unwrap_or(root_path_buf.to_string_lossy().to_string());
312
313 let worktree = cx.update(|cx| {
314 Worktree::remote(
315 SSH_PROJECT_ID,
316 0,
317 proto::WorktreeMetadata {
318 id: response.worktree_id,
319 root_name,
320 visible,
321 abs_path: response.canonicalized_path,
322 },
323 client,
324 cx,
325 )
326 })?;
327
328 this.update(cx, |this, cx| {
329 this.add(&worktree, cx);
330 })?;
331 Ok(worktree)
332 })
333 }
334
335 fn create_local_worktree(
336 &mut self,
337 fs: Arc<dyn Fs>,
338 abs_path: impl Into<SanitizedPath>,
339 visible: bool,
340 cx: &mut Context<Self>,
341 ) -> Task<Result<Entity<Worktree>, Arc<anyhow::Error>>> {
342 let next_entry_id = self.next_entry_id.clone();
343 let path: SanitizedPath = abs_path.into();
344
345 cx.spawn(async move |this, cx| {
346 let worktree = Worktree::local(path.clone(), visible, fs, next_entry_id, cx).await;
347
348 let worktree = worktree?;
349
350 this.update(cx, |this, cx| this.add(&worktree, cx))?;
351
352 if visible {
353 cx.update(|cx| {
354 cx.add_recent_document(path.as_path());
355 })
356 .log_err();
357 }
358
359 Ok(worktree)
360 })
361 }
362
    /// Registers a worktree with the store: inserts it (sorted by absolute
    /// path unless worktrees were manually reordered), emits `WorktreeAdded`,
    /// re-emits the worktree's own events as store events, and pushes the new
    /// project state downstream.
    pub fn add(&mut self, worktree: &Entity<Worktree>, cx: &mut Context<Self>) {
        let worktree_id = worktree.read(cx).id();
        debug_assert!(self.worktrees().all(|w| w.read(cx).id() != worktree_id));

        // Invisible worktrees are held weakly (unless the store retains all
        // worktrees) so they can drop once no longer referenced elsewhere.
        let push_strong_handle = self.retain_worktrees || worktree.read(cx).is_visible();
        let handle = if push_strong_handle {
            WorktreeHandle::Strong(worktree.clone())
        } else {
            WorktreeHandle::Weak(worktree.downgrade())
        };
        if self.worktrees_reordered {
            self.worktrees.push(handle);
        } else {
            // Keep the list sorted by absolute path; dropped (un-upgradable)
            // handles compare as `None` in the search key.
            let i = match self
                .worktrees
                .binary_search_by_key(&Some(worktree.read(cx).abs_path()), |other| {
                    other.upgrade().map(|worktree| worktree.read(cx).abs_path())
                }) {
                Ok(i) | Err(i) => i,
            };
            self.worktrees.insert(i, handle);
        }

        cx.emit(WorktreeStoreEvent::WorktreeAdded(worktree.clone()));
        self.send_project_updates(cx);

        let handle_id = worktree.entity_id();
        // Forward the worktree's events under this store's event type.
        cx.subscribe(worktree, |_, worktree, event, cx| {
            let worktree_id = worktree.read(cx).id();
            match event {
                worktree::Event::UpdatedEntries(changes) => {
                    cx.emit(WorktreeStoreEvent::WorktreeUpdatedEntries(
                        worktree_id,
                        changes.clone(),
                    ));
                }
                worktree::Event::UpdatedGitRepositories(set) => {
                    cx.emit(WorktreeStoreEvent::WorktreeUpdatedGitRepositories(
                        worktree_id,
                        set.clone(),
                    ));
                }
                worktree::Event::DeletedEntry(id) => {
                    cx.emit(WorktreeStoreEvent::WorktreeDeletedEntry(worktree_id, *id))
                }
            }
        })
        .detach();
        // When the worktree entity is dropped, announce both release and
        // removal, then refresh the downstream project state.
        cx.observe_release(worktree, move |this, worktree, cx| {
            cx.emit(WorktreeStoreEvent::WorktreeReleased(
                handle_id,
                worktree.id(),
            ));
            cx.emit(WorktreeStoreEvent::WorktreeRemoved(
                handle_id,
                worktree.id(),
            ));
            this.send_project_updates(cx);
        })
        .detach();
    }
424
425 pub fn remove_worktree(&mut self, id_to_remove: WorktreeId, cx: &mut Context<Self>) {
426 self.worktrees.retain(|worktree| {
427 if let Some(worktree) = worktree.upgrade() {
428 if worktree.read(cx).id() == id_to_remove {
429 cx.emit(WorktreeStoreEvent::WorktreeRemoved(
430 worktree.entity_id(),
431 id_to_remove,
432 ));
433 false
434 } else {
435 true
436 }
437 } else {
438 false
439 }
440 });
441 self.send_project_updates(cx);
442 }
443
    /// Marks whether worktrees have been manually reordered; when true, new
    /// worktrees are appended instead of inserted in path-sorted order.
    pub fn set_worktrees_reordered(&mut self, worktrees_reordered: bool) {
        self.worktrees_reordered = worktrees_reordered;
    }
447
448 fn upstream_client(&self) -> Option<(AnyProtoClient, u64)> {
449 match &self.state {
450 WorktreeStoreState::Remote {
451 upstream_client,
452 upstream_project_id,
453 ..
454 } => Some((upstream_client.clone(), *upstream_project_id)),
455 WorktreeStoreState::Local { .. } => None,
456 }
457 }
458
459 pub fn set_worktrees_from_proto(
460 &mut self,
461 worktrees: Vec<proto::WorktreeMetadata>,
462 replica_id: ReplicaId,
463 cx: &mut Context<Self>,
464 ) -> Result<()> {
465 let mut old_worktrees_by_id = self
466 .worktrees
467 .drain(..)
468 .filter_map(|worktree| {
469 let worktree = worktree.upgrade()?;
470 let worktree_id = worktree.read(cx).id();
471 Some((worktree_id, worktree))
472 })
473 .collect::<HashMap<_, _>>();
474
475 let (client, project_id) = self.upstream_client().clone().context("invalid project")?;
476
477 for worktree in worktrees {
478 if let Some(old_worktree) =
479 old_worktrees_by_id.remove(&WorktreeId::from_proto(worktree.id))
480 {
481 let push_strong_handle =
482 self.retain_worktrees || old_worktree.read(cx).is_visible();
483 let handle = if push_strong_handle {
484 WorktreeHandle::Strong(old_worktree.clone())
485 } else {
486 WorktreeHandle::Weak(old_worktree.downgrade())
487 };
488 self.worktrees.push(handle);
489 } else {
490 self.add(
491 &Worktree::remote(project_id, replica_id, worktree, client.clone(), cx),
492 cx,
493 );
494 }
495 }
496 self.send_project_updates(cx);
497
498 Ok(())
499 }
500
    /// Moves the worktree `source` to the position currently occupied by
    /// `destination`, marking the list as manually reordered.
    ///
    /// # Errors
    /// Fails if either id has no live worktree in the store.
    pub fn move_worktree(
        &mut self,
        source: WorktreeId,
        destination: WorktreeId,
        cx: &mut Context<Self>,
    ) -> Result<()> {
        if source == destination {
            return Ok(());
        }

        // Locate both indices in one pass, stopping early once both are found.
        let mut source_index = None;
        let mut destination_index = None;
        for (i, worktree) in self.worktrees.iter().enumerate() {
            if let Some(worktree) = worktree.upgrade() {
                let worktree_id = worktree.read(cx).id();
                if worktree_id == source {
                    source_index = Some(i);
                    if destination_index.is_some() {
                        break;
                    }
                } else if worktree_id == destination {
                    destination_index = Some(i);
                    if source_index.is_some() {
                        break;
                    }
                }
            }
        }

        let source_index =
            source_index.with_context(|| format!("Missing worktree for id {source}"))?;
        let destination_index =
            destination_index.with_context(|| format!("Missing worktree for id {destination}"))?;

        if source_index == destination_index {
            return Ok(());
        }

        // Remove-then-insert shifts neighbors accordingly; the destination
        // index refers to the list *after* removal.
        let worktree_to_move = self.worktrees.remove(source_index);
        self.worktrees.insert(destination_index, worktree_to_move);
        self.worktrees_reordered = true;
        cx.emit(WorktreeStoreEvent::WorktreeOrderChanged);
        cx.notify();
        Ok(())
    }
546
547 pub fn disconnected_from_host(&mut self, cx: &mut App) {
548 for worktree in &self.worktrees {
549 if let Some(worktree) = worktree.upgrade() {
550 worktree.update(cx, |worktree, _| {
551 if let Some(worktree) = worktree.as_remote_mut() {
552 worktree.disconnected_from_host();
553 }
554 });
555 }
556 }
557 }
558
    /// Pushes the current set of worktree metadata to the downstream client,
    /// if any, then starts observing each worktree so subsequent updates are
    /// forwarded as well. No-op when nothing is shared downstream.
    pub fn send_project_updates(&mut self, cx: &mut Context<Self>) {
        let Some((downstream_client, project_id)) = self.downstream_client.clone() else {
            return;
        };

        let update = proto::UpdateProject {
            project_id,
            worktrees: self.worktree_metadata_protos(cx),
        };

        // collab has bad concurrency guarantees, so we send requests in serial.
        let update_project = if downstream_client.is_via_collab() {
            Some(downstream_client.request(update))
        } else {
            // Non-collab connections can fire-and-forget.
            downstream_client.send(update).log_err();
            None
        };
        cx.spawn(async move |this, cx| {
            if let Some(update_project) = update_project {
                // Wait for the project update to be acknowledged before
                // streaming per-worktree updates.
                update_project.await?;
            }

            this.update(cx, |this, cx| {
                let worktrees = this.worktrees().collect::<Vec<_>>();

                for worktree in worktrees {
                    worktree.update(cx, |worktree, cx| {
                        let client = downstream_client.clone();
                        worktree.observe_updates(project_id, cx, {
                            move |update| {
                                let client = client.clone();
                                async move {
                                    // Over collab, await each request so the
                                    // updates stay serialized; otherwise send
                                    // without waiting.
                                    if client.is_via_collab() {
                                        client
                                            .request(update)
                                            .map(|result| result.log_err().is_some())
                                            .await
                                    } else {
                                        client.send(update).log_err().is_some()
                                    }
                                }
                            }
                        });
                    });

                    cx.emit(WorktreeStoreEvent::WorktreeUpdateSent(worktree.clone()))
                }

                anyhow::Ok(())
            })
        })
        .detach_and_log_err(cx);
    }
612
613 pub fn worktree_metadata_protos(&self, cx: &App) -> Vec<proto::WorktreeMetadata> {
614 self.worktrees()
615 .map(|worktree| {
616 let worktree = worktree.read(cx);
617 proto::WorktreeMetadata {
618 id: worktree.id().to_proto(),
619 root_name: worktree.root_name().into(),
620 visible: worktree.is_visible(),
621 abs_path: worktree.abs_path().to_proto(),
622 }
623 })
624 .collect()
625 }
626
627 pub fn shared(
628 &mut self,
629 remote_id: u64,
630 downstream_client: AnyProtoClient,
631 cx: &mut Context<Self>,
632 ) {
633 self.retain_worktrees = true;
634 self.downstream_client = Some((downstream_client, remote_id));
635
636 // When shared, retain all worktrees
637 for worktree_handle in self.worktrees.iter_mut() {
638 match worktree_handle {
639 WorktreeHandle::Strong(_) => {}
640 WorktreeHandle::Weak(worktree) => {
641 if let Some(worktree) = worktree.upgrade() {
642 *worktree_handle = WorktreeHandle::Strong(worktree);
643 }
644 }
645 }
646 }
647 self.send_project_updates(cx);
648 }
649
    /// Stops sharing: drops the downstream client, stops observing worktree
    /// updates, and demotes invisible worktrees back to weak handles so they
    /// can be dropped.
    pub fn unshared(&mut self, cx: &mut Context<Self>) {
        self.retain_worktrees = false;
        self.downstream_client.take();

        // When not shared, only retain the visible worktrees
        for worktree_handle in self.worktrees.iter_mut() {
            if let WorktreeHandle::Strong(worktree) = worktree_handle {
                let is_visible = worktree.update(cx, |worktree, _| {
                    // Updates must stop for every worktree, visible or not.
                    worktree.stop_observing_updates();
                    worktree.is_visible()
                });
                if !is_visible {
                    *worktree_handle = WorktreeHandle::Weak(worktree.downgrade());
                }
            }
        }
    }
667
    /// search over all worktrees and return buffers that *might* match the search.
    ///
    /// Results arrive on the returned channel in worktree order. Candidates
    /// are pre-filtered against the query's path filters and their on-disk
    /// contents before being reported.
    pub fn find_search_candidates(
        &self,
        query: SearchQuery,
        limit: usize,
        open_entries: HashSet<ProjectEntryId>,
        fs: Arc<dyn Fs>,
        cx: &Context<Self>,
    ) -> Receiver<ProjectPath> {
        // Snapshot visible local worktrees up front so the scan can run
        // entirely on background threads.
        let snapshots = self
            .visible_worktrees(cx)
            .filter_map(|tree| {
                let tree = tree.read(cx);
                Some((tree.snapshot(), tree.as_local()?.settings()))
            })
            .collect::<Vec<_>>();

        let executor = cx.background_executor().clone();

        // We want to return entries in the order they are in the worktrees, so we have one
        // thread that iterates over the worktrees (and ignored directories) as necessary,
        // and pushes a oneshot::Receiver to the output channel and a oneshot::Sender to the filter
        // channel.
        // We spawn a number of workers that take items from the filter channel and check the query
        // against the version of the file on disk.
        let (filter_tx, filter_rx) = smol::channel::bounded(64);
        let (output_tx, output_rx) = smol::channel::bounded(64);
        let (matching_paths_tx, matching_paths_rx) = smol::channel::unbounded();

        // Producer: walks the snapshots and feeds the filter/output channels.
        let input = cx.background_spawn({
            let fs = fs.clone();
            let query = query.clone();
            async move {
                Self::find_candidate_paths(
                    fs,
                    snapshots,
                    open_entries,
                    query,
                    filter_tx,
                    output_tx,
                )
                .await
                .log_err();
            }
        });
        const MAX_CONCURRENT_FILE_SCANS: usize = 64;
        // Workers: check each candidate's on-disk content against the query.
        let filters = cx.background_spawn(async move {
            let fs = &fs;
            let query = &query;
            executor
                .scoped(move |scope| {
                    for _ in 0..MAX_CONCURRENT_FILE_SCANS {
                        let filter_rx = filter_rx.clone();
                        scope.spawn(async move {
                            Self::filter_paths(fs, filter_rx, query)
                                .await
                                .log_with_level(log::Level::Debug);
                        })
                    }
                })
                .await;
        });
        // Collector: forwards matches in order, then tears the pipeline down
        // by dropping the producer and worker tasks.
        cx.background_spawn(async move {
            let mut matched = 0;
            while let Ok(mut receiver) = output_rx.recv().await {
                let Some(path) = receiver.next().await else {
                    continue;
                };
                let Ok(_) = matching_paths_tx.send(path).await else {
                    break;
                };
                matched += 1;
                // NOTE(review): compared after incrementing, so `limit == 0`
                // behaves as "no limit" — confirm that is intended.
                if matched == limit {
                    break;
                }
            }
            drop(input);
            drop(filters);
        })
        .detach();
        matching_paths_rx
    }
750
    /// Recursively scans an ignored directory (whose contents are not indexed
    /// in worktree snapshots), feeding files that pass the query's path
    /// filters into the search pipeline in sorted order. Returns a boxed
    /// future because the function recurses into subdirectories.
    fn scan_ignored_dir<'a>(
        fs: &'a Arc<dyn Fs>,
        snapshot: &'a worktree::Snapshot,
        path: &'a Path,
        query: &'a SearchQuery,
        filter_tx: &'a Sender<MatchingEntry>,
        output_tx: &'a Sender<oneshot::Receiver<ProjectPath>>,
    ) -> BoxFuture<'a, Result<()>> {
        async move {
            let abs_path = snapshot.abs_path().join(path);
            // Failing to list the directory is non-fatal: log and skip it.
            let Some(mut files) = fs
                .read_dir(&abs_path)
                .await
                .with_context(|| format!("listing ignored path {abs_path:?}"))
                .log_err()
            else {
                return Ok(());
            };

            let mut results = Vec::new();

            while let Some(Ok(file)) = files.next().await {
                // Entries whose metadata can't be fetched are skipped, not fatal.
                let Some(metadata) = fs
                    .metadata(&file)
                    .await
                    .with_context(|| format!("fetching fs metadata for {abs_path:?}"))
                    .log_err()
                    .flatten()
                else {
                    continue;
                };
                // Skip symlinks (cycles) and FIFOs (blocking reads).
                if metadata.is_symlink || metadata.is_fifo {
                    continue;
                }
                results.push((
                    file.strip_prefix(snapshot.abs_path())?.to_path_buf(),
                    !metadata.is_dir,
                ))
            }
            // Sort for a stable, deterministic traversal order.
            results.sort_by(|(a_path, _), (b_path, _)| a_path.cmp(b_path));
            for (path, is_file) in results {
                if is_file {
                    if query.filters_path() {
                        let matched_path = if query.match_full_paths() {
                            let mut full_path = PathBuf::from(snapshot.root_name());
                            full_path.push(&path);
                            query.match_path(&full_path)
                        } else {
                            query.match_path(&path)
                        };
                        if !matched_path {
                            continue;
                        }
                    }
                    // Register the candidate: the receiver preserves output
                    // ordering; a filter worker responds if the content matches.
                    let (tx, rx) = oneshot::channel();
                    output_tx.send(rx).await?;
                    filter_tx
                        .send(MatchingEntry {
                            respond: tx,
                            worktree_path: snapshot.abs_path().clone(),
                            path: ProjectPath {
                                worktree_id: snapshot.id(),
                                path: Arc::from(path),
                            },
                        })
                        .await?;
                } else {
                    Self::scan_ignored_dir(fs, snapshot, &path, query, filter_tx, output_tx)
                        .await?;
                }
            }
            Ok(())
        }
        .boxed()
    }
826
    /// Walks every snapshot's entries, forwarding files that pass the query's
    /// path filters into the search pipeline.
    ///
    /// Files already open in the editor (`open_entries`) bypass the on-disk
    /// content filter and are reported directly — their buffer may differ from
    /// disk. Ignored directories are scanned on demand via
    /// [`Self::scan_ignored_dir`] unless excluded by settings.
    async fn find_candidate_paths(
        fs: Arc<dyn Fs>,
        snapshots: Vec<(worktree::Snapshot, WorktreeSettings)>,
        open_entries: HashSet<ProjectEntryId>,
        query: SearchQuery,
        filter_tx: Sender<MatchingEntry>,
        output_tx: Sender<oneshot::Receiver<ProjectPath>>,
    ) -> Result<()> {
        for (snapshot, settings) in snapshots {
            for entry in snapshot.entries(query.include_ignored(), 0) {
                if entry.is_dir() && entry.is_ignored {
                    if !settings.is_path_excluded(&entry.path) {
                        Self::scan_ignored_dir(
                            &fs,
                            &snapshot,
                            &entry.path,
                            &query,
                            &filter_tx,
                            &output_tx,
                        )
                        .await?;
                    }
                    continue;
                }

                // Only regular files can match; skip directories and FIFOs.
                if entry.is_fifo || !entry.is_file() {
                    continue;
                }

                if query.filters_path() {
                    let matched_path = if query.match_full_paths() {
                        let mut full_path = PathBuf::from(snapshot.root_name());
                        full_path.push(&entry.path);
                        query.match_path(&full_path)
                    } else {
                        query.match_path(&entry.path)
                    };
                    if !matched_path {
                        continue;
                    }
                }

                let (mut tx, rx) = oneshot::channel();

                if open_entries.contains(&entry.id) {
                    // Open buffers skip the on-disk content check entirely.
                    tx.send(ProjectPath {
                        worktree_id: snapshot.id(),
                        path: entry.path.clone(),
                    })
                    .await?;
                } else {
                    filter_tx
                        .send(MatchingEntry {
                            respond: tx,
                            worktree_path: snapshot.abs_path().clone(),
                            path: ProjectPath {
                                worktree_id: snapshot.id(),
                                path: entry.path.clone(),
                            },
                        })
                        .await?;
                }

                // Queue the receiver so results preserve worktree order.
                output_tx.send(rx).await?;
            }
        }
        Ok(())
    }
895
    /// Worker loop: reads each candidate file from disk and, when its content
    /// matches `query`, reports the path back through the entry's `respond`
    /// channel. Files whose leading bytes are not valid UTF-8 are treated as
    /// binary and skipped.
    async fn filter_paths(
        fs: &Arc<dyn Fs>,
        input: Receiver<MatchingEntry>,
        query: &SearchQuery,
    ) -> Result<()> {
        let mut input = pin!(input);
        while let Some(mut entry) = input.next().await {
            let abs_path = entry.worktree_path.join(&entry.path.path);
            // Unreadable files are skipped, not fatal.
            let Some(file) = fs.open_sync(&abs_path).await.log_err() else {
                continue;
            };

            let mut file = BufReader::new(file);
            // Peek at the buffered prefix without consuming it.
            let file_start = file.fill_buf()?;

            // `error_len() == None` means a char was merely truncated at the
            // buffer boundary, which is fine; `Some(_)` means invalid bytes.
            if let Err(Some(starting_position)) =
                std::str::from_utf8(file_start).map_err(|e| e.error_len())
            {
                // Before attempting to match the file content, throw away files that have invalid UTF-8 sequences early on;
                // That way we can still match files in a streaming fashion without having look at "obviously binary" files.
                log::debug!(
                    "Invalid UTF-8 sequence in file {abs_path:?} at byte position {starting_position}"
                );
                continue;
            }

            if query.detect(file).unwrap_or(false) {
                entry.respond.send(entry.path).await?
            }
        }

        Ok(())
    }
929
930 pub async fn handle_create_project_entry(
931 this: Entity<Self>,
932 envelope: TypedEnvelope<proto::CreateProjectEntry>,
933 mut cx: AsyncApp,
934 ) -> Result<proto::ProjectEntryResponse> {
935 let worktree = this.update(&mut cx, |this, cx| {
936 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
937 this.worktree_for_id(worktree_id, cx)
938 .context("worktree not found")
939 })??;
940 Worktree::handle_create_entry(worktree, envelope.payload, cx).await
941 }
942
943 pub async fn handle_copy_project_entry(
944 this: Entity<Self>,
945 envelope: TypedEnvelope<proto::CopyProjectEntry>,
946 mut cx: AsyncApp,
947 ) -> Result<proto::ProjectEntryResponse> {
948 let entry_id = ProjectEntryId::from_proto(envelope.payload.entry_id);
949 let worktree = this.update(&mut cx, |this, cx| {
950 this.worktree_for_entry(entry_id, cx)
951 .context("worktree not found")
952 })??;
953 Worktree::handle_copy_entry(worktree, envelope.payload, cx).await
954 }
955
956 pub async fn handle_delete_project_entry(
957 this: Entity<Self>,
958 envelope: TypedEnvelope<proto::DeleteProjectEntry>,
959 mut cx: AsyncApp,
960 ) -> Result<proto::ProjectEntryResponse> {
961 let entry_id = ProjectEntryId::from_proto(envelope.payload.entry_id);
962 let worktree = this.update(&mut cx, |this, cx| {
963 this.worktree_for_entry(entry_id, cx)
964 .context("worktree not found")
965 })??;
966 Worktree::handle_delete_entry(worktree, envelope.payload, cx).await
967 }
968
969 pub async fn handle_expand_project_entry(
970 this: Entity<Self>,
971 envelope: TypedEnvelope<proto::ExpandProjectEntry>,
972 mut cx: AsyncApp,
973 ) -> Result<proto::ExpandProjectEntryResponse> {
974 let entry_id = ProjectEntryId::from_proto(envelope.payload.entry_id);
975 let worktree = this
976 .update(&mut cx, |this, cx| this.worktree_for_entry(entry_id, cx))?
977 .context("invalid request")?;
978 Worktree::handle_expand_entry(worktree, envelope.payload, cx).await
979 }
980
981 pub async fn handle_expand_all_for_project_entry(
982 this: Entity<Self>,
983 envelope: TypedEnvelope<proto::ExpandAllForProjectEntry>,
984 mut cx: AsyncApp,
985 ) -> Result<proto::ExpandAllForProjectEntryResponse> {
986 let entry_id = ProjectEntryId::from_proto(envelope.payload.entry_id);
987 let worktree = this
988 .update(&mut cx, |this, cx| this.worktree_for_entry(entry_id, cx))?
989 .context("invalid request")?;
990 Worktree::handle_expand_all_for_entry(worktree, envelope.payload, cx).await
991 }
992
993 pub fn fs(&self) -> Option<Arc<dyn Fs>> {
994 match &self.state {
995 WorktreeStoreState::Local { fs } => Some(fs.clone()),
996 WorktreeStoreState::Remote { .. } => None,
997 }
998 }
999}
1000
/// A handle to a worktree that is either kept alive by the store (`Strong`)
/// or allowed to drop once no longer referenced elsewhere (`Weak`).
#[derive(Clone, Debug)]
enum WorktreeHandle {
    Strong(Entity<Worktree>),
    Weak(WeakEntity<Worktree>),
}
1006
1007impl WorktreeHandle {
1008 fn upgrade(&self) -> Option<Entity<Worktree>> {
1009 match self {
1010 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1011 WorktreeHandle::Weak(handle) => handle.upgrade(),
1012 }
1013 }
1014}