1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
19 Operation, PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use lsp::LanguageServer;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::{Deref, Range},
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::Bias;
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // Cached `OsStr` for the `.gitignore` filename so directory scans can
    // compare entry names without allocating a new `OsStr` each time.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// Progress of the background filesystem scanner for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan in progress; the snapshot is up to date.
    Idle,
    /// A scan is running; the snapshot may be stale until it finishes.
    Scanning,
    /// The last scan failed with the contained error.
    Err(Arc<anyhow::Error>),
}
59
/// A tree of files either owned by this process (`Local`) or replicated
/// from a remote collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// The worktree was closed and should no longer be used.
    Closed,
}
68
impl Entity for Worktree {
    type Event = Event;

    /// On app shutdown, ask every language server owned by a local worktree
    /// to shut down, returning a future that resolves once all of them have.
    /// Remote worktrees own no language servers, so they have nothing to do.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        if let Self::Local(worktree) = self {
            // Drain the map so the server handles are owned by the futures
            // and dropped once shutdown completes.
            let shutdown_futures = worktree
                .language_servers
                .drain()
                .filter_map(|(_, server)| server.shutdown())
                .collect::<Vec<_>>();
            Some(
                async move {
                    futures::future::join_all(shutdown_futures).await;
                }
                .boxed(),
            )
        } else {
            None
        }
    }
}
95
96impl Worktree {
/// Open a worktree rooted at `path` on the local filesystem and kick off
/// the background scanner task that keeps its snapshot in sync with disk.
pub async fn open_local(
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    path: impl Into<Arc<Path>>,
    fs: Arc<dyn Fs>,
    languages: Arc<LanguageRegistry>,
    cx: &mut AsyncAppContext,
) -> Result<ModelHandle<Self>> {
    let (tree, scan_states_tx) =
        LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
    tree.update(cx, |tree, cx| {
        let tree = tree.as_local_mut().unwrap();
        let abs_path = tree.snapshot.abs_path.clone();
        let background_snapshot = tree.background_snapshot.clone();
        let background = cx.background().clone();
        // The scanner runs for the lifetime of the worktree; the task handle
        // is stored so it is dropped (cancelled) along with the worktree.
        tree._background_scanner_task = Some(cx.background().spawn(async move {
            // Coalesce fs events with a 100ms debounce window.
            let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
            let scanner =
                BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
            scanner.run(events).await;
        }));
    });
    Ok(tree)
}
121
/// Construct a remote (replicated) worktree from a protobuf snapshot sent
/// by the project host, and spawn the tasks that apply incremental updates.
pub async fn remote(
    project_remote_id: u64,
    replica_id: ReplicaId,
    worktree: proto::Worktree,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    languages: Arc<LanguageRegistry>,
    cx: &mut AsyncAppContext,
) -> Result<ModelHandle<Self>> {
    let remote_id = worktree.id;
    // Lower-cased character bag of the root name, used for fuzzy matching.
    let root_char_bag: CharBag = worktree
        .root_name
        .chars()
        .map(|c| c.to_ascii_lowercase())
        .collect();
    let root_name = worktree.root_name.clone();
    // Deserialize the entry list off the main thread; invalid entries are
    // logged and skipped rather than failing the whole worktree.
    let (entries_by_path, entries_by_id) = cx
        .background()
        .spawn(async move {
            let mut entries_by_path_edits = Vec::new();
            let mut entries_by_id_edits = Vec::new();
            for entry in worktree.entries {
                match Entry::try_from((&root_char_bag, entry)) {
                    Ok(entry) => {
                        entries_by_id_edits.push(Edit::Insert(PathEntry {
                            id: entry.id,
                            path: entry.path.clone(),
                            is_ignored: entry.is_ignored,
                            scan_id: 0,
                        }));
                        entries_by_path_edits.push(Edit::Insert(entry));
                    }
                    Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                }
            }

            let mut entries_by_path = SumTree::new();
            let mut entries_by_id = SumTree::new();
            entries_by_path.edit(entries_by_path_edits, &());
            entries_by_id.edit(entries_by_id_edits, &());
            (entries_by_path, entries_by_id)
        })
        .await;

    let worktree = cx.update(|cx| {
        cx.add_model(|cx: &mut ModelContext<Worktree>| {
            let snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                // Remote worktrees have no local filesystem root.
                abs_path: Path::new("").into(),
                root_name,
                root_char_bag,
                ignores: Default::default(),
                entries_by_path,
                entries_by_id,
                removed_entry_ids: Default::default(),
                next_entry_id: Default::default(),
            };

            // Channel carrying incremental updates from the host; updates are
            // folded into the watched snapshot on a background thread.
            let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
            let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

            cx.background()
                .spawn(async move {
                    while let Some(update) = updates_rx.recv().await {
                        let mut snapshot = snapshot_tx.borrow().clone();
                        if let Err(error) = snapshot.apply_update(update) {
                            log::error!("error applying worktree update: {}", error);
                        }
                        *snapshot_tx.borrow_mut() = snapshot;
                    }
                })
                .detach();

            {
                // Re-poll the model whenever the background snapshot changes,
                // stopping once the model itself has been dropped.
                let mut snapshot_rx = snapshot_rx.clone();
                cx.spawn_weak(|this, mut cx| async move {
                    while let Some(_) = snapshot_rx.recv().await {
                        if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                        } else {
                            break;
                        }
                    }
                })
                .detach();
            }

            Worktree::Remote(RemoteWorktree {
                project_remote_id,
                remote_id,
                replica_id,
                snapshot,
                snapshot_rx,
                updates_tx,
                client: client.clone(),
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                diagnostic_summaries: HashMap::default(),
                queued_operations: Default::default(),
                languages,
                user_store,
            })
        })
    });

    Ok(worktree)
}
230
231 pub fn as_local(&self) -> Option<&LocalWorktree> {
232 if let Worktree::Local(worktree) = self {
233 Some(worktree)
234 } else {
235 None
236 }
237 }
238
239 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
240 if let Worktree::Local(worktree) = self {
241 Some(worktree)
242 } else {
243 None
244 }
245 }
246
247 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
248 if let Worktree::Remote(worktree) = self {
249 Some(worktree)
250 } else {
251 None
252 }
253 }
254
255 pub fn snapshot(&self) -> Snapshot {
256 match self {
257 Worktree::Local(worktree) => worktree.snapshot(),
258 Worktree::Remote(worktree) => worktree.snapshot(),
259 }
260 }
261
262 pub fn replica_id(&self) -> ReplicaId {
263 match self {
264 Worktree::Local(_) => 0,
265 Worktree::Remote(worktree) => worktree.replica_id,
266 }
267 }
268
269 pub fn remove_collaborator(
270 &mut self,
271 peer_id: PeerId,
272 replica_id: ReplicaId,
273 cx: &mut ModelContext<Self>,
274 ) {
275 match self {
276 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
277 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
278 }
279 }
280
281 pub fn languages(&self) -> &Arc<LanguageRegistry> {
282 match self {
283 Worktree::Local(worktree) => &worktree.languages,
284 Worktree::Remote(worktree) => &worktree.languages,
285 }
286 }
287
288 pub fn user_store(&self) -> &ModelHandle<UserStore> {
289 match self {
290 Worktree::Local(worktree) => &worktree.user_store,
291 Worktree::Remote(worktree) => &worktree.user_store,
292 }
293 }
294
/// Handle a peer's request to open a buffer in this worktree, responding
/// over RPC with the buffer's serialized state once it has loaded.
pub fn handle_open_buffer(
    &mut self,
    envelope: TypedEnvelope<proto::OpenBuffer>,
    rpc: Arc<Client>,
    cx: &mut ModelContext<Self>,
) -> anyhow::Result<()> {
    let receipt = envelope.receipt();

    // NOTE(review): panics on a remote worktree — presumably the RPC layer
    // only routes this message to local (host) worktrees; confirm at callers.
    let response = self
        .as_local_mut()
        .unwrap()
        .open_remote_buffer(envelope, cx);

    // Respond asynchronously; failures are logged rather than propagated.
    cx.background()
        .spawn(
            async move {
                rpc.respond(receipt, response.await?).await?;
                Ok(())
            }
            .log_err(),
        )
        .detach();

    Ok(())
}
320
/// Handle a peer's notification that it closed one of our shared buffers.
/// Only valid on local worktrees (panics otherwise).
pub fn handle_close_buffer(
    &mut self,
    envelope: TypedEnvelope<proto::CloseBuffer>,
    _: Arc<Client>,
    cx: &mut ModelContext<Self>,
) -> anyhow::Result<()> {
    self.as_local_mut()
        .unwrap()
        .close_remote_buffer(envelope, cx)
}
331
332 pub fn diagnostic_summaries<'a>(
333 &'a self,
334 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
335 match self {
336 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
337 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
338 }
339 .iter()
340 .map(|(path, summary)| (path.clone(), summary.clone()))
341 }
342
343 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
344 match self {
345 Worktree::Local(worktree) => &mut worktree.loading_buffers,
346 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
347 }
348 }
349
/// Open the buffer at `path`, deduplicating concurrent requests: an
/// already-open buffer is returned immediately, an in-flight load is
/// awaited, and otherwise a new load is started and registered.
pub fn open_buffer(
    &mut self,
    path: impl AsRef<Path>,
    cx: &mut ModelContext<Self>,
) -> Task<Result<ModelHandle<Buffer>>> {
    let path = path.as_ref();

    // If there is already a buffer for the given path, then return it.
    let existing_buffer = match self {
        Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
        Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
    };
    if let Some(existing_buffer) = existing_buffer {
        return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
    }

    let path: Arc<Path> = Arc::from(path);
    let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
        // If the given path is already being loaded, then wait for that existing
        // task to complete and return the same buffer.
        hash_map::Entry::Occupied(e) => e.get().clone(),

        // Otherwise, record the fact that this path is now being loaded.
        hash_map::Entry::Vacant(entry) => {
            let (mut tx, rx) = postage::watch::channel();
            entry.insert(rx.clone());

            let load_buffer = match self {
                Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
            };
            cx.spawn(move |this, mut cx| async move {
                let result = load_buffer.await;

                // After the buffer loads, record the fact that it is no longer
                // loading.
                this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                // Broadcast the result (success or error) to all waiters.
                *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
            })
            .detach();
            rx
        }
    };

    // Wait until the watch holds a result, then convert the shared error
    // back into a fresh `anyhow::Error` for this caller.
    cx.spawn(|_, _| async move {
        loop {
            if let Some(result) = loading_watch.borrow().as_ref() {
                return result.clone().map_err(|e| anyhow!("{}", e));
            }
            loading_watch.recv().await;
        }
    })
}
403
/// Test-support helper: returns whether a buffer for `path` is currently
/// open (and still alive) in this worktree.
#[cfg(feature = "test-support")]
pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
    // Present the open buffers of either variant behind a uniform iterator;
    // remote worktrees only count buffers that have finished loading.
    let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
        Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
        Worktree::Remote(worktree) => {
            Box::new(worktree.open_buffers.values().filter_map(|buf| {
                if let RemoteBuffer::Loaded(buf) = buf {
                    Some(buf)
                } else {
                    None
                }
            }))
        }
    };

    let path = path.as_ref();
    // `any` short-circuits and states the intent directly, replacing the
    // previous `.find(..).is_some()` (clippy: search_is_some).
    open_buffers.any(|buffer| {
        if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
            file.path().as_ref() == path
        } else {
            false
        }
    })
}
430
431 pub fn handle_update_buffer(
432 &mut self,
433 envelope: TypedEnvelope<proto::UpdateBuffer>,
434 cx: &mut ModelContext<Self>,
435 ) -> Result<()> {
436 let payload = envelope.payload.clone();
437 let buffer_id = payload.buffer_id as usize;
438 let ops = payload
439 .operations
440 .into_iter()
441 .map(|op| language::proto::deserialize_operation(op))
442 .collect::<Result<Vec<_>, _>>()?;
443
444 match self {
445 Worktree::Local(worktree) => {
446 let buffer = worktree
447 .open_buffers
448 .get(&buffer_id)
449 .and_then(|buf| buf.upgrade(cx))
450 .ok_or_else(|| {
451 anyhow!("invalid buffer {} in update buffer message", buffer_id)
452 })?;
453 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
454 }
455 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
456 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
457 Some(RemoteBuffer::Loaded(buffer)) => {
458 if let Some(buffer) = buffer.upgrade(cx) {
459 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
460 } else {
461 worktree
462 .open_buffers
463 .insert(buffer_id, RemoteBuffer::Operations(ops));
464 }
465 }
466 None => {
467 worktree
468 .open_buffers
469 .insert(buffer_id, RemoteBuffer::Operations(ops));
470 }
471 },
472 }
473
474 Ok(())
475 }
476
/// Handle a guest's request to save a buffer they have open through us,
/// replying with the saved version and mtime once the write completes.
pub fn handle_save_buffer(
    &mut self,
    envelope: TypedEnvelope<proto::SaveBuffer>,
    rpc: Arc<Client>,
    cx: &mut ModelContext<Self>,
) -> Result<()> {
    let sender_id = envelope.original_sender_id()?;
    // Only the host (local worktree) can save; panics otherwise.
    let this = self.as_local().unwrap();
    let project_id = this
        .project_remote_id
        .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?;

    // Look up the buffer the sender shared with us.
    let buffer = this
        .shared_buffers
        .get(&sender_id)
        .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
        .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

    let receipt = envelope.receipt();
    let worktree_id = envelope.payload.worktree_id;
    let buffer_id = envelope.payload.buffer_id;
    let save = cx.spawn(|_, mut cx| async move {
        buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
    });

    // Respond asynchronously; failures are logged rather than propagated.
    cx.background()
        .spawn(
            async move {
                let (version, mtime) = save.await?;

                rpc.respond(
                    receipt,
                    proto::BufferSaved {
                        project_id,
                        worktree_id,
                        buffer_id,
                        version: (&version).into(),
                        mtime: Some(mtime.into()),
                    },
                )
                .await?;

                Ok(())
            }
            .log_err(),
        )
        .detach();

    Ok(())
}
527
/// Handle the host's notification that a buffer was saved, updating the
/// local replica's saved version and mtime. Only valid on remote worktrees.
pub fn handle_buffer_saved(
    &mut self,
    envelope: TypedEnvelope<proto::BufferSaved>,
    cx: &mut ModelContext<Self>,
) -> Result<()> {
    let payload = envelope.payload.clone();
    let worktree = self.as_remote_mut().unwrap();
    // Silently ignore buffers we no longer have open.
    if let Some(buffer) = worktree
        .open_buffers
        .get(&(payload.buffer_id as usize))
        .and_then(|buf| buf.upgrade(cx))
    {
        buffer.update(cx, |buffer, cx| {
            let version = payload.version.try_into()?;
            let mtime = payload
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();
            buffer.did_save(version, mtime, None, cx);
            Result::<_, anyhow::Error>::Ok(())
        })?;
    }
    Ok(())
}
552
/// Refresh the foreground snapshot from the background copy and update any
/// open buffers. While a local scan is in flight, schedules itself to poll
/// again (immediately under the fake fs in tests, every 100ms otherwise).
fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
    match self {
        Self::Local(worktree) => {
            let is_fake_fs = worktree.fs.is_fake();
            worktree.snapshot = worktree.background_snapshot.lock().clone();
            if worktree.is_scanning() {
                // Only one re-poll task at a time.
                if worktree.poll_task.is_none() {
                    worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                        if is_fake_fs {
                            // Tests drive the fake fs synchronously; just yield.
                            smol::future::yield_now().await;
                        } else {
                            smol::Timer::after(Duration::from_millis(100)).await;
                        }
                        this.update(&mut cx, |this, cx| {
                            this.as_local_mut().unwrap().poll_task = None;
                            this.poll_snapshot(cx);
                        })
                    }));
                }
            } else {
                // Scan finished: stop polling and reconcile open buffers.
                worktree.poll_task.take();
                self.update_open_buffers(cx);
            }
        }
        Self::Remote(worktree) => {
            worktree.snapshot = worktree.snapshot_rx.borrow().clone();
            self.update_open_buffers(cx);
        }
    };

    cx.notify();
}
585
/// Reconcile every open buffer's `File` with the latest snapshot: re-resolve
/// its entry by id, falling back to lookup by path, and mark it as deleted
/// (no entry id) if neither is found. Dropped buffers are pruned from the map.
fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
    let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
        Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
        Self::Remote(worktree) => {
            // Remote worktrees only track buffers that finished loading.
            Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                if let RemoteBuffer::Loaded(buf) = buf {
                    Some((id, buf))
                } else {
                    None
                }
            }))
        }
    };

    let local = self.as_local().is_some();
    // NOTE(review): `abs_path`, `entry_for_id` and `entry_for_path` are
    // presumably reached through a `Deref` to `Snapshot` defined elsewhere
    // in this file — confirm when viewing the full file.
    let worktree_path = self.abs_path.clone();
    let worktree_handle = cx.handle();
    let mut buffers_to_delete = Vec::new();
    for (buffer_id, buffer) in open_buffers {
        if let Some(buffer) = buffer.upgrade(cx) {
            buffer.update(cx, |buffer, cx| {
                if let Some(old_file) = buffer.file() {
                    // Prefer resolving by stable entry id (survives renames).
                    let new_file = if let Some(entry) = old_file
                        .entry_id()
                        .and_then(|entry_id| self.entry_for_id(entry_id))
                    {
                        File {
                            is_local: local,
                            worktree_path: worktree_path.clone(),
                            entry_id: Some(entry.id),
                            mtime: entry.mtime,
                            path: entry.path.clone(),
                            worktree: worktree_handle.clone(),
                        }
                    } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                        // Fall back to the old path (e.g. recreated file).
                        File {
                            is_local: local,
                            worktree_path: worktree_path.clone(),
                            entry_id: Some(entry.id),
                            mtime: entry.mtime,
                            path: entry.path.clone(),
                            worktree: worktree_handle.clone(),
                        }
                    } else {
                        // File no longer exists in the snapshot.
                        File {
                            is_local: local,
                            worktree_path: worktree_path.clone(),
                            entry_id: None,
                            path: old_file.path().clone(),
                            mtime: old_file.mtime(),
                            worktree: worktree_handle.clone(),
                        }
                    };

                    if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                        task.detach();
                    }
                }
            });
        } else {
            // The buffer model was dropped; remove it after iteration.
            buffers_to_delete.push(*buffer_id);
        }
    }

    for buffer_id in buffers_to_delete {
        match self {
            Self::Local(worktree) => {
                worktree.open_buffers.remove(&buffer_id);
            }
            Self::Remote(worktree) => {
                worktree.open_buffers.remove(&buffer_id);
            }
        }
    }
}
661
662 fn update_diagnostics(
663 &mut self,
664 params: lsp::PublishDiagnosticsParams,
665 cx: &mut ModelContext<Worktree>,
666 ) -> Result<()> {
667 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
668 let abs_path = params
669 .uri
670 .to_file_path()
671 .map_err(|_| anyhow!("URI is not a file"))?;
672 let worktree_path = Arc::from(
673 abs_path
674 .strip_prefix(&this.abs_path)
675 .context("path is not within worktree")?,
676 );
677
678 let mut group_ids_by_diagnostic_range = HashMap::default();
679 let mut diagnostics_by_group_id = HashMap::default();
680 let mut next_group_id = 0;
681 for diagnostic in ¶ms.diagnostics {
682 let source = diagnostic.source.as_ref();
683 let code = diagnostic.code.as_ref();
684 let group_id = diagnostic_ranges(&diagnostic, &abs_path)
685 .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
686 .copied()
687 .unwrap_or_else(|| {
688 let group_id = post_inc(&mut next_group_id);
689 for range in diagnostic_ranges(&diagnostic, &abs_path) {
690 group_ids_by_diagnostic_range.insert((source, code, range), group_id);
691 }
692 group_id
693 });
694
695 diagnostics_by_group_id
696 .entry(group_id)
697 .or_insert(Vec::new())
698 .push(DiagnosticEntry {
699 range: diagnostic.range.start.to_point_utf16()
700 ..diagnostic.range.end.to_point_utf16(),
701 diagnostic: Diagnostic {
702 source: diagnostic.source.clone(),
703 code: diagnostic.code.clone().map(|code| match code {
704 lsp::NumberOrString::Number(code) => code.to_string(),
705 lsp::NumberOrString::String(code) => code,
706 }),
707 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
708 message: diagnostic.message.clone(),
709 group_id,
710 is_primary: false,
711 },
712 });
713 }
714
715 let diagnostics = diagnostics_by_group_id
716 .into_values()
717 .flat_map(|mut diagnostics| {
718 let primary = diagnostics
719 .iter_mut()
720 .min_by_key(|entry| entry.diagnostic.severity)
721 .unwrap();
722 primary.diagnostic.is_primary = true;
723 diagnostics
724 })
725 .collect::<Vec<_>>();
726
727 for buffer in this.open_buffers.values() {
728 if let Some(buffer) = buffer.upgrade(cx) {
729 if buffer
730 .read(cx)
731 .file()
732 .map_or(false, |file| *file.path() == worktree_path)
733 {
734 let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
735 (
736 buffer.remote_id(),
737 buffer.update_diagnostics(params.version, diagnostics.clone(), cx),
738 )
739 });
740 self.send_buffer_update(remote_id, operation?, cx);
741 break;
742 }
743 }
744 }
745
746 let this = self.as_local_mut().unwrap();
747 this.diagnostic_summaries
748 .insert(worktree_path.clone(), DiagnosticSummary::new(&diagnostics));
749 this.diagnostics.insert(worktree_path.clone(), diagnostics);
750 Ok(())
751 }
752
/// Replicate a buffer operation to the other side of the project, queueing
/// it for retry on failure. No-ops for a local worktree that isn't shared.
fn send_buffer_update(
    &mut self,
    buffer_id: u64,
    operation: Operation,
    cx: &mut ModelContext<Self>,
) {
    if let Some((rpc, project_id)) = match self {
        // A local worktree only has a project id while it is shared.
        Worktree::Local(worktree) => worktree
            .project_remote_id
            .map(|id| (worktree.client.clone(), id)),
        Worktree::Remote(worktree) => {
            Some((worktree.client.clone(), worktree.project_remote_id))
        }
    } {
        cx.spawn(|worktree, mut cx| async move {
            if let Err(error) = rpc
                .request(proto::UpdateBuffer {
                    project_id,
                    worktree_id: worktree.id() as u64,
                    buffer_id,
                    operations: vec![language::proto::serialize_operation(&operation)],
                })
                .await
            {
                // Keep the operation for a later retry instead of losing it.
                worktree.update(&mut cx, |worktree, _| {
                    log::error!("error sending buffer operation: {}", error);
                    match worktree {
                        Worktree::Local(t) => &mut t.queued_operations,
                        Worktree::Remote(t) => &mut t.queued_operations,
                    }
                    .push((buffer_id, operation));
                });
            }
        })
        .detach();
    }
}
790}
791
/// An immutable view of a worktree's file hierarchy at a point in time.
/// Cheap to clone; shared between the foreground model and the scanner.
#[derive(Clone)]
pub struct Snapshot {
    // Model id of the owning worktree.
    id: usize,
    // Incremented on every background scan; used to detect stale entries.
    scan_id: usize,
    // Absolute root path on disk (empty for remote worktrees).
    abs_path: Arc<Path>,
    // Root directory name, used for display and fuzzy matching.
    root_name: String,
    // Lower-cased character bag of `root_name` for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed `.gitignore` per directory, keyed by the directory's path,
    // along with the scan id at which it was loaded.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    // Entries indexed by stable id, for rename-tolerant lookup.
    entries_by_id: SumTree<PathEntry>,
    // Inode -> entry id of entries removed in the current scan, so renames
    // can preserve ids.
    removed_entry_ids: HashMap<u64, usize>,
    // Shared counter for allocating entry ids.
    next_entry_id: Arc<AtomicUsize>,
}
805
/// A worktree backed by the local filesystem, owned by this process.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Settings parsed from `.zed.toml` at the worktree root.
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Most recent scanner state (Idle / Scanning / Err).
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner alive; dropped with the worktree.
    _background_scanner_task: Option<Task<()>>,
    // Set while the containing project is shared over RPC.
    project_remote_id: Option<u64>,
    // In-flight re-poll task while a scan is running.
    poll_task: Option<Task<()>>,
    // Present while this worktree is being shared with guests.
    share: Option<ShareState>,
    // Buffers currently being loaded, keyed by path.
    loading_buffers: LoadingBuffers,
    // Weak handles to open buffers, keyed by buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened by remote peers, per peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Diagnostics for files that are not currently open.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    // Per-file diagnostic counts for UI display.
    diagnostic_summaries: HashMap<Arc<Path>, DiagnosticSummary>,
    // Buffer operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
    languages: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Running language servers, keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
827
/// State held while a local worktree is shared with guests: the channel
/// over which fresh snapshots are sent out for broadcasting.
struct ShareState {
    snapshots_tx: Sender<Snapshot>,
}
831
/// A read-through replica of a worktree hosted by another peer.
pub struct RemoteWorktree {
    // Id of the project this worktree belongs to on the host.
    project_remote_id: u64,
    // Id of this worktree on the host.
    remote_id: u64,
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Watch whose value is updated as host updates are applied.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incremental updates from the host are queued here.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    // This peer's replica id for CRDT operations.
    replica_id: ReplicaId,
    // Buffers currently being loaded, keyed by path.
    loading_buffers: LoadingBuffers,
    // Open buffers, or queued operations for buffers still loading.
    open_buffers: HashMap<usize, RemoteBuffer>,
    // Per-file diagnostic counts for UI display.
    diagnostic_summaries: HashMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Buffer operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
}
847
// Map from path to a watch that resolves once the buffer at that path has
// finished loading (successfully or with a shared error).
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
852
/// Per-worktree settings loaded from `.zed.toml` at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins permitted to join this worktree as collaborators.
    collaborators: Vec<String>,
}
857
858impl LocalWorktree {
/// Build a local worktree model for `path`: read root metadata and
/// `.zed.toml`, seed the snapshot with the root entry, and spawn the task
/// that reacts to scan-state changes. Returns the model plus the sender the
/// background scanner uses to report its state.
async fn new(
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    path: impl Into<Arc<Path>>,
    fs: Arc<dyn Fs>,
    languages: Arc<LanguageRegistry>,
    cx: &mut AsyncAppContext,
) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
    let abs_path = path.into();
    // Worktree-relative path of the root entry is always empty.
    let path: Arc<Path> = Arc::from(Path::new(""));
    let next_entry_id = AtomicUsize::new(0);

    // After determining whether the root entry is a file or a directory, populate the
    // snapshot's "root name", which will be used for the purpose of fuzzy matching.
    let root_name = abs_path
        .file_name()
        .map_or(String::new(), |f| f.to_string_lossy().to_string());
    let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
    let metadata = fs.metadata(&abs_path).await?;

    // Best-effort config load: a missing or malformed `.zed.toml` just
    // leaves the defaults in place.
    let mut config = WorktreeConfig::default();
    if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
        if let Ok(parsed) = toml::from_str(&zed_toml) {
            config = parsed;
        }
    }

    let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
    let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
    let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
        let mut snapshot = Snapshot {
            id: cx.model_id(),
            scan_id: 0,
            abs_path,
            root_name: root_name.clone(),
            root_char_bag,
            ignores: Default::default(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            next_entry_id: Arc::new(next_entry_id),
        };
        // Seed the snapshot with the root entry if it exists on disk.
        if let Some(metadata) = metadata {
            snapshot.insert_entry(
                Entry::new(
                    path.into(),
                    &metadata,
                    &snapshot.next_entry_id,
                    snapshot.root_char_bag,
                ),
                fs.as_ref(),
            );
        }

        let tree = Self {
            snapshot: snapshot.clone(),
            config,
            project_remote_id: None,
            background_snapshot: Arc::new(Mutex::new(snapshot)),
            last_scan_state_rx,
            _background_scanner_task: None,
            share: None,
            poll_task: None,
            loading_buffers: Default::default(),
            open_buffers: Default::default(),
            shared_buffers: Default::default(),
            diagnostics: Default::default(),
            diagnostic_summaries: Default::default(),
            queued_operations: Default::default(),
            languages,
            client,
            user_store,
            fs,
            language_servers: Default::default(),
        };

        // React to scan-state reports: publish the latest state, re-poll the
        // snapshot, and — once a scan completes while sharing — forward the
        // fresh snapshot to guests. Exits when the model is dropped.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(scan_state) = scan_states_rx.recv().await {
                if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                    let to_send = handle.update(&mut cx, |this, cx| {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.poll_snapshot(cx);
                        let tree = this.as_local_mut().unwrap();
                        if !tree.is_scanning() {
                            if let Some(share) = tree.share.as_ref() {
                                return Some((tree.snapshot(), share.snapshots_tx.clone()));
                            }
                        }
                        None
                    });

                    if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                        if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                            log::error!("error submitting snapshot to send {}", err);
                        }
                    }
                } else {
                    break;
                }
            }
        })
        .detach();

        Worktree::Local(tree)
    });

    Ok((tree, scan_states_tx))
}
967
/// Record (or clear) the remote project id; set while the project is shared.
pub fn set_project_remote_id(&mut self, id: Option<u64>) {
    self.project_remote_id = id;
}
971
972 pub fn authorized_logins(&self) -> Vec<String> {
973 self.config.collaborators.clone()
974 }
975
/// The language registry used to detect languages and start servers.
pub fn languages(&self) -> &LanguageRegistry {
    &self.languages
}
979
/// Return the running language server for `language`, starting one (and
/// wiring up its diagnostics stream) if none exists yet. Returns `None` if
/// the server fails to start or the language has no server.
pub fn ensure_language_server(
    &mut self,
    language: &Language,
    cx: &mut ModelContext<Worktree>,
) -> Option<Arc<LanguageServer>> {
    // Reuse an already-running server for this language.
    if let Some(server) = self.language_servers.get(language.name()) {
        return Some(server.clone());
    }

    if let Some(language_server) = language
        .start_server(self.abs_path(), cx)
        .log_err()
        .flatten()
    {
        // Bridge LSP `publishDiagnostics` notifications into a channel…
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                smol::block_on(diagnostics_tx.send(params)).ok();
            })
            .detach();
        // …and consume it on the worktree, stopping once the model is dropped.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(diagnostics) = diagnostics_rx.recv().await {
                if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                    handle.update(&mut cx, |this, cx| {
                        this.update_diagnostics(diagnostics, cx).log_err();
                    });
                } else {
                    break;
                }
            }
        })
        .detach();

        self.language_servers
            .insert(language.name().to_string(), language_server.clone());
        Some(language_server.clone())
    } else {
        None
    }
}
1020
/// Find an already-open buffer for `path`, pruning dropped buffers from the
/// map as a side effect of the search.
fn get_open_buffer(
    &mut self,
    path: &Path,
    cx: &mut ModelContext<Worktree>,
) -> Option<ModelHandle<Buffer>> {
    let handle = cx.handle();
    let mut result = None;
    // `retain` doubles as garbage collection: entries whose weak handle no
    // longer upgrades are removed while we scan for a match.
    self.open_buffers.retain(|_buffer_id, buffer| {
        if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
            if let Some(file) = buffer.read(cx.as_ref()).file() {
                if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                    result = Some(buffer);
                }
            }
            true
        } else {
            false
        }
    });
    result
}
1042
/// Load the file at `path` from disk into a new buffer: detect its
/// language, start the matching language server, apply any diagnostics
/// received before the file was opened, and register the buffer.
fn open_buffer(
    &mut self,
    path: &Path,
    cx: &mut ModelContext<Worktree>,
) -> Task<Result<ModelHandle<Buffer>>> {
    let path = Arc::from(path);
    cx.spawn(move |this, mut cx| async move {
        let (file, contents) = this
            .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
            .await?;

        let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
            let this = this.as_local_mut().unwrap();
            // Diagnostics stored for this path are consumed (removed) here
            // and applied to the new buffer below.
            let diagnostics = this.diagnostics.remove(&path);
            let language = this.languages.select_language(file.full_path()).cloned();
            let server = language
                .as_ref()
                .and_then(|language| this.ensure_language_server(language, cx));
            (diagnostics, language, server)
        });

        let buffer = cx.add_model(|cx| {
            let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
            buffer.set_language(language, language_server, cx);
            if let Some(diagnostics) = diagnostics {
                buffer.update_diagnostics(None, diagnostics, cx).unwrap();
            }
            buffer
        });

        // Track the buffer weakly so it can be found by later opens.
        this.update(&mut cx, |this, _| {
            let this = this.as_local_mut().unwrap();
            this.open_buffers.insert(buffer.id(), buffer.downgrade());
        });

        Ok(buffer)
    })
}
1081
/// Open a buffer on behalf of a remote peer, retain a strong handle for
/// them in `shared_buffers`, and serialize the buffer for the response.
pub fn open_remote_buffer(
    &mut self,
    envelope: TypedEnvelope<proto::OpenBuffer>,
    cx: &mut ModelContext<Worktree>,
) -> Task<Result<proto::OpenBufferResponse>> {
    cx.spawn(|this, mut cx| async move {
        let peer_id = envelope.original_sender_id();
        let path = Path::new(&envelope.payload.path);
        let buffer = this
            .update(&mut cx, |this, cx| this.open_buffer(path, cx))
            .await?;
        this.update(&mut cx, |this, cx| {
            // Hold a strong handle so the buffer stays alive for the guest
            // until they close it.
            this.as_local_mut()
                .unwrap()
                .shared_buffers
                .entry(peer_id?)
                .or_default()
                .insert(buffer.id() as u64, buffer.clone());

            Ok(proto::OpenBufferResponse {
                buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
            })
        })
    })
}
1107
/// Drop our strong handle to a buffer a peer has closed, allowing it to be
/// released once no local references remain.
pub fn close_remote_buffer(
    &mut self,
    envelope: TypedEnvelope<proto::CloseBuffer>,
    cx: &mut ModelContext<Worktree>,
) -> Result<()> {
    if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
        shared_buffers.remove(&envelope.payload.buffer_id);
        cx.notify();
    }

    Ok(())
}
1120
1121 pub fn remove_collaborator(
1122 &mut self,
1123 peer_id: PeerId,
1124 replica_id: ReplicaId,
1125 cx: &mut ModelContext<Worktree>,
1126 ) {
1127 self.shared_buffers.remove(&peer_id);
1128 for (_, buffer) in &self.open_buffers {
1129 if let Some(buffer) = buffer.upgrade(cx) {
1130 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1131 }
1132 }
1133 cx.notify();
1134 }
1135
1136 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1137 let mut scan_state_rx = self.last_scan_state_rx.clone();
1138 async move {
1139 let mut scan_state = Some(scan_state_rx.borrow().clone());
1140 while let Some(ScanState::Scanning) = scan_state {
1141 scan_state = scan_state_rx.recv().await;
1142 }
1143 }
1144 }
1145
1146 fn is_scanning(&self) -> bool {
1147 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1148 true
1149 } else {
1150 false
1151 }
1152 }
1153
    /// Returns a clone of the worktree's current snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1157
    /// The absolute path of the worktree's root on disk.
    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }
1161
    /// Whether the given absolute path lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1165
    /// Converts a worktree-relative path into an absolute path. A path with
    /// no final component (e.g. the empty root path) refers to the worktree
    /// root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }
1173
    /// Reads the file at `path` from disk, returning its text along with a
    /// `File` handle describing the on-disk entry.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Make the refreshed entry visible in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1199
    /// Saves `text` at `path`, registers `buffer` as one of this worktree's
    /// open buffers, and returns a `File` handle for the saved entry.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer weakly so it can be found by later opens.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1224
    /// Writes `text` to `path` on disk (on a background thread), refreshes
    /// the corresponding snapshot entry, and returns it.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1246
    /// Starts sharing this worktree with collaborators: spawns a background
    /// task that diffs each newly-produced snapshot against the last one
    /// successfully sent, and streams those diffs to the server.
    pub fn share(&mut self, cx: &mut ModelContext<Worktree>) -> Task<anyhow::Result<()>> {
        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let project_id = self.project_remote_id;
        let worktree_id = cx.model_id() as u64;
        cx.spawn(|this, mut cx| async move {
            let project_id = project_id.ok_or_else(|| anyhow!("no project id"))?;

            let (snapshots_to_send_tx, snapshots_to_send_rx) =
                smol::channel::unbounded::<Snapshot>();
            cx.background()
                .spawn({
                    let rpc = rpc.clone();
                    async move {
                        let mut prev_snapshot = snapshot;
                        while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                            let message = snapshot.build_update(
                                &prev_snapshot,
                                project_id,
                                worktree_id,
                                false,
                            );
                            // Only advance the baseline when the send
                            // succeeds, so a failed diff is retried against
                            // the same baseline next time.
                            match rpc.send(message).await {
                                Ok(()) => prev_snapshot = snapshot,
                                Err(err) => log::error!("error sending snapshot diff {}", err),
                            }
                        }
                    }
                })
                .detach();

            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_local_mut().unwrap();
                worktree.share = Some(ShareState {
                    snapshots_tx: snapshots_to_send_tx,
                });
            });

            Ok(())
        })
    }
1288
    /// Serializes the worktree's id, root name, and non-ignored entries for
    /// transmission to a remote peer.
    pub fn to_proto(&self, cx: &mut ModelContext<Worktree>) -> proto::Worktree {
        let id = cx.model_id() as u64;
        let snapshot = self.snapshot();
        let root_name = self.root_name.clone();
        proto::Worktree {
            id,
            root_name,
            entries: snapshot
                .entries_by_path
                .cursor::<()>()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
        }
    }
1304}
1305
1306fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1307 let contents = smol::block_on(fs.load(&abs_path))?;
1308 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1309 let mut builder = GitignoreBuilder::new(parent);
1310 for line in contents.lines() {
1311 builder.add_line(Some(abs_path.into()), line)?;
1312 }
1313 Ok(builder.build()?)
1314}
1315
/// Both worktree variants expose their `Snapshot` directly via deref.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1326
/// A `LocalWorktree` dereferences to its current snapshot.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1334
/// Debug output delegates to the snapshot's entry listing.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1340
1341impl RemoteWorktree {
    /// Returns the already-open buffer for `path` in this worktree, if any.
    /// As a side effect, prunes map entries whose buffers have been dropped.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            // NOTE(review): `upgrade` returns `None` both for dropped
            // `Loaded` buffers and for `Operations` entries, so pending
            // operations for not-yet-loaded buffers are discarded here —
            // confirm this is intended.
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        existing_buffer
    }
1363
    /// Opens a buffer for `path` by requesting its contents from the host
    /// over RPC, constructing a local replica, and replaying any operations
    /// that arrived while the request was in flight.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_remote_id;
        let remote_worktree_id = self.remote_id;
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        // Spawn weakly so the request is abandoned if the worktree closes.
        cx.spawn_weak(move |this, mut cx| async move {
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id as u64,
                    path: path_string,
                })
                .await?;

            // Re-check that the worktree survived the round trip.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Any operations that arrived before the buffer finished
                // opening were queued; apply them now.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }
1424
1425 pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1426 for (_, buffer) in self.open_buffers.drain() {
1427 if let RemoteBuffer::Loaded(buffer) = buffer {
1428 if let Some(buffer) = buffer.upgrade(cx) {
1429 buffer.update(cx, |buffer, cx| buffer.close(cx))
1430 }
1431 }
1432 }
1433 }
1434
    /// Returns a clone of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1438
1439 pub fn update_from_remote(
1440 &mut self,
1441 envelope: TypedEnvelope<proto::UpdateWorktree>,
1442 cx: &mut ModelContext<Worktree>,
1443 ) -> Result<()> {
1444 let mut tx = self.updates_tx.clone();
1445 let payload = envelope.payload.clone();
1446 cx.background()
1447 .spawn(async move {
1448 tx.send(payload).await.expect("receiver runs to completion");
1449 })
1450 .detach();
1451
1452 Ok(())
1453 }
1454
1455 pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1456 for (_, buffer) in &self.open_buffers {
1457 if let Some(buffer) = buffer.upgrade(cx) {
1458 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1459 }
1460 }
1461 cx.notify();
1462 }
1463}
1464
/// State of a buffer in a remote worktree: either operations that arrived
/// before the buffer finished opening, or a weak handle to the open buffer.
enum RemoteBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
1469
1470impl RemoteBuffer {
1471 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1472 match self {
1473 Self::Operations(_) => None,
1474 Self::Loaded(buffer) => buffer.upgrade(cx),
1475 }
1476 }
1477}
1478
1479impl Snapshot {
    /// This snapshot's worktree id.
    pub fn id(&self) -> usize {
        self.id
    }
1483
    /// Computes a diff between this snapshot and an older snapshot `other`,
    /// expressed as a protobuf update message.
    ///
    /// The two id-ordered entry sequences are merge-joined: ids only in
    /// `self` are additions, ids only in `other` are removals, and shared
    /// ids are re-sent when their `scan_id` changed.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Entry exists only in `self`: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both; re-send only if it changed.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Entry exists only in `other`: removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            updated_entries,
            removed_entries,
        }
    }
1547
    /// Applies a snapshot diff received from the host: removes the listed
    /// entries, then inserts or replaces the updated ones, bumping this
    /// snapshot's scan id.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, drop its old path key before inserting
            // it at the new path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }
1582
    /// Total number of file entries, including ignored files.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
1586
    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1590
    /// Returns a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only entries matching the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1613
    /// Returns a traversal positioned at the entry for `path`, or at the
    /// first entry after where it would sort.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1628
    /// Iterates over file entries (directories are skipped), starting at
    /// file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }
1632
    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }
1636
1637 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1638 let empty_path = Path::new("");
1639 self.entries_by_path
1640 .cursor::<()>()
1641 .filter(move |entry| entry.path.as_ref() != empty_path)
1642 .map(|entry| &entry.path)
1643 }
1644
    /// Iterates over the immediate children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Seek just past the parent entry itself; the iterator then yields
        // entries for as long as they remain direct children.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
1658
    /// The entry for the worktree root (path is empty), if it exists yet.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
1662
    /// The file-system name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
1666
1667 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1668 let path = path.as_ref();
1669 self.traverse_from_path(true, true, path)
1670 .entry()
1671 .and_then(|entry| {
1672 if entry.path.as_ref() == path {
1673 Some(entry)
1674 } else {
1675 None
1676 }
1677 })
1678 }
1679
    /// Looks up an entry by id via the id-to-path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
1684
    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
1688
    /// Inserts (or replaces) a single entry in both trees, reusing a stable
    /// id where possible. Inserting a `.gitignore` file also (re)parses it
    /// into the ignore map for its directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A malformed .gitignore shouldn't prevent inserting
                    // the entry itself; just log it.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }
1721
    /// Records the scanned children of `parent_path`, transitioning the
    /// parent from `PendingDir` to `Dir` and registering its gitignore if
    /// one was found.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // The scanner populates each directory exactly once.
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1759
    /// Gives `entry` a stable id: reuses the id of a recently-removed entry
    /// with the same inode (a rename or recreate), or of the existing entry
    /// at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
1767
    /// Removes the entry at `path` and all of its descendants from both
    /// trees, remembering the removed ids (keyed by inode) so a later
    /// re-creation of the same inode can reuse them.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`], [subtree at `path`], and
            // [after `path`], keeping the outer two pieces.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the highest id seen per inode so a re-created entry maps
            // back to the most recent removal.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the directory's ignore data as dirty for this scan.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }
1796
1797 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1798 let mut new_ignores = Vec::new();
1799 for ancestor in path.ancestors().skip(1) {
1800 if let Some((ignore, _)) = self.ignores.get(ancestor) {
1801 new_ignores.push((ancestor, Some(ignore.clone())));
1802 } else {
1803 new_ignores.push((ancestor, None));
1804 }
1805 }
1806
1807 let mut ignore_stack = IgnoreStack::none();
1808 for (parent_path, ignore) in new_ignores.into_iter().rev() {
1809 if ignore_stack.is_path_ignored(&parent_path, true) {
1810 ignore_stack = IgnoreStack::all();
1811 break;
1812 } else if let Some(ignore) = ignore {
1813 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1814 }
1815 }
1816
1817 if ignore_stack.is_path_ignored(path, is_dir) {
1818 ignore_stack = IgnoreStack::all();
1819 }
1820
1821 ignore_stack
1822 }
1823}
1824
/// Debug output: one line per entry, indented by path depth.
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries_by_path.cursor::<()>() {
            for _ in entry.path.ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
        }
        Ok(())
    }
}
1836
/// A handle to a file within a worktree, attached to open buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying entry has been deleted from disk.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root.
    worktree_path: Arc<Path>,
    // Path of this file relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    is_local: bool,
}
1846
1847impl language::File for File {
    /// The id of the worktree model this file belongs to.
    fn worktree_id(&self) -> usize {
        self.worktree.id()
    }
1851
    /// The worktree entry id, or `None` if the file was deleted.
    fn entry_id(&self) -> Option<usize> {
        self.entry_id
    }
1855
    /// The file's last-known modification time.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }
1859
    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }
1863
    /// The file's absolute path on disk; `None` for remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }
1871
1872 fn full_path(&self) -> PathBuf {
1873 let mut full_path = PathBuf::new();
1874 if let Some(worktree_name) = self.worktree_path.file_name() {
1875 full_path.push(worktree_name);
1876 }
1877 full_path.push(&self.path);
1878 full_path
1879 }
1880
    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            // Root path is empty; fall back to the worktree dir's name.
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }
1889
    /// A file counts as deleted once its worktree entry is gone.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }
1893
    /// Saves the buffer's contents. A local worktree writes to disk and
    /// then (when sharing) notifies the server; a remote worktree asks the
    /// host to perform the save over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.id() as u64;
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_remote_id;
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only notify the server if the project is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_remote_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }
1943
    /// Loads the file's contents from the local filesystem; returns `None`
    /// for files in remote worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }
1953
    /// Routes a buffer edit operation through the worktree so it can be
    /// forwarded to collaborators.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }
1959
    /// Notifies the host that this buffer was closed. Only meaningful for
    /// remote worktrees; the notification is best-effort and errors are
    /// merely logged.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        let worktree_id = self.worktree.id() as u64;
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_remote_id;
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }
1983
    /// Clones this file handle as a boxed trait object.
    fn boxed_clone(&self) -> Box<dyn language::File> {
        Box::new(self.clone())
    }
1987
    /// Enables downcasting back to the concrete `File` type.
    fn as_any(&self) -> &dyn Any {
        self
    }
1991}
1992
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root; empty for the root itself.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    pub is_ignored: bool,
}
2003
/// Kind of a worktree entry. `PendingDir` marks a directory whose children
/// have not been scanned yet; `File` carries the character bag of the
/// entry's path, used for fuzzy matching.
#[derive(Clone, Debug)]
pub enum EntryKind {
    PendingDir,
    Dir,
    File(CharBag),
}
2010
impl Entry {
    /// Builds an entry from filesystem metadata, assigning the next
    /// sequential entry id.
    fn new(
        path: Arc<Path>,
        metadata: &fs::Metadata,
        next_entry_id: &AtomicUsize,
        root_char_bag: CharBag,
    ) -> Self {
        Self {
            id: next_entry_id.fetch_add(1, SeqCst),
            kind: if metadata.is_dir {
                // Children are unknown until the scanner visits this dir.
                EntryKind::PendingDir
            } else {
                EntryKind::File(char_bag_for_path(root_char_bag, &path))
            },
            path,
            inode: metadata.inode,
            mtime: metadata.mtime,
            is_symlink: metadata.is_symlink,
            is_ignored: false,
        }
    }

    /// True for both scanned and still-pending directories.
    pub fn is_dir(&self) -> bool {
        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
    }

    pub fn is_file(&self) -> bool {
        matches!(self.kind, EntryKind::File(_))
    }
}
2041
2042impl sum_tree::Item for Entry {
2043 type Summary = EntrySummary;
2044
2045 fn summary(&self) -> Self::Summary {
2046 let visible_count = if self.is_ignored { 0 } else { 1 };
2047 let file_count;
2048 let visible_file_count;
2049 if self.is_file() {
2050 file_count = 1;
2051 visible_file_count = visible_count;
2052 } else {
2053 file_count = 0;
2054 visible_file_count = 0;
2055 }
2056
2057 EntrySummary {
2058 max_path: self.path.clone(),
2059 count: 1,
2060 visible_count,
2061 file_count,
2062 visible_file_count,
2063 }
2064 }
2065}
2066
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2074
/// Aggregated statistics for a subtree of entries: the rightmost path it
/// contains plus entry/file counts, with and without ignored items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    max_path: Arc<Path>,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2083
/// The empty summary: the empty path and all counts zeroed.
impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2095
2096impl sum_tree::Summary for EntrySummary {
2097 type Context = ();
2098
2099 fn add_summary(&mut self, rhs: &Self, _: &()) {
2100 self.max_path = rhs.max_path.clone();
2101 self.visible_count += rhs.visible_count;
2102 self.file_count += rhs.file_count;
2103 self.visible_file_count += rhs.visible_file_count;
2104 }
2105}
2106
/// An id-keyed index record mirroring `Entry`, stored in `entries_by_id`.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // Scan generation in which this entry was last touched.
    scan_id: usize,
}
2114
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single item's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2122
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// Items are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2130
/// Summary for `PathEntry` subtrees: the largest id they contain.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2135
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// `summary` covers items to the right, so its max id supersedes ours.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2143
/// Lets `entries_by_id` cursors seek by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2149
/// Ordering key for `Entry` items: the entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2152
/// The default key is the empty (root) path.
impl Default for PathKey {
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2158
/// Lets `entries_by_path` cursors seek by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2164
/// Scans a worktree's directory tree on background threads, keeping the
/// shared snapshot up to date and reporting progress via `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground worktree.
    snapshot: Arc<Mutex<Snapshot>>,
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2171
2172impl BackgroundScanner {
    /// Creates a scanner over the given shared snapshot, reporting scan
    /// state transitions on `notify`.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }
2186
    /// The absolute path of the root being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }
2190
    /// Takes a point-in-time copy of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
2194
    /// Drives the scanner: performs the initial recursive scan, then
    /// processes filesystem event batches until either the event stream or
    /// the state channel closes. Each burst of work is bracketed by
    /// `Scanning`/`Idle` notifications; a closed channel ends the loop.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }
2230
    /// Performs the initial recursive scan of the root directory, fanning
    /// the work out through a job queue consumed by one task per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Every job holds a clone of `tx`, so the channel closes (and
            // the workers below terminate) once all jobs have completed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2276
    /// Scans a single directory: reads its children, computes their ignore
    /// status, records them in the snapshot, and enqueues scan jobs for any
    /// subdirectories.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                // The child disappeared between readdir and stat.
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect this directory's ignore file. Because
                // `.gitignore` starts with a `.` and sorts near the beginning
                // of the listing, there should rarely be many such entries.
                // Update the ignore stack of any already-queued jobs as well;
                // `new_jobs` and the directory entries within `new_entries`
                // were pushed in lockstep, so they advance together here.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything inside an ignored directory is ignored.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2374
    /// Applies a batch of file-system events to the snapshot: removes stale
    /// entries, re-inserts entries for paths that still exist, rescans any
    /// directories that appeared, and refreshes ignore statuses. Returns
    /// `false` if the worktree root can no longer be canonicalized (e.g. it
    /// was deleted).
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Work on a private copy of the snapshot; it is swapped into the
        // shared one once the entry edits below are complete.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path, then drop any event whose path lies inside an earlier
        // event's path — the containing directory will be rescanned anyway.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path from the snapshot.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-insert entries for paths that still exist on disk,
        // queueing a scan job for each directory.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // Path no longer exists; it was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before scanning new directories.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        // Dropping our sender lets the workers terminate once the queue drains.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }
2476
    /// Recomputes `is_ignored` for entries affected by `.gitignore` files that
    /// changed during the current scan, and prunes cached ignores whose
    /// `.gitignore` file no longer exists.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // An ignore that was (re)loaded during this scan, and whose
            // directory still exists, needs its subtree's statuses refreshed.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            // Drop cached ignores whose `.gitignore` file was deleted.
            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            // Remove from both the local copy and the shared snapshot.
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of a directory we are already updating; the
            // recursive jobs spawned below will visit them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the channel so the workers terminate once the queue drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2533
    /// Recomputes the ignore status of the direct children of `job.path`,
    /// queueing follow-up jobs for child directories and writing any changed
    /// entries back to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                // Recurse into the subdirectory via the shared queue so other
                // workers can pick it up.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose status actually changed need edits; both the
            // path-keyed and id-keyed trees must stay in sync.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2574}
2575
2576async fn refresh_entry(
2577 fs: &dyn Fs,
2578 snapshot: &Mutex<Snapshot>,
2579 path: Arc<Path>,
2580 abs_path: &Path,
2581) -> Result<Entry> {
2582 let root_char_bag;
2583 let next_entry_id;
2584 {
2585 let snapshot = snapshot.lock();
2586 root_char_bag = snapshot.root_char_bag;
2587 next_entry_id = snapshot.next_entry_id.clone();
2588 }
2589 let entry = Entry::new(
2590 path,
2591 &fs.metadata(abs_path)
2592 .await?
2593 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2594 &next_entry_id,
2595 root_char_bag,
2596 );
2597 Ok(snapshot.lock().insert_entry(entry, fs))
2598}
2599
2600fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2601 let mut result = root_char_bag;
2602 result.extend(
2603 path.to_string_lossy()
2604 .chars()
2605 .map(|c| c.to_ascii_lowercase()),
2606 );
2607 result
2608}
2609
/// A unit of work for the parallel directory scan: one directory to read,
/// plus the ignore rules in effect there.
struct ScanJob {
    // Absolute path of the directory on disk.
    abs_path: PathBuf,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    // Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Handle to the scan queue itself, so a worker can enqueue child jobs.
    scan_queue: Sender<ScanJob>,
}
2616
/// A unit of work for recomputing ignore statuses: one directory whose
/// children should be re-evaluated against `ignore_stack`.
struct UpdateIgnoreStatusJob {
    // Worktree-relative path of the directory to process.
    path: Arc<Path>,
    // Ignore rules in effect for this directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    // Handle to the job queue itself, so a worker can enqueue child jobs.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2622
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    // Blocks (asynchronously) until all pending FS events for the worktree
    // have been delivered and processed. See the impl for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2630
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create and then delete a sentinel file, waiting for each change
            // to be observed by the worktree.
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Wait for any scan triggered by the sentinel churn to settle.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2663
/// Seek dimension accumulated while traversing the entry tree: the rightmost
/// path visited plus entry counts under each filtering mode.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Largest (rightmost) entry path covered so far.
    max_path: &'a Path,
    // Total entries, including directories and ignored entries.
    count: usize,
    // Entries that are not ignored (files and directories).
    visible_count: usize,
    // Files only, including ignored ones.
    file_count: usize,
    // Files only, excluding ignored ones.
    visible_file_count: usize,
}
2672
2673impl<'a> TraversalProgress<'a> {
2674 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2675 match (include_ignored, include_dirs) {
2676 (true, true) => self.count,
2677 (true, false) => self.file_count,
2678 (false, true) => self.visible_count,
2679 (false, false) => self.visible_file_count,
2680 }
2681 }
2682}
2683
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds a subtree's summary into the running traversal progress.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2693
2694impl<'a> Default for TraversalProgress<'a> {
2695 fn default() -> Self {
2696 Self {
2697 max_path: Path::new(""),
2698 count: 0,
2699 visible_count: 0,
2700 file_count: 0,
2701 visible_file_count: 0,
2702 }
2703 }
2704}
2705
/// Cursor-based iterator over worktree entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2711
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by the current filters. Returns
    /// whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` entries (under the current filters) have
    /// been passed. Returns whether the cursor moved.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, stopping at
    /// the next entry that exists and passes the filters. Returns `false` if
    /// the traversal is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek to the first path that is not a descendant of `entry.path`.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries preceding the cursor position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2757
2758impl<'a> Iterator for Traversal<'a> {
2759 type Item = &'a Entry;
2760
2761 fn next(&mut self) -> Option<Self::Item> {
2762 if let Some(item) = self.entry() {
2763 self.advance();
2764 Some(item)
2765 } else {
2766 None
2767 }
2768 }
2769}
2770
/// Seek targets used when navigating the entry tree with a `Traversal`.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // An exact path.
    Path(&'a Path),
    // The first path that is not a descendant of the given path.
    PathSuccessor(&'a Path),
    // The nth entry under the given filtering mode.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2781
2782impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2783 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2784 match self {
2785 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2786 TraversalTarget::PathSuccessor(path) => {
2787 if !cursor_location.max_path.starts_with(path) {
2788 Ordering::Equal
2789 } else {
2790 Ordering::Greater
2791 }
2792 }
2793 TraversalTarget::Count {
2794 count,
2795 include_dirs,
2796 include_ignored,
2797 } => Ord::cmp(
2798 count,
2799 &cursor_location.count(*include_dirs, *include_ignored),
2800 ),
2801 }
2802 }
2803}
2804
/// Iterator over the direct children of the directory at `parent_path`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2809
2810impl<'a> Iterator for ChildEntriesIter<'a> {
2811 type Item = &'a Entry;
2812
2813 fn next(&mut self) -> Option<Self::Item> {
2814 if let Some(item) = self.traversal.entry() {
2815 if item.path.starts_with(&self.parent_path) {
2816 self.traversal.advance_to_sibling();
2817 return Some(item);
2818 }
2819 }
2820 None
2821 }
2822}
2823
2824impl<'a> From<&'a Entry> for proto::Entry {
2825 fn from(entry: &'a Entry) -> Self {
2826 Self {
2827 id: entry.id as u64,
2828 is_dir: entry.is_dir(),
2829 path: entry.path.to_string_lossy().to_string(),
2830 inode: entry.inode,
2831 mtime: Some(entry.mtime.into()),
2832 is_symlink: entry.is_symlink,
2833 is_ignored: entry.is_ignored,
2834 }
2835 }
2836}
2837
2838impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2839 type Error = anyhow::Error;
2840
2841 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2842 if let Some(mtime) = entry.mtime {
2843 let kind = if entry.is_dir {
2844 EntryKind::Dir
2845 } else {
2846 let mut char_bag = root_char_bag.clone();
2847 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2848 EntryKind::File(char_bag)
2849 };
2850 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2851 Ok(Entry {
2852 id: entry.id as usize,
2853 kind,
2854 path: path.clone(),
2855 inode: entry.inode,
2856 mtime: mtime.into(),
2857 is_symlink: entry.is_symlink,
2858 is_ignored: entry.is_ignored,
2859 })
2860 } else {
2861 Err(anyhow!(
2862 "missing mtime in remote worktree entry {:?}",
2863 entry.path
2864 ))
2865 }
2866 }
2867}
2868
/// Conversion from LSP positions into this crate's UTF-16 point type.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
2872
impl ToPointUtf16 for lsp::Position {
    // An LSP position is a (line, UTF-16 code-unit) pair, which maps directly
    // onto `PointUtf16`'s representation.
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
2878
2879fn diagnostic_ranges<'a>(
2880 diagnostic: &'a lsp::Diagnostic,
2881 abs_path: &'a Path,
2882) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
2883 diagnostic
2884 .related_information
2885 .iter()
2886 .flatten()
2887 .filter_map(move |info| {
2888 if info.location.uri.to_file_path().ok()? == abs_path {
2889 let info_start = PointUtf16::new(
2890 info.location.range.start.line,
2891 info.location.range.start.character,
2892 );
2893 let info_end = PointUtf16::new(
2894 info.location.range.end.line,
2895 info.location.range.end.character,
2896 );
2897 Some(info_start..info_end)
2898 } else {
2899 None
2900 }
2901 })
2902 .chain(Some(
2903 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
2904 ))
2905}
2906
2907#[cfg(test)]
2908mod tests {
2909 use super::*;
2910 use crate::fs::FakeFs;
2911 use anyhow::Result;
2912 use client::test::{FakeHttpClient, FakeServer};
2913 use fs::RealFs;
2914 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
2915 use language::{Diagnostic, LanguageConfig};
2916 use lsp::Url;
2917 use rand::prelude::*;
2918 use serde_json::json;
2919 use std::{cell::RefCell, rc::Rc};
2920 use std::{
2921 env,
2922 fmt::Write,
2923 time::{SystemTime, UNIX_EPOCH},
2924 };
2925 use text::Point;
2926 use unindent::Unindent as _;
2927 use util::test::temp_tree;
2928
    // Traversing a worktree with `include_ignored == false` skips entries
    // matched by a `.gitignore` ("a/b" here) while keeping their siblings.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan before inspecting entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            // "a/b" is ignored, so it must not appear in the traversal.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
2975
    // Editing a buffer and saving it writes the new contents back to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Grow the file well past its original size to exercise a large write.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        // The on-disk contents must match the buffer exactly.
        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3009
    // A worktree rooted at a single file (not a directory) can still open and
    // save that file; the file is addressed by the empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // In a single-file worktree, the root file's relative path is "".
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3048
3049 // #[gpui::test]
3050 // async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3051 // let dir = temp_tree(json!({
3052 // "a": {
3053 // "file1": "",
3054 // "file2": "",
3055 // "file3": "",
3056 // },
3057 // "b": {
3058 // "c": {
3059 // "file4": "",
3060 // "file5": "",
3061 // }
3062 // }
3063 // }));
3064
3065 // let user_id = 5;
3066 // let mut client = Client::new();
3067 // let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3068 // let user_store = server.build_user_store(client.clone(), &mut cx).await;
3069 // let tree = Worktree::open_local(
3070 // client,
3071 // user_store.clone(),
3072 // dir.path(),
3073 // Arc::new(RealFs),
3074 // Default::default(),
3075 // &mut cx.to_async(),
3076 // )
3077 // .await
3078 // .unwrap();
3079
3080 // let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3081 // let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3082 // async move { buffer.await.unwrap() }
3083 // };
3084 // let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3085 // tree.read_with(cx, |tree, _| {
3086 // tree.entry_for_path(path)
3087 // .expect(&format!("no entry for path {}", path))
3088 // .id
3089 // })
3090 // };
3091
3092 // let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3093 // let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3094 // let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3095 // let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3096
3097 // let file2_id = id_for_path("a/file2", &cx);
3098 // let file3_id = id_for_path("a/file3", &cx);
3099 // let file4_id = id_for_path("b/c/file4", &cx);
3100
3101 // // Wait for the initial scan.
3102 // cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3103 // .await;
3104
3105 // // Create a remote copy of this worktree.
3106 // let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3107 // let worktree_id = 1;
3108 // let proto_message = tree.update(&mut cx, |tree, cx| tree.as_local().unwrap().to_proto(cx));
3109 // let open_worktree = server.receive::<proto::OpenWorktree>().await.unwrap();
3110 // server
3111 // .respond(
3112 // open_worktree.receipt(),
3113 // proto::OpenWorktreeResponse { worktree_id: 1 },
3114 // )
3115 // .await;
3116
3117 // let remote = Worktree::remote(
3118 // proto::JoinWorktreeResponse {
3119 // worktree: Some(proto_message.await),
3120 // replica_id: 1,
3121 // collaborators: Vec::new(),
3122 // },
3123 // Client::new(),
3124 // user_store,
3125 // Default::default(),
3126 // &mut cx.to_async(),
3127 // )
3128 // .await
3129 // .unwrap();
3130
3131 // cx.read(|cx| {
3132 // assert!(!buffer2.read(cx).is_dirty());
3133 // assert!(!buffer3.read(cx).is_dirty());
3134 // assert!(!buffer4.read(cx).is_dirty());
3135 // assert!(!buffer5.read(cx).is_dirty());
3136 // });
3137
3138 // // Rename and delete files and directories.
3139 // tree.flush_fs_events(&cx).await;
3140 // std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3141 // std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3142 // std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3143 // std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3144 // tree.flush_fs_events(&cx).await;
3145
3146 // let expected_paths = vec![
3147 // "a",
3148 // "a/file1",
3149 // "a/file2.new",
3150 // "b",
3151 // "d",
3152 // "d/file3",
3153 // "d/file4",
3154 // ];
3155
3156 // cx.read(|app| {
3157 // assert_eq!(
3158 // tree.read(app)
3159 // .paths()
3160 // .map(|p| p.to_str().unwrap())
3161 // .collect::<Vec<_>>(),
3162 // expected_paths
3163 // );
3164
3165 // assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3166 // assert_eq!(id_for_path("d/file3", &cx), file3_id);
3167 // assert_eq!(id_for_path("d/file4", &cx), file4_id);
3168
3169 // assert_eq!(
3170 // buffer2.read(app).file().unwrap().path().as_ref(),
3171 // Path::new("a/file2.new")
3172 // );
3173 // assert_eq!(
3174 // buffer3.read(app).file().unwrap().path().as_ref(),
3175 // Path::new("d/file3")
3176 // );
3177 // assert_eq!(
3178 // buffer4.read(app).file().unwrap().path().as_ref(),
3179 // Path::new("d/file4")
3180 // );
3181 // assert_eq!(
3182 // buffer5.read(app).file().unwrap().path().as_ref(),
3183 // Path::new("b/c/file5")
3184 // );
3185
3186 // assert!(!buffer2.read(app).file().unwrap().is_deleted());
3187 // assert!(!buffer3.read(app).file().unwrap().is_deleted());
3188 // assert!(!buffer4.read(app).file().unwrap().is_deleted());
3189 // assert!(buffer5.read(app).file().unwrap().is_deleted());
3190 // });
3191
3192 // // Update the remote worktree. Check that it becomes consistent with the
3193 // // local worktree.
3194 // remote.update(&mut cx, |remote, cx| {
3195 // let update_message =
3196 // tree.read(cx)
3197 // .snapshot()
3198 // .build_update(&initial_snapshot, worktree_id, true);
3199 // remote
3200 // .as_remote_mut()
3201 // .unwrap()
3202 // .snapshot
3203 // .apply_update(update_message)
3204 // .unwrap();
3205
3206 // assert_eq!(
3207 // remote
3208 // .paths()
3209 // .map(|p| p.to_str().unwrap())
3210 // .collect::<Vec<_>>(),
3211 // expected_paths
3212 // );
3213 // });
3214 // }
3215
    // Files created after the initial scan still get correct ignore statuses
    // when the resulting FS events are processed.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        // Statuses from the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Create new files in both directories and wait for the rescan.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3267
3268 // #[gpui::test]
3269 // async fn test_open_and_share_worktree(mut cx: gpui::TestAppContext) {
3270 // let user_id = 100;
3271 // let mut client = Client::new();
3272 // let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3273 // let user_store = server.build_user_store(client.clone(), &mut cx).await;
3274
3275 // let fs = Arc::new(FakeFs::new());
3276 // fs.insert_tree(
3277 // "/path",
3278 // json!({
3279 // "to": {
3280 // "the-dir": {
3281 // ".zed.toml": r#"collaborators = ["friend-1", "friend-2"]"#,
3282 // "a.txt": "a-contents",
3283 // },
3284 // },
3285 // }),
3286 // )
3287 // .await;
3288
3289 // let worktree = Worktree::open_local(
3290 // client.clone(),
3291 // user_store,
3292 // "/path/to/the-dir".as_ref(),
3293 // fs,
3294 // Default::default(),
3295 // &mut cx.to_async(),
3296 // )
3297 // .await
3298 // .unwrap();
3299
3300 // let open_worktree = server.receive::<proto::OpenWorktree>().await.unwrap();
3301 // assert_eq!(
3302 // open_worktree.payload,
3303 // proto::OpenWorktree {
3304 // root_name: "the-dir".to_string(),
3305 // authorized_logins: vec!["friend-1".to_string(), "friend-2".to_string()],
3306 // }
3307 // );
3308
3309 // server
3310 // .respond(
3311 // open_worktree.receipt(),
3312 // proto::OpenWorktreeResponse { worktree_id: 5 },
3313 // )
3314 // .await;
3315 // let remote_id = worktree
3316 // .update(&mut cx, |tree, _| tree.as_local().unwrap().next_remote_id())
3317 // .await;
3318 // assert_eq!(remote_id, Some(5));
3319
3320 // cx.update(move |_| drop(worktree));
3321 // server.receive::<proto::CloseWorktree>().await.unwrap();
3322 // }
3323
    // Opening the same path multiple times — concurrently or sequentially —
    // must always yield the same underlying buffer.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3381
    /// Verifies dirty-state tracking and event emission for buffers:
    /// editing marks a buffer dirty (emitting `Dirtied` after `Edited`),
    /// saving clears the flag (emitting `Saved`), deleting the backing file
    /// dirties a clean buffer, and deleting an already-dirty buffer's file
    /// does not re-emit `Dirtied`.
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        // Three files on disk, one per scenario exercised below.
        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Drain pending fs notifications and wait for the initial scan so that
        // later file deletions are observed deterministically.
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Accumulates every `language::Event` emitted by the buffer under test.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a save by acknowledging the buffer's current version.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        // Note: only one `Dirtied` is emitted for the two consecutive edits.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        // Make the buffer dirty *before* deleting its file, then discard the
        // events generated by the edit itself.
        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3521
    /// Verifies how a buffer reacts to its backing file changing on disk:
    /// an unmodified buffer silently reloads the new contents (staying
    /// clean), while a modified buffer keeps its local edits and is flagged
    /// as having a conflict instead.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            // Reloading from disk must leave the buffer clean and conflict-free.
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3624
    /// Verifies that diagnostics published by a (fake) language server for a
    /// file that is not yet open are retained by the worktree, and surface on
    /// the buffer when it is subsequently opened.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // A fake LSP server paired with a language config that triggers it.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        // Publish a diagnostic for `a.rs` *before* that buffer is opened.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The previously-published diagnostic must appear on the newly-opened
        // buffer, translated into buffer coordinates.
        buffer.read_with(&cx, |buffer, _| {
            let diagnostics = buffer
                .snapshot()
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3706
    /// Verifies that LSP diagnostics whose `related_information` cross-reference
    /// each other are grouped: each primary diagnostic and its hint entries
    /// share a `group_id`, with `is_primary` distinguishing the original
    /// diagnostic from its related hints.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two diagnostic groups, mirroring how rust-analyzer reports a
        // borrow error: each primary diagnostic lists its hints as related
        // information, and each hint points back at the primary.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                // Group 0 primary: "error 1" with one related hint.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 0 hint, referring back to its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 1 primary: "error 2" with two related hints.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                // Group 1 hints, each referring back to their primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| tree.update_diagnostics(message, cx))
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics, ordered by position; group membership is encoded
        // in `group_id` and `is_primary`.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Fetching a single group returns only that group's entries, in order.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3969
    /// Randomized stress test for the `BackgroundScanner`: mutates a real
    /// temporary directory while interleaving event delivery, then checks
    /// that (a) snapshot invariants hold throughout, (b) an incremental scan
    /// converges to the same state as a fresh scan, and (c) `build_update` /
    /// `apply_update` can bring any earlier snapshot up to date.
    ///
    /// `OPERATIONS` and `INITIAL_ENTRIES` env vars override the default
    /// mutation and seed-tree sizes.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the tree with `initial_entries` random insertions
        // (probability 1.0 forces creation).
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // An empty snapshot containing only the root entry; cloned below so
        // the second scanner can start from the same initial state.
        let mut initial_snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with (batched, possibly delayed) event
        // delivery, checking invariants after each delivery and capturing
        // intermediate snapshots to replay updates against later.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush all remaining events so the scanner reaches a settled state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final directory must match the incrementally
        // maintained snapshot exactly.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every intermediate snapshot must be updatable to the final state,
        // both with and without ignored entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries so the snapshot matches what a
                // non-ignored update stream would have produced.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4086
4087 fn randomly_mutate_tree(
4088 root_path: &Path,
4089 insertion_probability: f64,
4090 rng: &mut impl Rng,
4091 ) -> Result<Vec<fsevent::Event>> {
4092 let root_path = root_path.canonicalize().unwrap();
4093 let (dirs, files) = read_dir_recursive(root_path.clone());
4094
4095 let mut events = Vec::new();
4096 let mut record_event = |path: PathBuf| {
4097 events.push(fsevent::Event {
4098 event_id: SystemTime::now()
4099 .duration_since(UNIX_EPOCH)
4100 .unwrap()
4101 .as_secs(),
4102 flags: fsevent::StreamFlags::empty(),
4103 path,
4104 });
4105 };
4106
4107 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4108 let path = dirs.choose(rng).unwrap();
4109 let new_path = path.join(gen_name(rng));
4110
4111 if rng.gen() {
4112 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4113 std::fs::create_dir(&new_path)?;
4114 } else {
4115 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4116 std::fs::write(&new_path, "")?;
4117 }
4118 record_event(new_path);
4119 } else if rng.gen_bool(0.05) {
4120 let ignore_dir_path = dirs.choose(rng).unwrap();
4121 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4122
4123 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4124 let files_to_ignore = {
4125 let len = rng.gen_range(0..=subfiles.len());
4126 subfiles.choose_multiple(rng, len)
4127 };
4128 let dirs_to_ignore = {
4129 let len = rng.gen_range(0..subdirs.len());
4130 subdirs.choose_multiple(rng, len)
4131 };
4132
4133 let mut ignore_contents = String::new();
4134 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4135 write!(
4136 ignore_contents,
4137 "{}\n",
4138 path_to_ignore
4139 .strip_prefix(&ignore_dir_path)?
4140 .to_str()
4141 .unwrap()
4142 )
4143 .unwrap();
4144 }
4145 log::info!(
4146 "Creating {:?} with contents:\n{}",
4147 ignore_path.strip_prefix(&root_path)?,
4148 ignore_contents
4149 );
4150 std::fs::write(&ignore_path, ignore_contents).unwrap();
4151 record_event(ignore_path);
4152 } else {
4153 let old_path = {
4154 let file_path = files.choose(rng);
4155 let dir_path = dirs[1..].choose(rng);
4156 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4157 };
4158
4159 let is_rename = rng.gen();
4160 if is_rename {
4161 let new_path_parent = dirs
4162 .iter()
4163 .filter(|d| !d.starts_with(old_path))
4164 .choose(rng)
4165 .unwrap();
4166
4167 let overwrite_existing_dir =
4168 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4169 let new_path = if overwrite_existing_dir {
4170 std::fs::remove_dir_all(&new_path_parent).ok();
4171 new_path_parent.to_path_buf()
4172 } else {
4173 new_path_parent.join(gen_name(rng))
4174 };
4175
4176 log::info!(
4177 "Renaming {:?} to {}{:?}",
4178 old_path.strip_prefix(&root_path)?,
4179 if overwrite_existing_dir {
4180 "overwrite "
4181 } else {
4182 ""
4183 },
4184 new_path.strip_prefix(&root_path)?
4185 );
4186 std::fs::rename(&old_path, &new_path)?;
4187 record_event(old_path.clone());
4188 record_event(new_path);
4189 } else if old_path.is_dir() {
4190 let (dirs, files) = read_dir_recursive(old_path.clone());
4191
4192 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4193 std::fs::remove_dir_all(&old_path).unwrap();
4194 for file in files {
4195 record_event(file);
4196 }
4197 for dir in dirs {
4198 record_event(dir);
4199 }
4200 } else {
4201 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4202 std::fs::remove_file(old_path).unwrap();
4203 record_event(old_path.clone());
4204 }
4205 }
4206
4207 Ok(events)
4208 }
4209
4210 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4211 let child_entries = std::fs::read_dir(&path).unwrap();
4212 let mut dirs = vec![path];
4213 let mut files = Vec::new();
4214 for child_entry in child_entries {
4215 let child_path = child_entry.unwrap().path();
4216 if child_path.is_dir() {
4217 let (child_dirs, child_files) = read_dir_recursive(child_path);
4218 dirs.extend(child_dirs);
4219 files.extend(child_files);
4220 } else {
4221 files.push(child_path);
4222 }
4223 }
4224 (dirs, files)
4225 }
4226
4227 fn gen_name(rng: &mut impl Rng) -> String {
4228 (0..6)
4229 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4230 .map(char::from)
4231 .collect()
4232 }
4233
    // Test-only helpers on `Snapshot` for validating and comparing scans.
    impl Snapshot {
        /// Asserts the snapshot's internal consistency: the `files` iterators
        /// agree with `entries_by_path`, traversing via `child_entries` visits
        /// entries in the same order as the path-sorted entry tree, and every
        /// tracked ignore still has a directory entry and a `.gitignore` entry.
        fn check_invariants(&self) {
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must now be exhausted — no files beyond the tree's entries.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries` using an explicit stack.
            // NOTE(review): despite the `bfs_paths` name, inserting each
            // node's children at the pre-insertion top of the stack and
            // popping from the end appears to yield a depth-first preorder,
            // which is what makes the comparison with the path-ordered
            // cursor below line up.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every directory with a cached ignore must still exist, along
            // with its `.gitignore` file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally excluding ignored entries. Used to
        /// compare snapshots structurally in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4285}